Posted to commits@impala.apache.org by he...@apache.org on 2017/03/29 02:53:53 UTC

[12/14] incubator-impala git commit: IMPALA-4758: (1/2) Update gutil/ from Kudu@a1bfd7b

IMPALA-4758: (1/2) Update gutil/ from Kudu@a1bfd7b

* Copy gutil from Kudu
* Minimal changes to gutil/CMakeLists.txt

Change-Id: Ic708a9c4e76ede17af9b06e0a0a8e9ae7d357960
Reviewed-on: http://gerrit.cloudera.org:8080/5687
Reviewed-by: Dan Hecht <dh...@cloudera.com>
Tested-by: Henry Robinson <he...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/02f3e3fc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/02f3e3fc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/02f3e3fc

Branch: refs/heads/master
Commit: 02f3e3fcc1c58bcaf5080ddee939c9081412a553
Parents: ff095f1
Author: Henry Robinson <he...@cloudera.com>
Authored: Tue Oct 25 14:45:32 2016 -0700
Committer: Henry Robinson <he...@cloudera.com>
Committed: Wed Mar 29 02:52:21 2017 +0000

----------------------------------------------------------------------
 be/src/gutil/CMakeLists.txt                     |  110 +-
 be/src/gutil/algorithm.h                        |  463 ---
 be/src/gutil/atomic_refcount.h                  |   35 +-
 be/src/gutil/atomicops-internals-powerpc.h      |   29 +-
 be/src/gutil/atomicops-internals-tsan.h         |  217 ++
 be/src/gutil/atomicops-internals-x86.cc         |   47 +-
 be/src/gutil/atomicops-internals-x86.h          |   71 +-
 be/src/gutil/atomicops.h                        |   16 +-
 .../auxiliary/atomicops-internals-arm-generic.h |    4 +-
 .../auxiliary/atomicops-internals-arm-v6plus.h  |    2 +-
 .../auxiliary/atomicops-internals-macosx.h      |  397 ---
 .../auxiliary/atomicops-internals-windows.h     |    2 +-
 be/src/gutil/basictypes.h                       |    4 +-
 be/src/gutil/bind.h                             |  539 ++++
 be/src/gutil/bind.h.pump                        |  153 +
 be/src/gutil/bind_helpers.h                     |  551 ++++
 be/src/gutil/bind_internal.h                    | 2695 ++++++++++++++++++
 be/src/gutil/bind_internal.h.pump               |  464 +++
 be/src/gutil/bits.cc                            |   18 +-
 be/src/gutil/bits.h                             |   46 +-
 be/src/gutil/callback.h                         |  765 +++++
 be/src/gutil/callback.h.pump                    |  436 +++
 be/src/gutil/callback_forward.h                 |   17 +
 be/src/gutil/callback_internal.cc               |   36 +
 be/src/gutil/callback_internal.h                |  177 ++
 be/src/gutil/casts.h                            |   18 +-
 be/src/gutil/charmap.h                          |   10 +-
 be/src/gutil/cpu.cc                             |  289 ++
 be/src/gutil/cpu.h                              |   90 +
 be/src/gutil/cycleclock-inl.h                   |   39 +-
 be/src/gutil/dynamic_annotations.c              |  173 ++
 be/src/gutil/dynamic_annotations.cc             |  173 --
 be/src/gutil/dynamic_annotations.h              |   62 +-
 be/src/gutil/endian.h                           |   35 +-
 be/src/gutil/fixedarray.h                       |   31 +-
 be/src/gutil/gscoped_ptr.h                      |   50 +-
 be/src/gutil/hash/builtin_type_hash.h           |    8 +-
 be/src/gutil/hash/city.cc                       |   14 +-
 be/src/gutil/hash/city.h                        |    4 +-
 be/src/gutil/hash/hash.cc                       |   18 +-
 be/src/gutil/hash/hash.h                        |   59 +-
 be/src/gutil/hash/hash128to64.h                 |    4 +-
 be/src/gutil/hash/jenkins.cc                    |    8 +-
 be/src/gutil/hash/jenkins.h                     |    2 +-
 be/src/gutil/hash/jenkins_lookup2.h             |    4 +-
 be/src/gutil/hash/legacy_hash.h                 |    9 +-
 be/src/gutil/hash/string_hash.h                 |   10 +-
 be/src/gutil/int128.cc                          |    4 +-
 be/src/gutil/int128.h                           |    2 +-
 be/src/gutil/integral_types.h                   |   26 +-
 be/src/gutil/linux_syscall_support.h            |   16 +-
 be/src/gutil/logging-inl.h                      |   23 +-
 be/src/gutil/macros.h                           |   27 +-
 be/src/gutil/manual_constructor.h               |    2 +-
 be/src/gutil/map-util.h                         |  277 +-
 be/src/gutil/mathlimits.cc                      |   30 +-
 be/src/gutil/mathlimits.h                       |   34 +-
 be/src/gutil/move.h                             |   15 +-
 be/src/gutil/once.cc                            |    8 +-
 be/src/gutil/once.h                             |   12 +-
 be/src/gutil/paranoid.h                         |    3 +-
 be/src/gutil/port.h                             |  178 +-
 be/src/gutil/proto/types.pb.cc                  |  107 -
 be/src/gutil/proto/types.pb.h                   |   94 -
 be/src/gutil/proto/types.proto                  |   28 -
 .../gutil/raw_scoped_refptr_mismatch_checker.h  |   63 +
 be/src/gutil/ref_counted.cc                     |   95 +
 be/src/gutil/ref_counted.h                      |  354 +++
 be/src/gutil/ref_counted_memory.cc              |   99 +
 be/src/gutil/ref_counted_memory.h               |  150 +
 be/src/gutil/singleton.h                        |    5 +-
 be/src/gutil/sparsetable.h                      | 1838 ------------
 be/src/gutil/spinlock.cc                        |   10 +-
 be/src/gutil/spinlock.h                         |    8 +-
 be/src/gutil/spinlock_internal.cc               |    8 +-
 be/src/gutil/spinlock_internal.h                |    4 +-
 be/src/gutil/spinlock_linux-inl.h               |    2 +-
 be/src/gutil/spinlock_win32-inl.h               |   55 +-
 be/src/gutil/stl_util.h                         |   58 +-
 be/src/gutil/stringprintf.cc                    |   10 +-
 be/src/gutil/stringprintf.h                     |    2 +-
 be/src/gutil/strings/ascii_ctype.cc             |    2 +-
 be/src/gutil/strings/charset.cc                 |    2 +-
 be/src/gutil/strings/charset.h                  |    2 +-
 be/src/gutil/strings/escaping.cc                |   35 +-
 be/src/gutil/strings/escaping.h                 |    9 +-
 be/src/gutil/strings/fastmem.h                  |    4 +-
 be/src/gutil/strings/human_readable.cc          |   26 +-
 be/src/gutil/strings/human_readable.h           |    6 +-
 be/src/gutil/strings/join.cc                    |   32 +-
 be/src/gutil/strings/join.h                     |   58 +-
 be/src/gutil/strings/memutil.cc                 |   18 +-
 be/src/gutil/strings/memutil.h                  |    2 +-
 be/src/gutil/strings/numbers.cc                 |   87 +-
 be/src/gutil/strings/numbers.h                  |   39 +-
 be/src/gutil/strings/serialize.cc               |   69 +-
 be/src/gutil/strings/serialize.h                |   15 +-
 be/src/gutil/strings/split.cc                   |   42 +-
 be/src/gutil/strings/split.h                    |   23 +-
 be/src/gutil/strings/split_internal.h           |   22 +-
 be/src/gutil/strings/strcat.cc                  |   12 +-
 be/src/gutil/strings/strcat.h                   |   22 +-
 be/src/gutil/strings/string_util-test.cc        |   58 +
 be/src/gutil/strings/stringpiece.cc             |   37 +-
 be/src/gutil/strings/stringpiece.h              |   47 +-
 be/src/gutil/strings/strip.cc                   |   16 +-
 be/src/gutil/strings/strip.h                    |    4 +-
 be/src/gutil/strings/substitute.cc              |   16 +-
 be/src/gutil/strings/substitute.h               |    6 +-
 be/src/gutil/strings/util.cc                    |  289 +-
 be/src/gutil/strings/util.h                     |   26 +-
 be/src/gutil/strtoint.cc                        |    4 +-
 be/src/gutil/strtoint.h                         |    6 +-
 be/src/gutil/synchronization_profiling.h        |    2 +-
 be/src/gutil/sysinfo.cc                         |   41 +-
 be/src/gutil/sysinfo.h                          |    4 +
 be/src/gutil/template_util.h                    |   31 +
 .../gutil/threading/thread_collision_warner.cc  |   82 +
 .../gutil/threading/thread_collision_warner.h   |  248 ++
 be/src/gutil/type_traits.h                      |   12 +-
 be/src/gutil/utf/rune.c                         |  350 +++
 be/src/gutil/utf/rune.cc                        |  354 ---
 be/src/gutil/walltime.cc                        |   47 +-
 be/src/gutil/walltime.h                         |   66 +-
 124 files changed, 9709 insertions(+), 4615 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/be/src/gutil/CMakeLists.txt b/be/src/gutil/CMakeLists.txt
index bb24bc1..f895679 100644
--- a/be/src/gutil/CMakeLists.txt
+++ b/be/src/gutil/CMakeLists.txt
@@ -15,69 +15,59 @@
 # specific language governing permissions and limitations
 # under the License.
 
-cmake_minimum_required(VERSION 2.6)
-
-set(LIBRARY_OUTPUT_PATH "${BUILD_OUTPUT_ROOT_DIRECTORY}/gutil")
-set(EXECUTABLE_OUTPUT_PATH "${BUILD_OUTPUT_ROOT_DIRECTORY}/gutil")
-
-# Ignore the code in proto/ for now until needed; otherwise this adds an extra dependency
-# on protoc to the build for no gain.
-
-# PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS
-#   SOURCE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../
-#   BINARY_ROOT ${CMAKE_CURRENT_BINARY_DIR}/../
-#   )
-# SET(GUTIL_SRCS ${PROTO_SRCS})
-
-# PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS
-#   SOURCE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../
-#   BINARY_ROOT ${CMAKE_CURRENT_BINARY_DIR}/../
-#   PROTO_FILES proto/types.proto
-#   )
-# SET(GUTIL_SRCS ${GUTIL_SRCS} ${PROTO_SRCS})
-
-INCLUDE_DIRECTORIES($ENV{IMPALA_HOME}/thirdparty/)
 INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
 
-ADD_LIBRARY(gutil
-#  ${GUTIL_SRCS}
-./bits.cc
-./dynamic_annotations.cc
-./stringprintf.cc
-./strings/memutil.cc
-./strings/charset.cc
-./strings/util.cc
-./strings/human_readable.cc
-./strings/join.cc
-./strings/numbers.cc
-./strings/split.cc
-./strings/strip.cc
-./strings/ascii_ctype.cc
-./strings/escaping.cc
-./strings/stringpiece.cc
-./strings/substitute.cc
-./strings/strcat.cc
-./strings/serialize.cc
-./mathlimits.cc
-./int128.cc
-./strtoint.cc
-./once.cc
-./atomicops-internals-x86.cc
-./spinlock.cc
-./spinlock_internal.cc
-./sysinfo.cc
-./walltime.cc
-./hash/hash.cc
-./hash/jenkins.cc
-./hash/city.cc
-./utf/rune.cc
-)
+set(GUTIL_SRCS
+  atomicops-internals-x86.cc
+  bits.cc
+  callback_internal.cc
+  cpu.cc
+  dynamic_annotations.c
+  hash/city.cc
+  hash/hash.cc
+  hash/jenkins.cc
+  int128.cc
+  mathlimits.cc
+  once.cc
+  ref_counted.cc
+  ref_counted_memory.cc
+  spinlock.cc
+  spinlock_internal.cc
+  stringprintf.cc
+  strings/ascii_ctype.cc
+  strings/charset.cc
+  strings/escaping.cc
+  strings/human_readable.cc
+  strings/join.cc
+  strings/memutil.cc
+  strings/numbers.cc
+  strings/serialize.cc
+  strings/split.cc
+  strings/strcat.cc
+  strings/stringpiece.cc
+  strings/strip.cc
+  strings/substitute.cc
+  strings/util.cc
+  strtoint.cc
+  sysinfo.cc
+  threading/thread_collision_warner.cc
+  utf/rune.c
+  walltime.cc)
 
+set(GUTIL_LIBS
+  glog
+  protobuf)
 
+if (NOT APPLE)
+  set(GUTIL_LIBS
+    ${GUTIL_LIBS}
+    rt) # clock_gettime() requires -lrt
+endif()
 
-# Disable warnings which trigger a lot in the Google code:
-SET_TARGET_PROPERTIES(gutil PROPERTIES
-  COMPILE_FLAGS "${CXX_COMMON_FLAGS} -funsigned-char -Wno-deprecated -Wno-char-subscripts -fPIC")
+ADD_EXPORTABLE_LIBRARY(gutil
+  SRCS ${GUTIL_SRCS}
+  DEPS ${GUTIL_LIBS}
+  # Disable warnings which trigger a lot in the Google code:
+  COMPILE_FLAGS "-funsigned-char -Wno-deprecated -Wno-char-subscripts")
 
-TARGET_LINK_LIBRARIES(gutil glogstatic gflagsstatic)
-SET(GUTIL_LIBS gutil)
+add_kudu_test(strings/string_util-test)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/algorithm.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/algorithm.h b/be/src/gutil/algorithm.h
deleted file mode 100644
index 3322df3..0000000
--- a/be/src/gutil/algorithm.h
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright 2006 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ---
-//
-//
-// This file contains some Google extensions to the standard
-// <algorithm> C++ header. Many of these algorithms were in the
-// original STL before it was proposed for standardization.
-
-#ifndef UTIL_GTL_ALGORITHM_H_
-#define UTIL_GTL_ALGORITHM_H_
-
-#include <stddef.h>
-#include <algorithm>
-using std::copy;
-using std::max;
-using std::min;
-using std::reverse;
-using std::sort;
-using std::swap;
-#include <functional>
-using std::binary_function;
-using std::less;
-#include <iterator>
-using std::back_insert_iterator;
-using std::iterator_traits;
-#include <utility>
-using std::make_pair;
-using std::pair;
-
-namespace util {
-namespace gtl {
-
-// Returns true if [first, last) contains an element equal to value.
-// Complexity: linear.
-template<typename InputIterator, typename EqualityComparable>
-bool contains(InputIterator first, InputIterator last,
-              const EqualityComparable& value) {
-  return std::find(first, last, value) != last;
-}
-
-// There is no contains_if().  Use any() instead.
-
-template<typename InputIterator, typename ForwardIterator>
-bool contains_some_of(InputIterator first1, InputIterator last1,
-                      ForwardIterator first2, ForwardIterator last2) {
-  return std::find_first_of(first1, last1, first2, last2) != last1;
-}
-
-template<typename InputIterator, typename ForwardIterator, typename Predicate>
-bool contains_some_of(InputIterator first1, InputIterator last1,
-                      ForwardIterator first2, ForwardIterator last2,
-                      Predicate pred) {
-  return std::find_first_of(first1, last1, first2, last2, pred) != last1;
-}
-
-template<typename InputIterator, typename EqualityComparable>
-typename std::iterator_traits<InputIterator>::pointer
-find_or_null(InputIterator first, InputIterator last,
-             const EqualityComparable& value) {
-  const InputIterator it = std::find(first, last, value);
-  return it != last ? &*it : NULL;
-}
-
-template<typename InputIterator, typename Predicate>
-typename std::iterator_traits<InputIterator>::pointer
-find_if_or_null(InputIterator first, InputIterator last, Predicate pred) {
-  const InputIterator it = std::find_if(first, last, pred);
-  return it != last ? &*it : NULL;
-}
-
-// Copies all elements that satisfy the predicate pred from [first,
-// last) to out. This is the complement of remove_copy_if. Complexity:
-// exactly last-first applications of pred.
-template <typename InputIterator, typename OutputIterator, typename Predicate>
-OutputIterator copy_if(InputIterator first, InputIterator last,
-                       OutputIterator out,
-                       Predicate pred) {
-  for (; first != last; ++first) {
-    if (pred(*first))
-      *out++ = *first;
-  }
-  return out;
-}
-
-// Copies n elements to out.  Equivalent to copy(first, first + n, out) for
-// random access iterators and a longer code block for lesser iterators.
-template <typename InputIterator, typename Size, typename OutputIterator>
-OutputIterator copy_n(InputIterator first, Size n, OutputIterator out) {
-  while (n > 0) {
-    *out = *first;
-    ++out;
-    ++first;
-    --n;
-  }
-  return out;
-}
-
-// Returns true if pred is true for every element in [first, last). Complexity:
-// at most last-first applications of pred.
-template <typename InputIterator, typename Predicate>
-bool all(InputIterator first, InputIterator last, Predicate pred) {
-  for (; first != last; ++first) {
-    if (!pred(*first))
-      return false;
-  }
-  return true;
-}
-
-// Returns true if pred is false for every element in [first,
-// last). Complexity: at most last-first applications of pred.
-template <typename InputIterator, typename Predicate>
-bool none(InputIterator first, InputIterator last, Predicate pred) {
-  return find_if(first, last, pred) == last;
-}
-
-// Returns true if pred is true for at least one element in [first, last).
-// Complexity: at most last-first applications of pred.
-template <typename InputIterator, typename Predicate>
-bool any(InputIterator first, InputIterator last, Predicate pred) {
-  return !none(first, last, pred);
-}
-
-// Returns a pair of iterators p such that p.first points to the
-// minimum element in the range and p.second points to the maximum,
-// ordered by comp. Complexity: at most floor((3/2) (N-1))
-// comparisons. Postcondition: If the return value is <min, max>, min is
-// the first iterator i in [first, last) such that comp(*j, *i) is
-// false for every other iterator j, and max is the last iterator i
-// such that comp(*i, *j) is false for every other iterator j. Or less
-// formally, min is the first minimum and max is the last maximum.
-template <typename ForwardIter, typename Compare>
-std::pair<ForwardIter, ForwardIter> minmax_element(ForwardIter first,
-                                                   const ForwardIter last,
-                                                   Compare comp) {
-  // Initialization: for N<2, set min=max=first. For N >= 2, set min
-  // to be the smaller of the first two and max to be the larger. (Taking
-  // care that min is the first, and max the second, if the two compare
-  // equivalent.)
-  ForwardIter min(first);
-  ForwardIter max(first);
-
-  if (first != last) {
-    ++first;
-  }
-
-  if (first != last) {
-    max = first;
-    if (comp(*max, *min))
-      swap(min, max);
-    ++first;
-  }
-
-  while (first != last) {
-    ForwardIter next(first);
-    ++next;
-
-    if (next != last) {
-      // We have two elements to look at that we haven't seen already,
-      // *first and *next. Compare the smaller of them with the
-      // current min, and the larger against the current max. The
-      // subtle point: write all of the comparisons so that, if the
-      // two things being compared are equivalent, we take the first
-      // one as the smaller and the last as the larger.
-      if (comp(*next, *first)) {
-        if (comp(*next, *min))
-          min = next;
-        if (!comp(*first, *max))
-          max = first;
-      } else {
-        if (comp(*first, *min))
-          min = first;
-        if (!comp(*next, *max))
-          max = next;
-      }
-    } else {
-      // There is only one element left that we haven't seen already, *first.
-      // Adjust min or max, as appropriate, and exit the loop.
-      if (comp(*first, *min)) {
-        min = first;
-      } else if (!comp(*first, *max)) {
-        max = first;
-      }
-      break;
-    }
-
-    first = next;
-    ++first;
-  }
-
-  return make_pair(min, max);
-}
-
-// Returns a pair of iterators p such that p.first points to the first
-// minimum element in the range and p.second points to the last
-// maximum, ordered by operator<.
-template <typename ForwardIter>
-inline std::pair<ForwardIter, ForwardIter> minmax_element(ForwardIter first,
-                                                          ForwardIter last) {
-  typedef typename std::iterator_traits<ForwardIter>::value_type value_type;
-  return util::gtl::minmax_element(first, last, std::less<value_type>());
-}
-
-// Returns true if [first, last) is sorted in nondescending order by
-// pred.  Complexity: for nonempty ranges, at most last-first - 1
-// applications of comp.
-template <typename ForwardIterator, typename Compare>
-bool is_sorted(ForwardIterator first, ForwardIterator last, Compare comp) {
-  if (first != last) {
-    ForwardIterator next(first);
-    ++next;
-    while (next != last) {
-      if (comp(*next, *first))
-        return false;
-      first = next;
-      ++next;
-    }
-  }
-  return true;
-}
-
-// Returns true if [first, last) is sorted in nondescending order by
-// operator<.  Complexity: for nonempty ranges, exactly last-first - 1
-// applications of operator<.
-template <typename ForwardIterator>
-inline bool is_sorted(ForwardIterator first, ForwardIterator last) {
-  typedef typename std::iterator_traits<ForwardIterator>::value_type value_type;
-  return util::gtl::is_sorted(first, last, std::less<value_type>());
-}
-
-// Returns true if [first, last) is partitioned by pred, i.e. if all
-// elements that satisfy pred appear before those that do
-// not. Complexity: linear.
-template <typename ForwardIterator, typename Predicate>
-inline bool is_partitioned(ForwardIterator first, ForwardIterator last,
-                           Predicate pred) {
-  for (; first != last; ++first) {
-    if (!pred(*first)) {
-      ++first;
-      return util::gtl::none(first, last, pred);
-    }
-  }
-  return true;
-}
-
-// Precondition: is_partitioned(first, last, pred). Returns: the
-// partition point p, i.e. an iterator mid satisfying the conditions
-// all(first, mid, pred) and none(mid, last, pred).  Complexity:
-// O(log(last-first)) applications of pred.
-template <typename ForwardIterator, typename Predicate>
-ForwardIterator partition_point(ForwardIterator first, ForwardIterator last,
-                                Predicate pred) {
-  typedef typename std::iterator_traits<ForwardIterator>::difference_type diff;
-  diff n = distance(first, last);
-
-  // Loop invariant: n == distance(first, last)
-  while (first != last) {
-    diff half = n/2;
-    ForwardIterator mid = first;
-    advance(mid, half);
-    if (pred(*mid)) {
-      first = mid;
-      ++first;
-      n -= half+1;
-    } else {
-      n = half;
-      last = mid;
-    }
-  }
-
-  return first;
-}
-
-// Copies all elements that satisfy pred to out_true and all elements
-// that don't satisfy it to out_false. Returns: a pair p such that
-// p.first is the end of the range beginning at out_t and p.second is
-// the end of the range beginning at out_f. Complexity: exactly
-// last-first applications of pred.
-template <typename InputIterator,
-          typename OutputIterator1, typename OutputIterator2,
-          typename Predicate>
-std::pair<OutputIterator1, OutputIterator2>
-partition_copy(InputIterator first, InputIterator last,
-               OutputIterator1 out_true, OutputIterator2 out_false,
-               Predicate pred) {
-  for (; first != last; ++first) {
-    if (pred(*first))
-      *out_true++ = *first;
-    else
-      *out_false++ = *first;
-  }
-  return make_pair(out_true, out_false);
-}
-
-// Reorders elements in [first, last), so that for each consecutive group
-// of duplicate elements (according to eq predicate) the first one is left and
-// others are moved at the end of the range. Returns: iterator middle such that
-// [first, middle) contains no two consecutive elements that are duplicates and
-// [middle, last) contains elements removed from all groups. It's stable for
-// range [first, middle) meaning the order of elements are the same as order of
-// their corresponding groups in input, but the order in range [middle, last)
-// is not preserved. Function is similar to std::unique, but ensures that
-// removed elements are properly copied and accessible at the range's end.
-// Complexity: exactly last-first-1 applications of eq; at most middle-first-1
-// swap operations.
-template <typename ForwardIterator, typename Equals>
-ForwardIterator unique_partition(ForwardIterator first, ForwardIterator last,
-                                 Equals eq) {
-  first = adjacent_find(first, last, eq);
-  if (first == last)
-    return last;
-
-  // Points to right-most element within range of unique elements being built.
-  ForwardIterator result = first;
-
-  // 'first' iterator goes through the sequence starting from element after
-  // first equal elements pair (found by adjacent_find above).
-  ++first;
-  while (++first != last) {
-    // If we encounter an element that isn't equal to right-most element in
-    // result, then extend the range and swap this element into it.
-    // Otherwise just continue incrementing 'first'.
-    if (!eq(*result, *first)) {
-      swap(*++result, *first);
-    }
-  }
-  // Return past-the-end upper-bound of the resulting range.
-  return ++result;
-}
-
-// Reorders elements in [first, last) range moving duplicates for each
-// consecutive group of elements to the end. Equality is checked using ==.
-template <typename ForwardIterator>
-inline ForwardIterator unique_partition(ForwardIterator first,
-                                        ForwardIterator last) {
-  typedef typename std::iterator_traits<ForwardIterator>::value_type T;
-  return unique_partition(first, last, std::equal_to<T>());
-}
-
-// Samples k elements from the next n.
-// Elements have the same order in the output stream as they did on input.
-//
-// This is Algorithm S from section 3.4.2 of Knuth, TAOCP, 2nd edition.
-// My k corresponds to Knuth n-m.
-// My n corrsponds to Knuth N-t.
-//
-// RngFunctor is any functor that can be called as:
-//   size_t RngFunctor(size_t x)
-// The return value is an integral value in the half-open range [0, x)
-// such that all values are equally likely.  Behavior is unspecified if x==0.
-// (This function never calls RngFunctor with x==0).
-
-template <typename InputIterator, typename OutputIterator, typename RngFunctor>
-inline void sample_k_of_n(InputIterator in, size_t k, size_t n,
-                          RngFunctor& rng, OutputIterator out) {
-  if (k > n) {
-    k = n;
-  }
-  while (k > 0) {
-    if (rng(n) < k) {
-      *out++ = *in;
-      k--;
-    }
-    ++in;
-    --n;
-  }
-}
-
-// Finds the longest prefix of a range that is a binary max heap with
-// respect to a given StrictWeakOrdering.  If first == last, returns last.
-// Otherwise, return an iterator it such that [first,it) is a heap but
-// no longer prefix is -- in other words, first + i for the lowest i
-// such that comp(first[(i-1)/2], first[i]) returns true.
-template <typename RandomAccessIterator, typename StrictWeakOrdering>
-RandomAccessIterator gtl_is_binary_heap_until(RandomAccessIterator first,
-                                              RandomAccessIterator last,
-                                              StrictWeakOrdering comp) {
-  if (last - first < 2) return last;
-  RandomAccessIterator parent = first;
-  bool is_right_child = false;
-  for (RandomAccessIterator child = first + 1; child != last; ++child) {
-    if (comp(*parent, *child)) return child;
-    if (is_right_child) ++parent;
-    is_right_child = !is_right_child;
-  }
-  return last;
-}
-
-// Special case of gtl_is_binary_heap_until where the order is std::less,
-// i.e., where we're working with a simple max heap.
-template <typename RandomAccessIterator>
-RandomAccessIterator gtl_is_binary_heap_until(RandomAccessIterator first,
-                                              RandomAccessIterator last) {
-  typedef typename std::iterator_traits<RandomAccessIterator>::value_type T;
-  return gtl_is_binary_heap_until(first, last, std::less<T>());
-}
-
-// Checks whether a range of values is a binary heap, i.e., checks that
-// no node is less (as defined by a StrictWeakOrdering) than a child.
-template <typename RandomAccessIterator, typename StrictWeakOrdering>
-bool gtl_is_binary_heap(RandomAccessIterator begin,
-                        RandomAccessIterator end,
-                        StrictWeakOrdering comp) {
-  return gtl_is_binary_heap_until(begin, end, comp) == end;
-}
-
-// Special case of gtl_is_binary_heap where the order is std::less (i.e.,
-// where we're working on a simple max heap).
-template <typename RandomAccessIterator>
-bool gtl_is_binary_heap(RandomAccessIterator begin,
-                        RandomAccessIterator end) {
-  return gtl_is_binary_heap_until(begin, end) == end;
-}
-
-// Unqualified calls to is_heap are ambiguous with some build types,
-// namespace that can clash with names that C++11 added to ::std.
-// By calling util::gtl::is_heap, clients can avoid those errors,
-// and by using the underlying is_heap call we ensure consistency
-// with the standard library's heap implementation just in case a
-// standard library ever uses anything other than a binary heap.
-#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus > 199711L \
-  || defined(LIBCXX) || _MSC_VER >= 1600 /* Visual Studio 2010 */
-using std::is_heap;
-#elif defined __GNUC__
-/* using __gnu_cxx::is_heap; */
-#elif defined _MSC_VER
-// For old versions of MSVC++, we know by inspection that make_heap()
-// traffics in binary max heaps, so gtl_is_binary_heap is an acceptable
-// implementation for is_heap.
-template <typename RandomAccessIterator>
-bool is_heap(RandomAccessIterator begin, RandomAccessIterator end) {
-  return gtl_is_binary_heap(begin, end);
-}
-
-template <typename RandomAccessIterator, typename StrictWeakOrdering>
-bool is_heap(RandomAccessIterator begin,
-             RandomAccessIterator end,
-             StrictWeakOrdering comp) {
-  return gtl_is_binary_heap(begin, end, comp);
-}
-#else
-// We need an implementation of is_heap that matches the library's
-// implementation of make_heap() and friends.  gtl_is_binary_heap will
-// *probably* work, but let's be safe and not make that assumption.
-#error No implementation of is_heap defined for this toolchain.
-#endif
-
-}  // namespace gtl
-}  // namespace util
-
-#endif  // UTIL_GTL_ALGORITHM_H_
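
gutil/algorithm.h is removed outright in this update. For callers that relied on its gtl helpers, the C++11 standard library covers the common cases; below is a minimal sketch of the std equivalents of contains(), is_sorted(), and minmax_element() (assumes a C++11 toolchain; not part of this patch):

  #include <algorithm>
  #include <vector>

  int main() {
    std::vector<int> v = {3, 1, 4, 1, 5};

    // util::gtl::contains(first, last, x)   ->  std::find(first, last, x) != last
    bool has_four = std::find(v.begin(), v.end(), 4) != v.end();

    // util::gtl::is_sorted(first, last)     ->  std::is_sorted(first, last)
    bool sorted = std::is_sorted(v.begin(), v.end());

    // util::gtl::minmax_element(first, last) -> std::minmax_element(first, last)
    // (both return <first minimum, last maximum>)
    auto mm = std::minmax_element(v.begin(), v.end());

    return (has_four && !sorted && *mm.first == 1 && *mm.second == 5) ? 0 : 1;
  }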

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/atomic_refcount.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/atomic_refcount.h b/be/src/gutil/atomic_refcount.h
index 9560a03..9c80921 100644
--- a/be/src/gutil/atomic_refcount.h
+++ b/be/src/gutil/atomic_refcount.h
@@ -37,11 +37,12 @@
 //
 // If you need to do something very different from this, use a Mutex.
 
-#include "gutil/atomicops.h"
-#include "gutil/integral_types.h"
 #include <glog/logging.h>
-#include "gutil/logging-inl.h"
-#include "gutil/dynamic_annotations.h"
+
+#include "kudu/gutil/atomicops.h"
+#include "kudu/gutil/integral_types.h"
+#include "kudu/gutil/logging-inl.h"
+#include "kudu/gutil/dynamic_annotations.h"
 
 namespace base {
 
@@ -100,7 +101,16 @@ inline bool RefCountIsOne(const volatile Atomic32 *ptr) {
   return res;
 }
 
-
+// Return whether the reference count is zero.  With conventional object
+// referencing counting, the object will be destroyed, so the reference count
+// should never be zero.  Hence this is generally used for a debug check.
+inline bool RefCountIsZero(const volatile Atomic32 *ptr) {
+  bool res = (subtle::Acquire_Load(ptr) == 0);
+  if (res) {
+    ANNOTATE_HAPPENS_AFTER(ptr);
+  }
+  return res;
+}
 
 #if BASE_HAS_ATOMIC64
 // Implementations for Atomic64, if available.
@@ -132,6 +142,13 @@ inline bool RefCountIsOne(const volatile base::subtle::Atomic64 *ptr) {
   }
   return res;
 }
+inline bool RefCountIsZero(const volatile base::subtle::Atomic64 *ptr) {
+  bool res = (base::subtle::Acquire_Load(ptr) == 0);
+  if (res) {
+    ANNOTATE_HAPPENS_AFTER(ptr);
+  }
+  return res;
+}
 #endif
 
 #ifdef AtomicWordCastType
@@ -163,6 +180,14 @@ inline bool RefCountIsOne(const volatile AtomicWord *ptr) {
   }
   return res;
 }
+inline bool RefCountIsZero(const volatile AtomicWord *ptr) {
+  bool res = base::subtle::Acquire_Load(
+      reinterpret_cast<const volatile AtomicWordCastType *>(ptr)) == 0;
+  if (res) {
+    ANNOTATE_HAPPENS_AFTER(ptr);
+  }
+  return res;
+}
 #endif
 
 } // namespace base
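
The new RefCountIsZero() complements the existing RefCountInc()/RefCountDec()/RefCountIsOne() helpers and, as the header comment above notes, is intended for debug checks. A minimal sketch of the intended pattern follows (hypothetical Counted class; include path as it appears elsewhere in this patch; not part of the commit):

  #include <glog/logging.h>
  #include "kudu/gutil/atomic_refcount.h"

  // Hypothetical intrusively refcounted object: RefCountDec() returns false
  // once the count drops to zero, and RefCountIsZero() backs a debug check.
  class Counted {
   public:
    Counted() : refs_(1) {}
    void Ref() { base::RefCountInc(&refs_); }
    void Unref() {
      if (!base::RefCountDec(&refs_)) {
        DCHECK(base::RefCountIsZero(&refs_));
        delete this;
      }
    }
   private:
    ~Counted() {}
    volatile Atomic32 refs_;
  };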

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/atomicops-internals-powerpc.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/atomicops-internals-powerpc.h b/be/src/gutil/atomicops-internals-powerpc.h
index 98ca0a6..0e56475 100644
--- a/be/src/gutil/atomicops-internals-powerpc.h
+++ b/be/src/gutil/atomicops-internals-powerpc.h
@@ -1,16 +1,21 @@
 // Copyright 2012 Google Inc.
 //
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
 //
-//     http://www.apache.org/licenses/LICENSE-2.0
+//   http://www.apache.org/licenses/LICENSE-2.0
 //
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 //
 // All Rights Reserved.
 //
@@ -23,8 +28,8 @@
 // This is not tested and may contain bugs.  Until we have bootstrapped
 // this.
 
-#ifndef SUPERSONIC_OPENSOURCE_AUXILIARY_ATOMICOPS_INTERNALS_POWERPC_H_
-#define SUPERSONIC_OPENSOURCE_AUXILIARY_ATOMICOPS_INTERNALS_POWERPC_H_
+#ifndef GUTIL_ATOMICOPS_INTERNALS_POWERPC_H_
+#define GUTIL_ATOMICOPS_INTERNALS_POWERPC_H_
 
 typedef int32_t Atomic32;
 #define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
@@ -296,4 +301,4 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
 
 #undef ATOMICOPS_COMPILER_BARRIER
 
-#endif  // SUPERSONIC_OPENSOURCE_AUXILIARY_ATOMICOPS_INTERNALS_POWERPC_H_
+#endif  // GUTIL_ATOMICOPS_INTERNALS_POWERPC_H_

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/atomicops-internals-tsan.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/atomicops-internals-tsan.h b/be/src/gutil/atomicops-internals-tsan.h
new file mode 100644
index 0000000..aecaefc
--- /dev/null
+++ b/be/src/gutil/atomicops-internals-tsan.h
@@ -0,0 +1,217 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
+#define BASE_ATOMICOPS_INTERNALS_TSAN_H_
+
+// Workaround for Chromium BASE_EXPORT definition
+#ifndef BASE_EXPORT
+#define BASE_EXPORT
+#endif
+
+// This struct is not part of the public API of this module; clients may not
+// use it.  (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of this x86.  Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+  bool has_sse2;             // Processor has SSE2.
+};
+BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
+    AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+#include <sanitizer/tsan_interface_atomic.h>
+
+typedef int32_t Atomic32;
+typedef int64_t Atomic64;
+
+namespace base {
+namespace subtle {
+
+typedef int32_t Atomic32;
+typedef int64_t Atomic64;
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+                                  Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+                                Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+                                Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                   Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                 Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+                                      Atomic64 old_value,
+                                      Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+                                      Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+                                    Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+                                    Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                       Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                     Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+                                    Atomic64 old_value,
+                                    Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+                                    Atomic64 old_value,
+                                    Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void MemoryBarrier() {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void PauseCPU() {
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_
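
These TSAN-instrumented primitives keep the same base::subtle interface as the other atomicops backends, so release/acquire publication code is unchanged when built under ThreadSanitizer. A minimal sketch of that pattern (assumed consumer code, not part of this patch; include path as used elsewhere in the patch):

  #include "kudu/gutil/atomicops.h"

  // A producer writes a plain payload, then sets a flag with Release_Store;
  // a consumer that observes the flag via Acquire_Load also sees the payload.
  static int g_payload = 0;
  static Atomic32 g_ready = 0;

  void Publish() {
    g_payload = 42;
    base::subtle::Release_Store(&g_ready, 1);
  }

  bool TryRead(int* out) {
    if (base::subtle::Acquire_Load(&g_ready) == 1) {
      *out = g_payload;
      return true;
    }
    return false;
  }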

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/atomicops-internals-x86.cc
----------------------------------------------------------------------
diff --git a/be/src/gutil/atomicops-internals-x86.cc b/be/src/gutil/atomicops-internals-x86.cc
index 7637b46..5d4529e 100644
--- a/be/src/gutil/atomicops-internals-x86.cc
+++ b/be/src/gutil/atomicops-internals-x86.cc
@@ -1,16 +1,21 @@
 // Copyright 2007 Google, Inc.
 //
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
 //
-//     http://www.apache.org/licenses/LICENSE-2.0
+//   http://www.apache.org/licenses/LICENSE-2.0
 //
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 //
 // All rights reserved.
 
@@ -18,13 +23,13 @@
 // This module gets enough CPU information to optimize the
 // atomicops module on x86.
 
-#include "gutil/atomicops-internals-x86.h"
+#include "kudu/gutil/atomicops-internals-x86.h"
 
 #include <string.h>
 
 #include <glog/logging.h>
-#include "gutil/logging-inl.h"
-#include "gutil/integral_types.h"
+#include "kudu/gutil/logging-inl.h"
+#include "kudu/gutil/integral_types.h"
 
 // This file only makes sense with atomicops-internals-x86.h -- it
 // depends on structs that are defined in that file.  If atomicops.h
@@ -55,8 +60,9 @@
 // Set the flags so that code will run correctly and conservatively
 // until InitGoogle() is called.
 struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+  false,          // bug can't exist before process spawns multiple threads
   false,          // no SSE2
-  false           // no cmpxchg16b
+  false,          // no cmpxchg16b
 };
 
 // Initialize the AtomicOps_Internalx86CPUFeatures struct.
@@ -84,6 +90,19 @@ static void AtomicOps_Internalx86CPUFeaturesInit() {
     model += ((eax >> 16) & 0xf) << 4;
   }
 
+  // Opteron Rev E has a bug in which on very rare occasions a locked
+  // instruction doesn't act as a read-acquire barrier if followed by a
+  // non-locked read-modify-write instruction.  Rev F has this bug in
+  // pre-release versions, but not in versions released to customers,
+  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
+      family == 15 &&
+      32 <= model && model <= 63) {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+  } else {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+  }
+
   // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
   AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
 
@@ -93,6 +112,8 @@ static void AtomicOps_Internalx86CPUFeaturesInit() {
   VLOG(1) << "vendor " << vendor <<
              "  family " << family <<
              "  model " << model <<
+             "  amd_lock_mb_bug " <<
+                   AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug <<
              "  sse2 " << AtomicOps_Internalx86CPUFeatures.has_sse2 <<
              "  cmpxchg16b " << AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b;
 }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/atomicops-internals-x86.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/atomicops-internals-x86.h b/be/src/gutil/atomicops-internals-x86.h
index f9ae60a..acbd2e3 100644
--- a/be/src/gutil/atomicops-internals-x86.h
+++ b/be/src/gutil/atomicops-internals-x86.h
@@ -1,16 +1,21 @@
 // Copyright 2003 Google Inc.
 //
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
 //
-//     http://www.apache.org/licenses/LICENSE-2.0
+//   http://www.apache.org/licenses/LICENSE-2.0
 //
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 //
 // All Rights Reserved.
 //
@@ -24,7 +29,7 @@
 
 #include <stdint.h>
 
-#include "common/logging.h"
+#include <glog/logging.h>
 
 #define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
 
@@ -38,6 +43,8 @@
 // Features of this x86.  Values may not be correct before InitGoogle() is run,
 // but are set conservatively.
 struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
   bool has_sse2;             // Processor has SSE2.
   bool has_cmpxchg16b;       // Processor supports cmpxchg16b instruction.
 };
@@ -95,6 +102,9 @@ inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                        Atomic32 new_value) {
   CheckNaturalAlignment(ptr);
   Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
   return old_val;
 }
 
@@ -122,15 +132,20 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
   return temp + increment;
 }
 
-// On x86, the NoBarrier_CompareAndSwap() uses a locked instruction and so also
-// provides both acquire and release barriers.
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
@@ -139,12 +154,6 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
-inline Atomic32 Barrier_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   CheckNaturalAlignment(ptr);
   *ptr = value;
@@ -248,6 +257,9 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
 inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                        Atomic64 new_value) {
   Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
   return old_val;
 }
 
@@ -275,6 +287,9 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
   return temp + increment;
 }
 
@@ -388,6 +403,9 @@ inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                        Atomic64 new_val) {
   CheckNaturalAlignment(ptr);
   Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_val);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
   return old_val;
 }
 
@@ -413,6 +431,9 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   CheckNaturalAlignment(ptr);
   Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
   return new_val;
 }
 
@@ -471,7 +492,11 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
 }
 
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
@@ -480,12 +505,6 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
-inline Atomic64 Barrier_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
 }  // namespace subtle
 }  // namespace base
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/atomicops.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/atomicops.h b/be/src/gutil/atomicops.h
index ad8d773..fa4c44c 100644
--- a/be/src/gutil/atomicops.h
+++ b/be/src/gutil/atomicops.h
@@ -67,23 +67,23 @@
 // #endif
 // ------------------------------------------------------------------------
 
-#include "gutil/arm_instruction_set_select.h"
+#include "kudu/gutil/arm_instruction_set_select.h"
 
 // ThreadSanitizer provides own implementation of atomicops.
 #if defined(THREAD_SANITIZER)
-#include "gutil/atomicops-internals-tsan.h"
+#include "kudu/gutil/atomicops-internals-tsan.h"
 #elif defined(__APPLE__)
-#include "gutil/atomicops-internals-macosx.h"
+#include "kudu/gutil/atomicops-internals-macosx.h"
 #elif defined(__GNUC__) && defined(ARMV6)
-#include "gutil/atomicops-internals-arm-v6plus.h"
+#include "kudu/gutil/atomicops-internals-arm-v6plus.h"
 #elif defined(ARMV3)
-#include "gutil/atomicops-internals-arm-generic.h"
+#include "kudu/gutil/atomicops-internals-arm-generic.h"
 #elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
-#include "gutil/atomicops-internals-x86.h"
+#include "kudu/gutil/atomicops-internals-x86.h"
 #elif defined(__GNUC__) && defined(ARCH_POWERPC64)
-#include "gutil/atomicops-internals-powerpc.h"
+#include "kudu/gutil/atomicops-internals-powerpc.h"
 #elif defined(OS_WINDOWS)
-#include "gutil/atomicops-internals-windows.h"
+#include "kudu/gutil/atomicops-internals-windows.h"
 #else
 #error You need to implement atomic operations for this architecture
 #endif

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/auxiliary/atomicops-internals-arm-generic.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/auxiliary/atomicops-internals-arm-generic.h b/be/src/gutil/auxiliary/atomicops-internals-arm-generic.h
index 0a73b85..417c6a0 100644
--- a/be/src/gutil/auxiliary/atomicops-internals-arm-generic.h
+++ b/be/src/gutil/auxiliary/atomicops-internals-arm-generic.h
@@ -11,8 +11,8 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include "gutil/macros.h"  // For COMPILE_ASSERT
-#include "gutil/port.h"  // ATTRIBUTE_WEAK
+#include "kudu/gutil/macros.h"  // For COMPILE_ASSERT
+#include "kudu/gutil/port.h"  // ATTRIBUTE_WEAK
 
 typedef int32_t Atomic32;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/auxiliary/atomicops-internals-arm-v6plus.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/auxiliary/atomicops-internals-arm-v6plus.h b/be/src/gutil/auxiliary/atomicops-internals-arm-v6plus.h
index 631f0f0..edafc4e 100644
--- a/be/src/gutil/auxiliary/atomicops-internals-arm-v6plus.h
+++ b/be/src/gutil/auxiliary/atomicops-internals-arm-v6plus.h
@@ -12,7 +12,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include "gutil/basictypes.h"  // For COMPILE_ASSERT
+#include "kudu/gutil/basictypes.h"  // For COMPILE_ASSERT
 
 // The LDREXD and STREXD instructions are available in all ARM v7 variants and above.  In v6,
 // only some variants support them.  For simplicity, we only use exclusive
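
As a point of comparison (not part of the patch): current GCC and Clang expose the same primitive through the __atomic builtins, which on ARMv7 typically lower to exactly the kind of LDREXD/STREXD retry loop this header hand-codes.

#include <cstdint>

// Illustrative only; relaxed ordering corresponds in spirit to the
// NoBarrier_* operations in this family of headers.
inline int64_t Exchange64(volatile int64_t* ptr, int64_t new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}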

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/auxiliary/atomicops-internals-macosx.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/auxiliary/atomicops-internals-macosx.h b/be/src/gutil/auxiliary/atomicops-internals-macosx.h
deleted file mode 100644
index 7a63bd5..0000000
--- a/be/src/gutil/auxiliary/atomicops-internals-macosx.h
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright 2006 Google Inc.
-// All Rights Reserved.
-//
-//
-// Implementation of atomic operations for Mac OS X.  This file should not
-// be included directly.  Clients should instead include
-// "base/atomicops.h".
-
-#ifndef BASE_AUXILIARY_ATOMICOPS_INTERNALS_MACOSX_H_
-#define BASE_AUXILIARY_ATOMICOPS_INTERNALS_MACOSX_H_
-
-typedef int32_t Atomic32;
-
-// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size.  Similarly, on __ppc64__,
-// AtomicWord and Atomic64 are always different.  Thus, we need explicit
-// casting.
-#ifdef __LP64__
-#define AtomicWordCastType base::subtle::Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-#if defined(__LP64__) || defined(__i386__)
-#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
-#endif
-
-#include <libkern/OSAtomic.h>
-
-#if !defined(__LP64__) && defined(__ppc__)
-
-// The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
-// while the underlying assembly instructions are available on only some
-// implementations of PowerPC.
-
-// The following inline functions will fail with the error message at compile
-// time ONLY IF they are called.  So it is safe to use this header if user
-// code only calls AtomicWord and Atomic32 operations.
-//
-// NOTE(user): Implementation notes to implement the atomic ops below may
-// be found in "PowerPC Virtual Environment Architecture, Book II,
-// Version 2.02", January 28, 2005, Appendix B, page 46.  Unfortunately,
-// extra care must be taken to ensure data are properly 8-byte aligned, and
-// that data are returned correctly according to Mac OS X ABI specs.
-
-inline int64_t OSAtomicCompareAndSwap64(
-    int64_t oldValue, int64_t newValue, int64_t *theValue) {
-  __asm__ __volatile__(
-      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
-  return 0;
-}
-
-inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
-  __asm__ __volatile__(
-      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
-  return 0;
-}
-
-inline int64_t OSAtomicCompareAndSwap64Barrier(
-    int64_t oldValue, int64_t newValue, int64_t *theValue) {
-  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
-  OSMemoryBarrier();
-  return prev;
-}
-
-inline int64_t OSAtomicAdd64Barrier(
-    int64_t theAmount, int64_t *theValue) {
-  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
-  OSMemoryBarrier();
-  return new_val;
-}
-#endif
-
-
-namespace base {
-namespace subtle {
-
-typedef int64_t Atomic64;
-
-inline void MemoryBarrier() {
-  OSMemoryBarrier();
-}
-
-// 32-bit Versions.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32(old_value, new_value,
-                                 const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
-                                     const_cast<Atomic32*>(ptr)));
-  return old_value;
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
-                                       Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
-                                            const_cast<Atomic32*>(ptr)));
-  return old_value;
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
-                                       Atomic32 new_value) {
-  return Acquire_AtomicExchange(ptr, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
-                                          Atomic32 increment) {
-  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
-                                          Atomic32 increment) {
-  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
-                                        const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-// 64-bit version
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64(old_value, new_value,
-                                 const_cast<Atomic64*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
-                                         Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
-                                     const_cast<Atomic64*>(ptr)));
-  return old_value;
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
-                                       Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
-                                            const_cast<Atomic64*>(ptr)));
-  return old_value;
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
-                                       Atomic64 new_value) {
-  return Acquire_AtomicExchange(ptr, new_value);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
-                                          Atomic64 increment) {
-  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
-                                        Atomic64 increment) {
-  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
-                                        const_cast<Atomic64*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  // The lib kern interface does not distinguish between
-  // Acquire and Release memory barriers; they are equivalent.
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
-  Atomic64 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#else
-
-// 64-bit implementation on 32-bit platform
-
-#if defined(__ppc__)
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-   __asm__ __volatile__(
-       "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-   __asm__ __volatile__(
-       "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
-   return 0;
-}
-
-#elif defined(__i386__)
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __asm__ __volatile__("movq %1, %%mm0\n\t"    // Use mmx reg for 64-bit atomic
-                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
-                       "emms\n\t"              // Reset FP registers
-                       : "=m" (*ptr)
-                       : "m" (value)
-                       : // mark the FP stack and mmx registers as clobbered
-                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
-                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
-                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
-
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  Atomic64 value;
-  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
-                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
-                       "emms\n\t"            // Reset FP registers
-                       : "=m" (value)
-                       : "m" (*ptr)
-                       : // mark the FP stack and mmx registers as clobbered
-                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
-                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
-                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
-
-  return value;
-}
-
-#elif defined(__arm__)
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  int store_failed;
-  Atomic64 dummy;
-  __asm__ __volatile__(
-      "1:\n"
-      // Dummy load to lock cache line.
-      "ldrexd  %1, [%3]\n"
-      "strexd  %0, %2, [%3]\n"
-      "teq     %0, #0\n"
-      "bne     1b"
-      : "=&r" (store_failed), "=&r"(dummy)
-      : "r"(value), "r" (ptr)
-      : "cc", "memory");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  Atomic64 res;
-  __asm__ __volatile__(
-      "ldrexd   %0, [%1]\n"
-      "clrex\n"
-      : "=r" (res)
-      : "r"(ptr), "Q"(*ptr));
-  return res;
-}
-
-#endif
-
-
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  MemoryBarrier();
-  NoBarrier_Store(ptr, value);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
-  Atomic64 value = NoBarrier_Load(ptr);
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
-#endif  // __LP64__
-
-}   // namespace base::subtle
-}   // namespace base
-
-// NOTE(user): The following is also deprecated.  New callers should use
-// the base::subtle namespace.
-inline void MemoryBarrier() {
-  base::subtle::MemoryBarrier();
-}
-#endif  // BASE_AUXILIARY_ATOMICOPS_INTERNALS_MACOSX_H_
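
The deleted header built its read-modify-write operations out of CAS retry loops over the OSAtomic primitives (see NoBarrier_AtomicExchange above). For illustration only, the same pattern expressed with std::atomic, which is what replacement code targeting macOS would typically reach for today; the function name is hypothetical:

#include <atomic>
#include <cstdint>

inline int64_t ExchangeViaCas(std::atomic<int64_t>* ptr, int64_t new_value) {
  int64_t old_value = ptr->load(std::memory_order_relaxed);
  // compare_exchange_weak refreshes old_value with the current contents on
  // failure, so the loop simply retries until the swap succeeds.
  while (!ptr->compare_exchange_weak(old_value, new_value,
                                     std::memory_order_relaxed)) {
  }
  return old_value;
}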

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/auxiliary/atomicops-internals-windows.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/auxiliary/atomicops-internals-windows.h b/be/src/gutil/auxiliary/atomicops-internals-windows.h
index 11d7d9d..96674dc 100644
--- a/be/src/gutil/auxiliary/atomicops-internals-windows.h
+++ b/be/src/gutil/auxiliary/atomicops-internals-windows.h
@@ -11,7 +11,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include "gutil/basictypes.h"  // For COMPILE_ASSERT
+#include "kudu/gutil/basictypes.h"  // For COMPILE_ASSERT
 
 typedef int32 Atomic32;
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/02f3e3fc/be/src/gutil/basictypes.h
----------------------------------------------------------------------
diff --git a/be/src/gutil/basictypes.h b/be/src/gutil/basictypes.h
index 2f21d03..1c095ca 100644
--- a/be/src/gutil/basictypes.h
+++ b/be/src/gutil/basictypes.h
@@ -5,8 +5,8 @@
 #ifndef BASE_BASICTYPES_H_
 #define BASE_BASICTYPES_H_
 
-#include "gutil/integral_types.h"
-#include "gutil/macros.h"
+#include "kudu/gutil/integral_types.h"
+#include "kudu/gutil/macros.h"
 
 // Argument type used in interfaces that can optionally take ownership
 // of a passed in argument.  If TAKE_OWNERSHIP is passed, the called