Posted to commits@bigtop.apache.org by iw...@apache.org on 2021/03/16 13:30:19 UTC

[bigtop] branch master updated: BIGTOP-3524: Hadoop 3.2.2 build failure on Arm (#758)

This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/bigtop.git


The following commit(s) were added to refs/heads/master by this push:
     new ff1a369  BIGTOP-3524: Hadoop 3.2.2 build failure on Arm (#758)
ff1a369 is described below

commit ff1a369aa35c4ac6aa3f03120d7fd2e72c55c3e1
Author: Jun He <ju...@linaro.org>
AuthorDate: Tue Mar 16 21:30:12 2021 +0800

    BIGTOP-3524: Hadoop 3.2.2 build failure on Arm (#758)
    
    Fix protobuf missing header on Arm64, and backport arm64 atomic ops.
    
    Change-Id: I3d0899782e21f6012577f9949dc7ef3d6ca2f43d
    Signed-off-by: Jun He <ju...@arm.com>
    
    Co-authored-by: Jun He <ju...@arm.com>
---
 ...generic-GCC-support-for-atomic-operations.patch | 209 --------
 ...omic-operations-with-support-of-arm64-and.patch | 573 +++++++++++++++++++++
 bigtop_toolchain/manifests/protobuf.pp             |  10 +-
 3 files changed, 578 insertions(+), 214 deletions(-)

diff --git a/bigtop_toolchain/files/0001-Add-generic-GCC-support-for-atomic-operations.patch b/bigtop_toolchain/files/0001-Add-generic-GCC-support-for-atomic-operations.patch
deleted file mode 100644
index 171695f..0000000
--- a/bigtop_toolchain/files/0001-Add-generic-GCC-support-for-atomic-operations.patch
+++ /dev/null
@@ -1,209 +0,0 @@
-From d099ec11fc8c2eb97df2bf2fbb6996066eefca46 Mon Sep 17 00:00:00 2001
-From: Stanislav Ochotnicky <so...@redhat.com>
-Date: Thu, 2 May 2013 10:43:47 +0200
-Subject: [PATCH] Add generic GCC support for atomic operations
-
-This is useful for architectures where no specialized code has been
-written.
----
- src/google/protobuf/stubs/atomicops.h              |   2 +-
- .../stubs/atomicops_internals_generic_gcc.h        | 139 +++++++++++++++++++++
- src/google/protobuf/stubs/platform_macros.h        |  14 ++-
- 3 files changed, 153 insertions(+), 2 deletions(-)
- create mode 100644 src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
-
-diff --git a/src/google/protobuf/stubs/atomicops.h b/src/google/protobuf/stubs/atomicops.h
-index b8581fa..883b125 100644
---- a/src/google/protobuf/stubs/atomicops.h
-+++ b/src/google/protobuf/stubs/atomicops.h
-@@ -185,7 +185,7 @@ GOOGLE_PROTOBUF_ATOMICOPS_ERROR
- #elif defined(__pnacl__)
- #include <google/protobuf/stubs/atomicops_internals_pnacl.h>
- #else
--GOOGLE_PROTOBUF_ATOMICOPS_ERROR
-+#include <google/protobuf/stubs/atomicops_internals_generic_gcc.h>
- #endif
- 
- // Unknown.
-diff --git a/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
-new file mode 100644
-index 0000000..3fc2a9b
---- /dev/null
-+++ b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
-@@ -0,0 +1,139 @@
-+// Protocol Buffers - Google's data interchange format
-+// Copyright 2013 Red Hat Inc.  All rights reserved.
-+// http://code.google.com/p/protobuf/
-+//
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+//     * Redistributions of source code must retain the above copyright
-+// notice, this list of conditions and the following disclaimer.
-+//     * Redistributions in binary form must reproduce the above
-+// copyright notice, this list of conditions and the following disclaimer
-+// in the documentation and/or other materials provided with the
-+// distribution.
-+//     * Neither the name of Red Hat Inc. nor the names of its
-+// contributors may be used to endorse or promote products derived from
-+// this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+// This file is an internal atomic implementation, use atomicops.h instead.
-+
-+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
-+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
-+
-+namespace google {
-+namespace protobuf {
-+namespace internal {
-+
-+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-+                                         Atomic32 old_value,
-+                                         Atomic32 new_value) {
-+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
-+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-+  return old_value;
-+}
-+
-+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-+                                         Atomic32 new_value) {
-+  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
-+}
-+
-+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-+                                          Atomic32 increment) {
-+  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
-+}
-+
-+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-+                                        Atomic32 increment) {
-+  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
-+}
-+
-+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-+                                       Atomic32 old_value,
-+                                       Atomic32 new_value) {
-+  __atomic_compare_exchange(ptr, &old_value, &new_value, true,
-+                            __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
-+  return old_value;
-+}
-+
-+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-+                                       Atomic32 old_value,
-+                                       Atomic32 new_value) {
-+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
-+                            __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
-+  return old_value;
-+}
-+
-+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-+  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-+}
-+
-+inline void MemoryBarrier() {
-+  __sync_synchronize();
-+}
-+
-+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-+  __atomic_store_n(ptr, value, __ATOMIC_ACQUIRE);
-+}
-+
-+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-+}
-+
-+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-+  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-+}
-+
-+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-+}
-+
-+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-+  return __atomic_load_n(ptr, __ATOMIC_RELEASE);
-+}
-+
-+#ifdef __LP64__
-+
-+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-+}
-+
-+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-+}
-+
-+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-+                                       Atomic64 old_value,
-+                                       Atomic64 new_value) {
-+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
-+                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
-+  return old_value;
-+}
-+
-+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-+                                         Atomic64 old_value,
-+                                         Atomic64 new_value) {
-+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
-+                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-+  return old_value;
-+}
-+
-+#endif // defined(__LP64__)
-+
-+}  // namespace internal
-+}  // namespace protobuf
-+}  // namespace google
-+
-+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
-diff --git a/src/google/protobuf/stubs/platform_macros.h b/src/google/protobuf/stubs/platform_macros.h
-index b1df60e..db691d8 100644
---- a/src/google/protobuf/stubs/platform_macros.h
-+++ b/src/google/protobuf/stubs/platform_macros.h
-@@ -43,6 +43,9 @@
- #elif defined(_M_IX86) || defined(__i386__)
- #define GOOGLE_PROTOBUF_ARCH_IA32 1
- #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
-+#elif defined(__aarch64__)
-+#define GOOGLE_PROTOBUF_ARCH_AARCH64 1
-+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
- #elif defined(__QNX__)
- #define GOOGLE_PROTOBUF_ARCH_ARM_QNX 1
- #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
-@@ -54,9 +57,18 @@
- #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
- #elif defined(__pnacl__)
- #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
--#elif defined(__ppc__)
-+#elif defined(__ppc64__) || defined(__PPC64__)
-+#define GOOGLE_PROTOBUF_ARCH_PPC64 1
-+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
-+#elif defined(__ppc__) || defined(__PPC__)
- #define GOOGLE_PROTOBUF_ARCH_PPC 1
- #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
-+#elif defined(__s390x__)
-+#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
-+#define GOOGLE_PROTOBUF_ARCH_S390X 1
-+#elif defined(__s390__)
-+#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
-+#define GOOGLE_PROTOBUF_ARCH_S390 1
- #else
- #error Host architecture was not detected as supported by protobuf
- #endif
--- 
-1.8.1.4
-
diff --git a/bigtop_toolchain/files/0001-Backport-atomic-operations-with-support-of-arm64-and.patch b/bigtop_toolchain/files/0001-Backport-atomic-operations-with-support-of-arm64-and.patch
new file mode 100644
index 0000000..b1dfde4
--- /dev/null
+++ b/bigtop_toolchain/files/0001-Backport-atomic-operations-with-support-of-arm64-and.patch
@@ -0,0 +1,573 @@
+From 810f9fae7e12142c3bd47ab7011ed071284760f8 Mon Sep 17 00:00:00 2001
+From: Jun He <ju...@arm.com>
+Date: Fri, 5 Mar 2021 02:49:08 +0000
+Subject: [PATCH] Backport atomic operations with support of arm64 and generic
+
+Signed-off-by: Jun He <ju...@arm.com>
+---
+ src/Makefile.am                               |   2 +
+ src/google/protobuf/stubs/atomicops.h         |   8 +-
+ .../stubs/atomicops_internals_arm64_gcc.h     | 325 ++++++++++++++++++
+ .../stubs/atomicops_internals_generic_gcc.h   | 137 ++++++++
+ src/google/protobuf/stubs/platform_macros.h   |  16 +
+ 5 files changed, 486 insertions(+), 2 deletions(-)
+ create mode 100644 src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h
+ create mode 100644 src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
+
+diff --git a/src/Makefile.am b/src/Makefile.am
+index df733d9b8..f9fa82ec0 100644
+--- a/src/Makefile.am
++++ b/src/Makefile.am
+@@ -40,6 +40,7 @@ MAINTAINERCLEANFILES =   \
+ nobase_include_HEADERS =                                        \
+   google/protobuf/stubs/atomicops.h                             \
+   google/protobuf/stubs/atomicops_internals_arm_gcc.h           \
++  google/protobuf/stubs/atomicops_internals_arm64_gcc.h         \
+   google/protobuf/stubs/atomicops_internals_arm_qnx.h           \
+   google/protobuf/stubs/atomicops_internals_atomicword_compat.h \
+   google/protobuf/stubs/atomicops_internals_macosx.h            \
+@@ -47,6 +48,7 @@ nobase_include_HEADERS =                                        \
+   google/protobuf/stubs/atomicops_internals_pnacl.h             \
+   google/protobuf/stubs/atomicops_internals_x86_gcc.h           \
+   google/protobuf/stubs/atomicops_internals_x86_msvc.h          \
++  google/protobuf/stubs/atomicops_internals_generic_gcc.h       \
+   google/protobuf/stubs/common.h                                \
+   google/protobuf/stubs/platform_macros.h                       \
+   google/protobuf/stubs/once.h                                  \
+diff --git a/src/google/protobuf/stubs/atomicops.h b/src/google/protobuf/stubs/atomicops.h
+index b8581fa27..5b2e64cb5 100644
+--- a/src/google/protobuf/stubs/atomicops.h
++++ b/src/google/protobuf/stubs/atomicops.h
+@@ -176,14 +176,18 @@ GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+ #elif defined(__GNUC__)
+ #if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)
+ #include <google/protobuf/stubs/atomicops_internals_x86_gcc.h>
+-#elif defined(GOOGLE_PROTOBUF_ARCH_ARM)
++#elif defined(GOOGLE_PROTOBUF_ARCH_ARM) && defined(__linux__)
+ #include <google/protobuf/stubs/atomicops_internals_arm_gcc.h>
++#elif defined(GOOGLE_PROTOBUF_ARCH_AARCH64)
++#include <google/protobuf/stubs/atomicops_internals_arm64_gcc.h>
+ #elif defined(GOOGLE_PROTOBUF_ARCH_ARM_QNX)
+ #include <google/protobuf/stubs/atomicops_internals_arm_qnx.h>
+-#elif defined(GOOGLE_PROTOBUF_ARCH_MIPS)
++#elif defined(GOOGLE_PROTOBUF_ARCH_MIPS) || defined(GOOGLE_PROTOBUF_ARCH_MIPS64)
+ #include <google/protobuf/stubs/atomicops_internals_mips_gcc.h>
+ #elif defined(__pnacl__)
+ #include <google/protobuf/stubs/atomicops_internals_pnacl.h>
++#elif (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))
++#include <google/protobuf/stubs/atomicops_internals_generic_gcc.h>
+ #else
+ GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+ #endif
+diff --git a/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h b/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h
+new file mode 100644
+index 000000000..fe9727ad0
+--- /dev/null
++++ b/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h
+@@ -0,0 +1,325 @@
++// Protocol Buffers - Google's data interchange format
++// Copyright 2012 Google Inc.  All rights reserved.
++// http://code.google.com/p/protobuf/
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++//     * Redistributions of source code must retain the above copyright
++// notice, this list of conditions and the following disclaimer.
++//     * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following disclaimer
++// in the documentation and/or other materials provided with the
++// distribution.
++//     * Neither the name of Google Inc. nor the names of its
++// contributors may be used to endorse or promote products derived from
++// this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// This file is an internal atomic implementation, use atomicops.h instead.
++
++#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
++#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
++
++namespace google {
++namespace protobuf {
++namespace internal {
++
++inline void MemoryBarrier() {
++  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
++}
++
++// NoBarrier versions of the operation include "memory" in the clobber list.
++// This is not required for direct usage of the NoBarrier versions of the
++// operations. However this is required for correctness when they are used as
++// part of the Acquire or Release versions, to ensure that nothing from outside
++// the call is reordered between the operation and the memory barrier. This does
++// not change the code generated, so has no or minimal impact on the
++// NoBarrier operations.
++
++inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
++                                         Atomic32 old_value,
++                                         Atomic32 new_value) {
++  Atomic32 prev;
++  int32_t temp;
++
++  __asm__ __volatile__ (  // NOLINT
++    "0:                                    \n\t"
++    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
++    "cmp %w[prev], %w[old_value]           \n\t"
++    "bne 1f                                \n\t"
++    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
++    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
++    "1:                                    \n\t"
++    : [prev]"=&r" (prev),
++      [temp]"=&r" (temp),
++      [ptr]"+Q" (*ptr)
++    : [old_value]"IJr" (old_value),
++      [new_value]"r" (new_value)
++    : "cc", "memory"
++  );  // NOLINT
++
++  return prev;
++}
++
++inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
++                                         Atomic32 new_value) {
++  Atomic32 result;
++  int32_t temp;
++
++  __asm__ __volatile__ (  // NOLINT
++    "0:                                    \n\t"
++    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
++    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
++    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
++    : [result]"=&r" (result),
++      [temp]"=&r" (temp),
++      [ptr]"+Q" (*ptr)
++    : [new_value]"r" (new_value)
++    : "memory"
++  );  // NOLINT
++
++  return result;
++}
++
++inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
++                                          Atomic32 increment) {
++  Atomic32 result;
++  int32_t temp;
++
++  __asm__ __volatile__ (  // NOLINT
++    "0:                                       \n\t"
++    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
++    "add %w[result], %w[result], %w[increment]\n\t"
++    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
++    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
++    : [result]"=&r" (result),
++      [temp]"=&r" (temp),
++      [ptr]"+Q" (*ptr)
++    : [increment]"IJr" (increment)
++    : "memory"
++  );  // NOLINT
++
++  return result;
++}
++
++inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
++                                        Atomic32 increment) {
++  MemoryBarrier();
++  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
++  MemoryBarrier();
++
++  return result;
++}
++
++inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  MemoryBarrier();
++
++  return prev;
++}
++
++inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  MemoryBarrier();
++  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++
++  return prev;
++}
++
++inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
++  *ptr = value;
++}
++
++inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
++  *ptr = value;
++  MemoryBarrier();
++}
++
++inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
++  __asm__ __volatile__ (  // NOLINT
++    "stlr %w[value], %[ptr]  \n\t"
++    : [ptr]"=Q" (*ptr)
++    : [value]"r" (value)
++    : "memory"
++  );  // NOLINT
++}
++
++inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
++  return *ptr;
++}
++
++inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
++  Atomic32 value;
++
++  __asm__ __volatile__ (  // NOLINT
++    "ldar %w[value], %[ptr]  \n\t"
++    : [value]"=r" (value)
++    : [ptr]"Q" (*ptr)
++    : "memory"
++  );  // NOLINT
++
++  return value;
++}
++
++inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
++  MemoryBarrier();
++  return *ptr;
++}
++
++// 64-bit versions of the operations.
++// See the 32-bit versions for comments.
++
++inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
++                                         Atomic64 old_value,
++                                         Atomic64 new_value) {
++  Atomic64 prev;
++  int32_t temp;
++
++  __asm__ __volatile__ (  // NOLINT
++    "0:                                    \n\t"
++    "ldxr %[prev], %[ptr]                  \n\t"
++    "cmp %[prev], %[old_value]             \n\t"
++    "bne 1f                                \n\t"
++    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
++    "cbnz %w[temp], 0b                     \n\t"
++    "1:                                    \n\t"
++    : [prev]"=&r" (prev),
++      [temp]"=&r" (temp),
++      [ptr]"+Q" (*ptr)
++    : [old_value]"IJr" (old_value),
++      [new_value]"r" (new_value)
++    : "cc", "memory"
++  );  // NOLINT
++
++  return prev;
++}
++
++inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
++                                         Atomic64 new_value) {
++  Atomic64 result;
++  int32_t temp;
++
++  __asm__ __volatile__ (  // NOLINT
++    "0:                                    \n\t"
++    "ldxr %[result], %[ptr]                \n\t"
++    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
++    "cbnz %w[temp], 0b                     \n\t"
++    : [result]"=&r" (result),
++      [temp]"=&r" (temp),
++      [ptr]"+Q" (*ptr)
++    : [new_value]"r" (new_value)
++    : "memory"
++  );  // NOLINT
++
++  return result;
++}
++
++inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
++                                          Atomic64 increment) {
++  Atomic64 result;
++  int32_t temp;
++
++  __asm__ __volatile__ (  // NOLINT
++    "0:                                     \n\t"
++    "ldxr %[result], %[ptr]                 \n\t"
++    "add %[result], %[result], %[increment] \n\t"
++    "stxr %w[temp], %[result], %[ptr]       \n\t"
++    "cbnz %w[temp], 0b                      \n\t"
++    : [result]"=&r" (result),
++      [temp]"=&r" (temp),
++      [ptr]"+Q" (*ptr)
++    : [increment]"IJr" (increment)
++    : "memory"
++  );  // NOLINT
++
++  return result;
++}
++
++inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
++                                        Atomic64 increment) {
++  MemoryBarrier();
++  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
++  MemoryBarrier();
++
++  return result;
++}
++
++inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
++                                       Atomic64 old_value,
++                                       Atomic64 new_value) {
++  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  MemoryBarrier();
++
++  return prev;
++}
++
++inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
++                                       Atomic64 old_value,
++                                       Atomic64 new_value) {
++  MemoryBarrier();
++  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++
++  return prev;
++}
++
++inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
++  *ptr = value;
++}
++
++inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
++  *ptr = value;
++  MemoryBarrier();
++}
++
++inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
++  __asm__ __volatile__ (  // NOLINT
++    "stlr %x[value], %[ptr]  \n\t"
++    : [ptr]"=Q" (*ptr)
++    : [value]"r" (value)
++    : "memory"
++  );  // NOLINT
++}
++
++inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
++  return *ptr;
++}
++
++inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
++  Atomic64 value;
++
++  __asm__ __volatile__ (  // NOLINT
++    "ldar %x[value], %[ptr]  \n\t"
++    : [value]"=r" (value)
++    : [ptr]"Q" (*ptr)
++    : "memory"
++  );  // NOLINT
++
++  return value;
++}
++
++inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
++  MemoryBarrier();
++  return *ptr;
++}
++
++}  // namespace internal
++}  // namespace protobuf
++}  // namespace google
++
++#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+diff --git a/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
+new file mode 100644
+index 000000000..e30bb4449
+--- /dev/null
++++ b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
+@@ -0,0 +1,137 @@
++// Copyright 2013 Red Hat Inc.  All rights reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++//     * Redistributions of source code must retain the above copyright
++// notice, this list of conditions and the following disclaimer.
++//     * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following disclaimer
++// in the documentation and/or other materials provided with the
++// distribution.
++//     * Neither the name of Red Hat Inc. nor the names of its
++// contributors may be used to endorse or promote products derived from
++// this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// This file is an internal atomic implementation, use atomicops.h instead.
++
++#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
++#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
++
++namespace google {
++namespace protobuf {
++namespace internal {
++
++inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
++                                         Atomic32 old_value,
++                                         Atomic32 new_value) {
++  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
++                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
++  return old_value;
++}
++
++inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
++                                         Atomic32 new_value) {
++  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
++}
++
++inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
++                                          Atomic32 increment) {
++  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
++}
++
++inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
++                                        Atomic32 increment) {
++  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
++}
++
++inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  __atomic_compare_exchange(ptr, &old_value, &new_value, true,
++                            __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
++  return old_value;
++}
++
++inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
++                              __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
++  return old_value;
++}
++
++inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
++  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
++}
++
++inline void MemoryBarrier() {
++  __sync_synchronize();
++}
++
++inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
++  __atomic_store_n(ptr, value, __ATOMIC_ACQUIRE);
++}
++
++inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
++  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
++}
++
++inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
++  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
++}
++
++inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
++  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
++}
++
++inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
++  return __atomic_load_n(ptr, __ATOMIC_RELEASE);
++}
++
++#ifdef __LP64__
++
++inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
++  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
++}
++
++inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
++  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
++}
++
++inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
++                                       Atomic64 old_value,
++                                       Atomic64 new_value) {
++  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
++                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
++  return old_value;
++}
++
++inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
++                                         Atomic64 old_value,
++                                         Atomic64 new_value) {
++  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
++                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
++  return old_value;
++}
++
++#endif // defined(__LP64__)
++
++}  // namespace internal
++}  // namespace protobuf
++}  // namespace google
++
++#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
+diff --git a/src/google/protobuf/stubs/platform_macros.h b/src/google/protobuf/stubs/platform_macros.h
+index b1df60e46..1705b4162 100644
+--- a/src/google/protobuf/stubs/platform_macros.h
++++ b/src/google/protobuf/stubs/platform_macros.h
+@@ -49,14 +49,30 @@
+ #elif defined(__ARMEL__)
+ #define GOOGLE_PROTOBUF_ARCH_ARM 1
+ #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
++#elif defined(__aarch64__)
++#define GOOGLE_PROTOBUF_ARCH_AARCH64 1
++#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+ #elif defined(__MIPSEL__)
++#if defined(__LP64__)
++#define GOOGLE_PROTOBUF_ARCH_MIPS64 1
++#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
++#else
+ #define GOOGLE_PROTOBUF_ARCH_MIPS 1
+ #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
++#endif
+ #elif defined(__pnacl__)
+ #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+ #elif defined(__ppc__)
+ #define GOOGLE_PROTOBUF_ARCH_PPC 1
+ #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
++#elif defined(__GNUC__) && \
++ (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))
++// We fallback to the generic GCC >= 4.7 implementation in atomicops.h
++# if __LP64__
++#  define GOOGLE_PROTOBUF_ARCH_64_BIT 1
++# else
++#  define GOOGLE_PROTOBUF_ARCH_32_BIT 1
++# endif
+ #else
+ #error Host architecture was not detected as supported by protobuf
+ #endif
+-- 
+2.17.1
+
diff --git a/bigtop_toolchain/manifests/protobuf.pp b/bigtop_toolchain/manifests/protobuf.pp
index c2c0eb1..808b702 100644
--- a/bigtop_toolchain/manifests/protobuf.pp
+++ b/bigtop_toolchain/manifests/protobuf.pp
@@ -22,20 +22,20 @@ class bigtop_toolchain::protobuf {
   $protobuf8 = "protobuf-2.5.0.tar.gz"
   $protobuf8dir = "protobuf-2.5.0"
 
-  file { "/usr/src/0001-Add-generic-GCC-support-for-atomic-operations.patch":
-    source => "puppet:///modules/bigtop_toolchain/0001-Add-generic-GCC-support-for-atomic-operations.patch"
+  file { "/usr/src/0001-Backport-atomic-operations-with-support-of-arm64-and.patch":
+    source => "puppet:///modules/bigtop_toolchain/0001-Backport-atomic-operations-with-support-of-arm64-and.patch"
   }
 
   exec { "download protobuf":
      cwd  => "/usr/src",
-     command => "/usr/bin/wget $url/$protobuf8 && mkdir -p $protobuf8dir && /bin/tar -xvzf $protobuf8 -C $protobuf8dir --strip-components=1 && cd $protobuf8dir && /usr/bin/patch -p1 </usr/src/0001-Add-generic-GCC-support-for-atomic-operations.patch && curl -o config.guess 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' && cp config.guess gtest/build-aux/",
+     command => "/usr/bin/wget $url/$protobuf8 && mkdir -p $protobuf8dir && /bin/tar -xvzf $protobuf8 -C $protobuf8dir --strip-components=1 && cd $protobuf8dir && /usr/bin/patch -p1 </usr/src/0001-Backport-atomic-operations-with-support-of-arm64-and.patch && curl -o config.guess 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' && cp config.guess gtest/build-aux/",
      creates => "/usr/src/$protobuf8dir",
-     require => File["/usr/src/0001-Add-generic-GCC-support-for-atomic-operations.patch"]
+     require => File["/usr/src/0001-Backport-atomic-operations-with-support-of-arm64-and.patch"]
   }
 
   exec { "install protobuf":
      cwd => "/usr/src/$protobuf8dir",
-     command => "/usr/src/$protobuf8dir/configure --prefix=/usr/local --disable-shared --with-pic && /usr/bin/make install",
+     command => "/usr/src/$protobuf8dir/autogen.sh && /usr/src/$protobuf8dir/configure --prefix=/usr/local --disable-shared --with-pic && /usr/bin/make install",
      creates => "/usr/local/bin/protoc",
      require => EXEC["download protobuf"],
      timeout => 3000