Bug 1250403 - Part 2. Import crbug #354405 for aarch64. r?billm
author Makoto Kato <m_kato@ga2.so-net.ne.jp>
Sun, 28 Feb 2016 19:11:09 +0900
changeset 335279 8fb2a36f9a1e40886c960975c9059906d9b32273
parent 335278 6622687aef7ab77062450a60da796736708c7d01
child 515103 a0fad539f62e2c5c217cecff3695007f41441f89
push id 11753
push user m_kato@ga2.so-net.ne.jp
push date Sun, 28 Feb 2016 10:32:52 +0000
reviewers billm
bugs 1250403, 354405
milestone 47.0a1
Bug 1250403 - Part 2. Import crbug #354405 for aarch64. r?billm

MozReview-Commit-ID: A3sArb6IE6m
ipc/chromium/moz.build
ipc/chromium/src/base/atomicops.h
ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
--- a/ipc/chromium/moz.build
+++ b/ipc/chromium/moz.build
@@ -157,17 +157,17 @@ if os_bsd or os_linux:
         ]
     if CONFIG['MOZ_ENABLE_QT']:
         SOURCES += [
             '!moc_message_pump_qt.cc',
             'src/base/message_pump_qt.cc',
         ]
 
 ost = CONFIG['OS_TEST']
-if '86' not in ost and 'arm' not in ost and 'mips' not in ost:
+if '86' not in ost and 'arm' not in ost and 'aarch64' != ost and 'mips' not in ost:
     SOURCES += [
         'src/base/atomicops_internals_mutex.cc',
     ]
 
 CXXFLAGS += CONFIG['TK_CFLAGS']
 
 include('/ipc/chromium/chromium-config.mozbuild')
 
--- a/ipc/chromium/src/base/atomicops.h
+++ b/ipc/chromium/src/base/atomicops.h
@@ -133,17 +133,19 @@ Atomic64 Release_Load(volatile const Ato
 
 // Include our platform specific implementation.
 #if defined(OS_WIN) && defined(ARCH_CPU_X86_FAMILY)
 #include "base/atomicops_internals_x86_msvc.h"
 #elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
 #include "base/atomicops_internals_x86_macosx.h"
 #elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
 #include "base/atomicops_internals_x86_gcc.h"
-#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
 #include "base/atomicops_internals_arm_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
+#include "base/atomicops_internals_arm64_gcc.h"
 #elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
 #include "base/atomicops_internals_mips_gcc.h"
 #else
 #include "base/atomicops_internals_mutex.h"
 #endif
 
 #endif  // BASE_ATOMICOPS_H_
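
As context for the dispatch above, here is a minimal usage sketch (not part of the patch): callers go through base/atomicops.h and use the base::subtle operations, which on a GCC/aarch64 build are now provided by the new arm64 header rather than the mutex fallback. The RefCounted type and helper functions below are hypothetical and for illustration only.

    // Illustrative only, not part of the patch. The operations below are the
    // ones declared by base/atomicops.h and implemented by
    // atomicops_internals_arm64_gcc.h on GCC/aarch64 builds.
    #include "base/atomicops.h"

    struct RefCounted {
      base::subtle::Atomic32 refcount;
    };

    inline void AddRef(RefCounted* obj) {
      // A relaxed increment is sufficient for taking a reference.
      base::subtle::NoBarrier_AtomicIncrement(&obj->refcount, 1);
    }

    inline bool ReleaseRef(RefCounted* obj) {
      // The decrement needs barrier semantics so that writes made while the
      // reference was held are visible before the object is destroyed.
      return base::subtle::Barrier_AtomicIncrement(&obj->refcount, -1) == 0;
    }
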
new file mode 100644
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,360 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+// TODO(rmcilroy): Investigate whether we can use __sync__ intrinsics instead of
+//                 the hand coded assembly without introducing perf regressions.
+// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
+//                 exclusive load / store assembly instructions and do away with
+//                 the barriers.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+#if defined(OS_QNX)
+#include <sys/cpuinline.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                                  \n\t"  // Data memory barrier.
+    ::: "memory"
+  );  // NOLINT
+}
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    "clrex                                 \n\t"  // In case we didn't swap.
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                       \n\t"
+    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]\n\t"
+    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "dmb ish                               \n\t"  // Data memory barrier.
+    "1:                                    \n\t"
+    // If the compare failed the 'dmb' is unnecessary, but we still need a
+    // 'clrex'.
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  MemoryBarrier();
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    // If the compare failed we still need a 'clrex'.
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[result], %[ptr]                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                     \n\t"
+    "ldxr %[result], %[ptr]                 \n\t"
+    "add %[result], %[result], %[increment] \n\t"
+    "stxr %w[temp], %[result], %[ptr]       \n\t"
+    "cbnz %w[temp], 0b                      \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "dmb ish                               \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  MemoryBarrier();
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
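
The first TODO near the top of the new header asks whether the hand-coded assembly could be replaced with compiler intrinsics. As a rough sketch only (not part of the patch, and not validated for the performance regressions the TODO worries about), the compare-and-swap operations could be expressed with GCC's __atomic builtins; the My* names are hypothetical.

    // Rough sketch only, not part of the patch: what the header's first TODO
    // suggests, using GCC's __atomic builtins instead of hand-coded assembly.
    // The My* names are hypothetical and exist only for illustration.
    #include <stdint.h>

    typedef int32_t Atomic32;

    inline Atomic32 MyNoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                               Atomic32 old_value,
                                               Atomic32 new_value) {
      Atomic32 expected = old_value;
      // Strong CAS with relaxed ordering, matching the NoBarrier_ semantics.
      // On failure 'expected' is updated to the value observed, so returning
      // it yields the previous value in both cases, as the assembly does.
      __atomic_compare_exchange_n(ptr, &expected, new_value,
                                  /*weak=*/false,
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED);
      return expected;
    }

    inline Atomic32 MyAcquire_CompareAndSwap(volatile Atomic32* ptr,
                                             Atomic32 old_value,
                                             Atomic32 new_value) {
      Atomic32 expected = old_value;
      // Acquire ordering roughly corresponds to the ldxr/stxr + dmb sequence
      // in Acquire_CompareAndSwap above.
      __atomic_compare_exchange_n(ptr, &expected, new_value,
                                  /*weak=*/false,
                                  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
      return expected;
    }

The second TODO (using the acquire/release forms of the exclusive load/store instructions, e.g. ldaxr/stlxr, to drop the explicit dmb barriers) is left as-is by this import.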