//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

// Helper to suppress warnings related to 8-byte atomic accesses when the target
// is 32-bit AIX (where such accesses use libatomic).
#if defined(_AIX) && !defined(__powerpc64__) && defined(__clang__)
#  define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN \
    _Pragma("clang diagnostic push")              \
        _Pragma("clang diagnostic ignored \"-Watomic-alignment\"")
#  define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END _Pragma("clang diagnostic pop")
#else
#  define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN
#  define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END
#endif

namespace __sanitizer {

// We use the compiler builtin __atomic operations, which generate correct
// code for all architectures but may require libatomic on platforms where,
// e.g., 64-bit atomics are not supported natively.

// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.

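// Illustrative usage sketch (not itself part of this header): the wrappers
// below operate on the atomic_* structs declared in sanitizer_atomic.h
// (e.g. atomic_uint32_t), whose val_dont_use field these templates access:
//
//   atomic_uint32_t flag;
//   atomic_store(&flag, 1, memory_order_release);
//   u32 v = atomic_load(&flag, memory_order_acquire);
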
inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }

inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }

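// Hint that the caller is spin-waiting: on x86 this executes the 'pause'
// instruction cnt times; elsewhere it is only a compiler barrier.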
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
#endif
}
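
// A minimal spin-wait sketch using proc_yield ('lock' here is a hypothetical
// atomic_uint32_t, not defined in this file):
//
//   while (atomic_load(&lock, memory_order_acquire) != 0)
//     proc_yield(10);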

SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN
template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
         mo == memory_order_acquire || mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_load_n(&a->val_dont_use, mo);
}

template <typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
         mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  __atomic_store_n(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_add(&a->val_dont_use, v, mo);
}
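
// E.g., a relaxed statistics counter (sketch; 'n_allocs' is hypothetical):
//
//   atomic_uint64_t n_allocs;
//   atomic_fetch_add(&n_allocs, 1, memory_order_relaxed);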

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
                                        memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_exchange_n(&a->val_dont_use, v, mo);
}
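
// E.g., atomically taking ownership of a pointer-sized slot (sketch; 'slot'
// is a hypothetical atomic_uintptr_t):
//
//   uptr old = atomic_exchange(&slot, 0, memory_order_acq_rel);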

template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  // Transitioned from __sync_val_compare_and_swap to support targets like
  // SPARC V8 that cannot inline atomic cmpxchg; __atomic_compare_exchange can
  // instead be resolved from libatomic. __ATOMIC_SEQ_CST is used to best match
  // the __sync builtin memory order, so the mo argument is ignored here.
  return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
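
// Typical CAS retry loop (sketch; 'refcount' is a hypothetical
// atomic_uint32_t). On failure *cmp is updated to the value observed:
//
//   u32 cmp = atomic_load(&refcount, memory_order_relaxed);
//   while (!atomic_compare_exchange_strong(&refcount, &cmp, cmp + 1,
//                                          memory_order_acquire)) {
//   }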

SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_CLANG_H