//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background, see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
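//
// As a rough, illustrative sketch (names and values are examples only): a
// C++ program's
//   std::atomic<int> x;
//   x.store(1, std::memory_order_release);
//   int v = x.load(std::memory_order_acquire);
// is instrumented by the compiler into calls roughly equivalent to
//   __tsan_atomic32_store(&x, 1, mo_release);
//   a32 v = __tsan_atomic32_load(&x, mo_acquire);
// where the entry points are defined at the bottom of this file.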

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
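
// For example, mo_seq_cst satisfies IsAcquireOrder, IsReleaseOrder and
// IsAcqRelOrder, so a seq_cst RMW below performs a combined acquire-release
// on the sync object, while a relaxed RMW satisfies none of these predicates
// and skips clock synchronization entirely.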

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
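
// Worked example for the CAS loop above (values are illustrative): with
// *v == 0xF0 and op == 0x3C, the loop stores ~(0xF0 & 0x3C), i.e. ~0x30,
// and returns the previous value 0xF0, matching fetch-and-nand semantics.
// If another thread changes *v between the read and the CAS, the CAS fails,
// cmp is refreshed with the current value and the loop retries.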

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// The 128-bit ops below are emulated under a tsan-internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
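
// A minimal sketch of the message-passing pattern the acquire path above
// supports (data/flag/use are illustrative names, not part of the runtime):
//   Thread 1:  data = 42;
//              __tsan_atomic32_store(&flag, 1, mo_release);
//   Thread 2:  while (__tsan_atomic32_load(&flag, mo_acquire) == 0) {}
//              use(data);
// Acquiring s->clock on the acquire-load of flag lets the detector order
// thread 2's read of data after thread 1's release-store and its preceding
// writes, so no race is reported.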

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off a release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
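
// The value-returning overload above backs the __sync-style
// __tsan_atomicN_compare_exchange_val entry points: it takes the expected
// value by copy and returns the value the memory location held before the
// operation (equal to the expected value exactly when the CAS succeeded),
// matching __sync_val_compare_and_swap semantics.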

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
  return (morder)(mo & 0x7fff);
}
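
// For example (values purely illustrative): an exchange compiled with
// __ATOMIC_HLE_ACQUIRE arrives with mo == (mo_acquire | (1 << 16));
// masking with 0x7fff drops the HLE bit and leaves plain mo_acquire.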

#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
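
// A rough sketch of what SCOPED_ATOMIC expands to inside, say,
// __tsan_atomic32_load(a, mo) (simplified for orientation, not verbatim):
//   ThreadState *const thr = cur_thread();
//   if (thr->ignore_sync || thr->ignore_interceptors)
//     return NoTsanAtomicLoad(a, mo);         // bypass sync/race handling
//   mo = convert_morder(mo);                  // strip HLE/SYNC flag bits
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, __func__);  // FuncEntry/FuncExit
//   return AtomicLoad(thr, pc, a, mo);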

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

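// Note on the *_compare_exchange_weak entry points below: they share the
// same AtomicCAS/func_cas implementation as the strong variants. This is
// valid because a weak compare-exchange is merely allowed, not required,
// to fail spuriously, so strong semantics satisfy the weak contract.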
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;  // Dummy address: SCOPED_ATOMIC references 'a' and sizeof(*a).
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
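
// The Go entry points below receive their arguments packed by the caller
// into a single byte buffer 'a' (see the offset arithmetic below): the
// address of the atomic variable sits at offset 0, operands follow it, and
// results (loaded/old values, CAS success flag) are written back into the
// same buffer.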

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO