//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#ifndef TSAN_GO
struct MapUnmapCallback;
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }
  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // The trace consists of 2^(hs + 1) parts; when hs == 0, that is 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
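
// A minimal usage sketch (illustrative only, not code from this file):
// pack a tid/epoch pair, advance the epoch, and note that the ignore bit
// does not leak into tid().
//
//   FastState fs(/*tid=*/5, /*epoch=*/7);
//   fs.IncrementEpoch();  // fs.tid() == 5, fs.epoch() == 8
//   fs.SetIgnoreBit();    // fs.tid() is still 5: tid() masks the bit out,
//                         // while TidWithIgnore() would include it.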

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }
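
  // Worked example (illustrative numbers): s1.addr0() == 1 with
  // s1.size() == 4 covers bytes [1,5) of the granule, and s2.addr0() == 3.
  // Then diff == 1 - 3, so (s64)diff == -2 < 0 and s1.size() > 2 holds:
  // the fast path answers true, matching TwoRangesIntersectSlow on the
  // byte ranges [1,5) and [3,3+kS2AccessSize).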

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is considered
  // the msb of the tid. So any access races with a shadow value that has the
  // freed bit set (it is as if it were a write from a thread with which we
  // have never synchronized). This allows us to detect accesses to freed
  // memory without additional overhead in memory access processing, while
  // still restoring the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
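
  // Sketch of the intended write-on-free pattern (mirrors the logic of
  // MemoryRangeFreed in the runtime; shown here only for illustration):
  //
  //   Shadow s(thr->fast_state);   // tid/epoch of the freeing thread
  //   s.ClearIgnoreBit();
  //   s.MarkAsFreed();
  //   s.SetWrite(true);
  //   s.SetAddr0AndSizeLog(0, 3);  // the whole 8-byte granule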

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5 + kClkBits;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
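
// Decoding sketch (illustrative only; `raw` stands for a shadow word loaded
// from shadow memory): the accessors above recover the access description.
//
//   Shadow s(raw);
//   if (!s.IsZero() && !s.IsFreed()) {
//     // s.tid(), s.epoch()        -- who accessed and when
//     // s.addr0(), s.size()       -- which bytes of the 8-byte granule
//     // s.IsWrite(), s.IsAtomic() -- what kind of access it was
//   }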

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef TSAN_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

  InternalDeadlockDetector internal_deadlock_detector;
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  atomic_uintptr_t in_signal_handler;
  SignalContext *signal_ctx;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef TSAN_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif
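
// Typical usage sketch (the real entry points live elsewhere in the runtime;
// ExampleEntryPoint is a hypothetical name):
//
//   void ExampleEntryPoint(uptr pc, uptr addr) {
//     ThreadState *thr = cur_thread();
//     MemoryRead(thr, pc, addr, kSizeLog8);  // declared below
//   }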

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
  void OnDetached(void *arg);
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}
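
// Usage sketch: capture the current shadow stack with the caller's pc on
// top; VarSizeStackTrace (from tsan_stack_trace.h) is one StackTraceTy.
//
//   VarSizeStackTrace trace;
//   ObtainCurrentStack(thr, pc, &trace);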

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
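
// Usage sketch: both macros take Printf-style format strings and compile
// away entirely unless TSAN_DEBUG_OUTPUT is set high enough, e.g.:
//
//   DPrintf("#%d: ThreadStart\n", thr->tid);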

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                                      uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                           uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                            uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
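
// Usage sketch: the wrappers above only fix the is-write/is-atomic flags,
// so an instrumented 4-byte plain store reduces to (hypothetical call site):
//
//   MemoryWrite(thr, pc, addr, kSizeLog4);
//   // == MemoryAccess(thr, pc, addr, kSizeLog4, /*kAccessIsWrite=*/true,
//   //                 /*kIsAtomic=*/false);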

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0 && defined(__x86_64__)
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
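
// Usage sketch: the argument must name a function with a matching assembly
// thunk (see the real use in TraceAddEvent below):
//
//   HACKY_CALL(__tsan_trace_switch);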

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                        EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
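
// Event encoding sketch: the top 3 bits hold the EventType and the low
// 61 bits hold the pc/addr payload, so decoding an event is, in outline:
//
//   EventType typ = (EventType)(ev >> 61);
//   u64 addr = GetLsb(ev, 61);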

}  // namespace __tsan

#endif  // TSAN_RTL_H