//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
  return failed;
}
SANITIZER_INTERFACE_ATTRIBUTE
void WEAK OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#ifndef TSAN_GO
  , last_sleep_clock(tid)
#endif
{
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
#ifndef TSAN_GO
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
#endif
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedBuffer<char> filename(4096);
      internal_snprintf(filename.data(), filename.size(), "%s.%d",
          flags()->profile_memory, (int)internal_getpid());
      uptr openrv = OpenFile(filename.data(), true);
      if (internal_iserror(openrv)) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            &filename[0]);
      } else {
        mprof_fd = openrv;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef TSAN_GO
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif

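// Returns the shadow range for [addr, addr+size) to the OS; used when the
// corresponding application memory is known to be dead and its shadow need
// not be preserved.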
void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // Windows wants 64K alignment.
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBeg);
  CHECK_LE(addr + size, kTraceMemEnd);
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // Windows wants 64K alignment.
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer cannot mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}

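// Sanity-checks the app->shadow and app->meta address mappings on a handful
// of addresses sampled from each user region (including +/-1 neighbors).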
static void CheckShadowMapping() {
  for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
    const uptr beg = UserRegions[i];
    const uptr end = UserRegions[i + 1];
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -1; x <= 1; x++) {
        const uptr p = p0 + x;
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        VPrintf(3, "  checking pointer %p -> %p\n", p, s);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
        const uptr m = (uptr)MemToMeta(p);
        CHECK(IsMetaMem(m));
      }
    }
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(kTsanOptionsEnv);
  InitializeFlags(&ctx->flags, options);
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
  StartBackgroundThread();
#ifndef TSAN_GO
  SetSandboxingCallback(StopBackgroundThread);
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (common_flags()->verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    internal_start_thread(&BackgroundThread, 0);
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

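// In Go mode the shadow (call) stack grows on demand, since goroutine
// counts and stack depths are unbounded; in C++ mode the stack has a fixed
// reserved size and overflow is caught by DCHECKs at the call sites.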
#ifdef TSAN_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

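// Stores the current shadow stack (with pc temporarily pushed on top when
// pc != 0) in the stack depot and returns its id. Returns 0 during early
// bootstrap, before the shadow stack is set up.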
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#ifndef TSAN_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

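// Called when the thread's epoch crosses a trace part boundary: records the
// starting epoch, call stack and mutex set in the part header, so that
// reports can later restore the state at any event within the part.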
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

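// Number of trace events per thread: the history_size flag selects
// 2^(kTracePartSizeBits + history_size + 1) events.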
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

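// Shadow cells are accessed with relaxed atomics: concurrent mutation by
// other threads is expected, and the algorithm tolerates stale values.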
ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

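// Returns true if the access recorded in the old shadow value is ordered
// before the current thread's state: the thread's vector clock entry for
// old's tid has already reached old's epoch.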
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a (pseudo-)random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

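// Breaks a potentially unaligned access into 1/2/4/8-byte sub-accesses
// that do not cross 8-byte shadow cell boundaries.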
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

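// Checks whether one of the shadow slots already records an equivalent
// access by this thread: same addr0/size and tid, same atomicity, at least
// as strong an access type (a stored write also covers a read), and with an
// epoch newer than the last synchronization. If so, the fast path in
// MemoryAccess can skip the full shadow update.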
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access     = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]        = access[32:63]
  // addr0[32:63]       = access[32:63]
  // addr0[64:95]       = access[32:63]
  // addr0[96:127]      = access[32:63]
  const m128 addr0      = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0    = _mm_load_si128((__m128i*)s);
  const m128 shadow1    = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]    = shadow0[32:63]
  // addr_vect[32:63]   = shadow0[96:127]
  // addr_vect[64:95]   = shadow1[32:63]
  // addr_vect[96:127]  = shadow1[96:127]
  m128 addr_vect        = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask  = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect           = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res   = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63]       = sync_epoch
  const m128 epoch1     = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]        = sync_epoch[0:31]
  // epoch[32:63]       = sync_epoch[0:31]
  // epoch[64:95]       = sync_epoch[0:31]
  // epoch[96:127]      = sync_epoch[0:31]
  const m128 epoch      = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > epoch? (strict, matching the slow path)
  const m128 epoch_res  = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res        = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask        = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads.
  DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

724
725ALWAYS_INLINE USED
726void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
727    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
728  u64 *shadow_mem = (u64*)MemToShadow(addr);
729  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
730      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
731      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
732      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
733      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
734      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
735#if TSAN_DEBUG
736  if (!IsAppMem(addr)) {
737    Printf("Access to non app mem %zx\n", addr);
738    DCHECK(IsAppMem(addr));
739  }
740  if (!IsShadowMem((uptr)shadow_mem)) {
741    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
742    DCHECK(IsShadowMem((uptr)shadow_mem));
743  }
744#endif
745
746  if (kCppMode && *shadow_mem == kShadowRodata) {
747    // Access to .rodata section, no races here.
748    // Measurements show that it can be 10-20% of all memory accesses.
749    StatInc(thr, StatMop);
750    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
751    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
752    StatInc(thr, StatMopRodata);
753    return;
754  }
755
756  FastState fast_state = thr->fast_state;
757  if (fast_state.GetIgnoreBit()) {
758    StatInc(thr, StatMop);
759    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
760    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
761    StatInc(thr, StatMopIgnored);
762    return;
763  }
764
765  Shadow cur(fast_state);
766  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
767  cur.SetWrite(kAccessIsWrite);
768  cur.SetAtomic(kIsAtomic);
769
770  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
771      thr->fast_synch_epoch, kAccessIsWrite))) {
772    StatInc(thr, StatMop);
773    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
774    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
775    StatInc(thr, StatMopSame);
776    return;
777  }
778
779  if (kCollectHistory) {
780    fast_state.IncrementEpoch();
781    thr->fast_state = fast_state;
782    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
783    cur.IncrementEpoch();
784  }
785
786  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
787      shadow_mem, cur);
788}
789
790// Called by MemoryAccessRange in tsan_rtl_thread.cc
791ALWAYS_INLINE USED
792void MemoryAccessImpl(ThreadState *thr, uptr addr,
793    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
794    u64 *shadow_mem, Shadow cur) {
795  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
796      thr->fast_synch_epoch, kAccessIsWrite))) {
797    StatInc(thr, StatMop);
798    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
799    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
800    StatInc(thr, StatMopSame);
801    return;
802  }
803
804  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
805      shadow_mem, cur);
806}
807
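// Writes val into the first slot of every shadow cell in [addr, addr+size)
// and clears the remaining slots. For large C/C++ ranges the middle part is
// remapped with fresh zero pages instead of being written explicitly.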
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

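// Traces the function entry event and pushes pc onto the shadow call stack;
// FuncExit below undoes this.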
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#ifndef TSAN_GO
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#ifndef TSAN_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#ifndef TSAN_GO
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef TSAN_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

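// Presumably, exactly one build_consistency_* symbol is emitted per build
// configuration, so that linking objects built with mismatched settings
// (debug/release, stats, shadow count) fails at link time.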
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif