//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/Java/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition: in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition:
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of two.
// Dies on all errors except out-of-memory, in which case it returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// inaccessible memory region.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. The
// address will be aligned to mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address has
// max(2^min_shadow_base_alignment, mmap granularity) reserved on the left and
// shadow_size_bytes bytes on the right, which on Linux are mapped with no
// access. high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);
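// For illustration only (derived from the alignment rule above): with
// shadow_scale == 3 and min_shadow_base_alignment == 0, the returned base is
// aligned to GetMmapGranularity() * 8.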

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address.  Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve the memory range [beg, end]. If madvise_shadow is true, also apply
// the madvise settings (e.g. hugepages, core dumping) requested by the options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Finds an available address range of the given size and alignment.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages that lie entirely within the [beg, end] address range.
// No-op if the range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
  uptr os_handle_;
};
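// Illustrative usage sketch (reserve first, then commit pieces on demand):
//   ReservedAddressRange range;
//   uptr base = range.Init(reserved_size);
//   range.MapOrDie(base, committed_size);
// `reserved_size` and `committed_size` are placeholder variables, not part of
// this interface.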

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker-initialized.
//
// NOTE: Users should use the singleton provided via
// `GetGlobalLowLevelAllocator()` rather than create a new allocator. Sharing
// the singleton's contiguous mmap'd region keeps the number of mmap fragments
// low.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

LowLevelAllocator &GetGlobalLowLevelAllocator();
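// Illustrative usage (the caller must provide its own locking around
// Allocate, as noted above; `Foo` is a placeholder type):
//   void *mem = GetGlobalLowLevelAllocator().Allocate(sizeof(Foo));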

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
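// Illustrative usage: the message is printed only when verbosity >= 1.
//   VReport(1, "Using mapping at %p\n", (void *)addr);
// `addr` is a placeholder variable for the example.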

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that reports starting
// with this prefix are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
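// Illustrative usage (the trailing arguments fill the format string):
//   ERROR_OOM("requested %zu bytes\n", size);
// `size` is a placeholder variable for the example.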

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly recommended to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but constructs error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to users.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}
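// For example, RoundUpToPowerOfTwo(5) == 8 and RoundUpToPowerOfTwo(8) == 8.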

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}
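// Worked example: RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8, and
// IsAligned(16, 8) is true. The boundary/alignment must be a power of two.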

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
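// Illustrative usage: a self-destroying, mmap-backed vector of POD elements.
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   for (uptr a : addrs) Printf("%p\n", (void *)a);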

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
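// Illustrative usage: build a report string incrementally, then print it.
//   InternalScopedString str;
//   str.AppendF("%d frames skipped\n", num_skipped);
//   Printf("%s", str.data());
// `num_skipped` is a placeholder variable for the example.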

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
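// Illustrative usage: sorts ascending with the default comparator, or with a
// custom one. `v` is assumed to be an InternalMmapVector<uptr> for the example.
//   Sort(v.data(), v.size());
//   Sort(v.data(), v.size(), [](uptr a, uptr b) { return a > b; });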

// Works like std::lower_bound: finds the first element that is not less
// than val. The container must be sorted with respect to comp.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
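// Illustrative usage: `chunks` is assumed to be a sorted
// InternalMmapVector<uptr>. The call returns the index of the first element
// that is >= addr, or chunks.size() if there is none.
//   uptr idx = InternalLowerBound(chunks, addr);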

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
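// Illustrative usage: {3, 1, 3, 2} becomes {1, 2, 3} after the call.
//   SortAndDedup(v);
// `v` is assumed to be an InternalMmapVector<uptr> for the example.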

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may re-read
// the file multiple times to avoid calling mmap between read attempts. It's
// used to read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of bytes read is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then.  This field makes it possible to distinguish between
  // those cases and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value of the allocator_release_to_os_interval_ms common flag;
// it indicates that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
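// Illustrative usage of the placement operator new above (`MyType` is a
// placeholder type). Objects allocated this way are never destroyed or freed:
//   MyType *obj = new (__sanitizer::GetGlobalLowLevelAllocator()) MyType();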

#endif  // SANITIZER_COMMON_H