//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {

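// RAII object that scopes a single error report. For fatal errors, report
// text printed while the object is alive is accumulated in error_message_
// (see AppendToErrorMessageBuffer below); on destruction the text is handed
// to the registered callback and to SetAbortMessage, and Die() is called if
// the error is fatal.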
class ScopedReport {
 public:
  explicit ScopedReport(bool fatal) : fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    error_message_ptr_->Append(msg);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  InternalScopedString error_message_;
  bool fatal;

  static Mutex error_message_lock_;
  static InternalScopedString *error_message_ptr_
      SANITIZER_GUARDED_BY(error_message_lock_);
  static void (*error_report_callback_)(const char *);
};

Mutex ScopedReport::error_message_lock_;
InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations() = default;

  explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }

  void CopyFrom(Thread *t) {
    StackAllocationsRingBuffer *rb = t->stack_allocations();
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
    thread_id_ = t->unique_id();
  }

  ~SavedStackAllocations() {
    if (rb_) {
      StackAllocationsRingBuffer *rb = get();
      UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
    }
  }

  const StackAllocationsRingBuffer *get() const {
    return (const StackAllocationsRingBuffer *)&rb_;
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

  u32 thread_id() const { return thread_id_; }

 private:
  uptr rb_ = 0;
  u32 thread_id_;
};

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
}  // namespace

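// Searches the heap allocations ring buffer for a (freed) allocation that
// contains tagged_addr. The extra counters record how many entries would have
// matched under weaker address/tag comparisons; they are only printed as
// developer notes.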
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}

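// Prints stack frames from the saved stack allocations ring buffer whose
// local variables could carry the faulting tag. Each ring record packs a PC
// in its low kRecordFPShift bits and a compressed frame pointer in the
// remaining high bits; the decoding below reverses that packing.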
static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  InternalScopedString location;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        if (!(local.name && internal_strlen(local.name)) &&
            !(local.function_name && internal_strlen(local.function_name)) &&
            !(local.decl_file && internal_strlen(local.decl_file)))
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Guess top bits of local variable from the faulting address, because
        // we only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
        uptr local_beg = (fp + local.frame_offset) |
                         (untagged_addr & ~(uptr(kRecordFPModulus) - 1));
        uptr local_end = local_beg + local.size;

        if (!found_local) {
          Printf("\nPotentially referenced stack objects:\n");
          found_local = true;
        }

        uptr offset;
        const char *whence;
        const char *cause;
        if (local_beg <= untagged_addr && untagged_addr < local_end) {
          offset = untagged_addr - local_beg;
          whence = "inside";
          cause = "use-after-scope";
        } else if (untagged_addr >= local_end) {
          offset = untagged_addr - local_end;
          whence = "after";
          cause = "stack-buffer-overflow";
        } else {
          offset = local_beg - untagged_addr;
          whence = "before";
          cause = "stack-buffer-overflow";
        }
        Decorator d;
        Printf("%s", d.Error());
        Printf("Cause: %s\n", cause);
        Printf("%s", d.Default());
        Printf("%s", d.Location());
        StackTracePrinter::GetOrInit()->RenderSourceLocation(
            &location, local.decl_file, local.decl_line, /* column= */ 0,
            common_flags()->symbolize_vs_style,
            common_flags()->strip_path_prefix);
        Printf(
            "%p is located %zd bytes %s a %zd-byte local variable %s [%p,%p) "
            "in %s %s\n",
            untagged_addr, offset, whence, local_end - local_beg, local.name,
            local_beg, local_end, local.function_name, location.data());
        location.clear();
        Printf("%s\n", d.Default());
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.AppendF("  record_addr:0x%zx record:0x%zx",
                       reinterpret_cast<uptr>(record_addr), record);
    SymbolizedStackHolder symbolized_stack(
        Symbolizer::GetOrInit()->SymbolizePC(pc));
    const SymbolizedStack *frame = symbolized_stack.get();
    if (frame) {
      StackTracePrinter::GetOrInit()->RenderFrame(
          &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}

// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

void ReportStats() {}

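// Geometry of the shadow dump printed with each report: kShadowLines rows of
// kDumpWidth tags centered on the faulting granule, with short granule tags
// shown only for the middle kShortLines rows.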
constexpr uptr kDumpWidth = 16;
constexpr uptr kShadowLines = 17;
constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;

constexpr uptr kShortLines = 3;
constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;

static uptr GetPrintTagStart(uptr addr) {
  addr = MemToShadow(addr);
  addr = RoundDownTo(addr, kDumpWidth);
  addr -= kDumpWidth * (kShadowLines / 2);
  return addr;
}

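// Prints num_rows rows of kDumpWidth shadow bytes around the given shadow
// address, marking the row and cell of the faulting granule; print_tag is
// invoked once per shadow byte to render it.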
template <typename PrintTag>
static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
                                   InternalScopedString &s,
                                   PrintTag print_tag) {
  uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
  uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
  uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
  for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
    s.Append(row == center_row_beg ? "=>" : "  ");
    s.AppendF("%p:", (void *)ShadowToMem(row));
    for (uptr i = 0; i < kDumpWidth; i++) {
      s.Append(row + i == addr ? "[" : " ");
      print_tag(s, row + i);
      s.Append(row + i == addr ? "]" : " ");
    }
    s.AppendF("\n");
  }
}

template <typename GetTag, typename GetShortTag>
static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
                                GetShortTag get_short_tag) {
  InternalScopedString s;
  addr = MemToShadow(addr);
  s.AppendF(
      "\nMemory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShadowLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           s.AppendF("%02x", tag);
                         });

  s.AppendF(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShortLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           if (tag >= 1 && tag <= kShadowAlignment) {
                             tag_t short_tag = get_short_tag(tag_addr);
                             s.AppendF("%02x", short_tag);
                           } else {
                             s.AppendF("..");
                           }
                         });
  s.AppendF(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
  Printf("%s", s.data());
}

static uptr GetTopPc(const StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

namespace {
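// State shared by all report kinds. The constructor snapshots everything the
// report may need (shadow tags, heap chunk metadata, stack and heap ring
// buffer entries) up front, so that printing does not have to re-read data
// structures that may change while the report is being produced.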
class BaseReport {
 public:
  BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
      : scoped_report(fatal),
        stack(stack),
        tagged_addr(tagged_addr),
        access_size(access_size),
        untagged_addr(UntagAddr(tagged_addr)),
        ptr_tag(GetTagFromPointer(tagged_addr)),
        mismatch_offset(FindMismatchOffset()),
        heap(CopyHeapChunk()),
        allocations(CopyAllocations()),
        candidate(FindBufferOverflowCandidate()),
        shadow(CopyShadow()) {}

 protected:
  struct OverflowCandidate {
    uptr untagged_addr = 0;
    bool after = false;
    bool is_close = false;

    struct {
      uptr begin = 0;
      uptr end = 0;
      u32 thread_id = 0;
      u32 stack_id = 0;
      bool is_allocated = false;
    } heap;
  };

  struct HeapAllocation {
    HeapAllocationRecord har = {};
    uptr ring_index = 0;
    uptr num_matching_addrs = 0;
    uptr num_matching_addrs_4b = 0;
    u32 free_thread_id = 0;
  };

  struct Allocations {
    ArrayRef<SavedStackAllocations> stack;
    ArrayRef<HeapAllocation> heap;
  };

  struct HeapChunk {
    uptr begin = 0;
    uptr size = 0;
    u32 stack_id = 0;
    bool from_small_heap = false;
    bool is_allocated = false;
  };

  struct Shadow {
    uptr addr = 0;
    tag_t tags[kShadowDumpSize] = {};
    tag_t short_tags[kShortDumpSize] = {};
  };

  sptr FindMismatchOffset() const;
  Shadow CopyShadow() const;
  tag_t GetTagCopy(uptr addr) const;
  tag_t GetShortTagCopy(uptr addr) const;
  HeapChunk CopyHeapChunk() const;
  Allocations CopyAllocations();
  OverflowCandidate FindBufferOverflowCandidate() const;
  void PrintAddressDescription() const;
  void PrintHeapOrGlobalCandidate() const;
  void PrintTags(uptr addr) const;

  SavedStackAllocations stack_allocations_storage[16];
  HeapAllocation heap_allocations_storage[256];

  const ScopedReport scoped_report;
  const StackTrace *stack = nullptr;
  const uptr tagged_addr = 0;
  const uptr access_size = 0;
  const uptr untagged_addr = 0;
  const tag_t ptr_tag = 0;
  const sptr mismatch_offset = 0;

  const HeapChunk heap;
  const Allocations allocations;
  const OverflowCandidate candidate;

  const Shadow shadow;
};

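// Returns the offset, within the accessed range, of the first byte whose tag
// actually mismatches the pointer tag, taking short granules into account.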
sptr BaseReport::FindMismatchOffset() const {
  if (!access_size)
    return 0;
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // Even when offset is 0, (untagged_addr + offset) is not necessarily
    // aligned to granules; this is the offset of the leftmost accessed byte
    // within the bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
  }
  return offset;
}

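// Copies the shadow tags (and, for the middle rows, the short granule tags)
// that the report will display into the Shadow snapshot consumed by
// GetTagCopy() and GetShortTagCopy().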
BaseReport::Shadow BaseReport::CopyShadow() const {
  Shadow result;
  if (!MemIsApp(untagged_addr))
    return result;

  result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
  uptr tag_addr = result.addr;
  uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
  for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
    if (!MemIsShadow(tag_addr))
      continue;
    result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
    if (i < kShortDumpOffset || i >= short_end)
      continue;
    uptr granule_addr = ShadowToMem(tag_addr);
    if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
        IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
      result.short_tags[i - kShortDumpOffset] =
          *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
    }
  }
  return result;
}

tag_t BaseReport::GetTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr);
  uptr idx = addr - shadow.addr;
  CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
  return shadow.tags[idx];
}

tag_t BaseReport::GetShortTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr + kShortDumpOffset);
  uptr idx = addr - shadow.addr - kShortDumpOffset;
  CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
  return shadow.short_tags[idx];
}

BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
  HeapChunk result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  result.begin = chunk.Beg();
  if (result.begin) {
    result.size = chunk.ActualSize();
    result.from_small_heap = chunk.FromSmallHeap();
    result.is_allocated = chunk.IsAllocated();
    result.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

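// Visits all live threads and records, up to the fixed storage limits, the
// stack ring buffers of threads whose stack contains the address as well as
// matching heap deallocation records.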
BaseReport::Allocations BaseReport::CopyAllocations() {
  if (MemIsShadow(untagged_addr))
    return {};
  uptr stack_allocations_count = 0;
  uptr heap_allocations_count = 0;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
        t->AddrIsInStack(untagged_addr)) {
      stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
    }

    if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
      // Scan all threads' ring buffers to find if it's a heap-use-after-free.
      HeapAllocationRecord har;
      uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
      if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                             &ring_index, &num_matching_addrs,
                             &num_matching_addrs_4b)) {
        auto &ha = heap_allocations_storage[heap_allocations_count++];
        ha.har = har;
        ha.ring_index = ring_index;
        ha.num_matching_addrs = num_matching_addrs;
        ha.num_matching_addrs_4b = num_matching_addrs_4b;
        ha.free_thread_id = t->unique_id();
      }
    }
  });

  return {{stack_allocations_storage, stack_allocations_count},
          {heap_allocations_storage, heap_allocations_count}};
}

BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
  OverflowCandidate result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches ptr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
      candidate_tag_ptr = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(ptr_tag, right)) {
      candidate_tag_ptr = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;
  result.is_close = candidate_distance <= kCloseCandidateDistance;

  result.after = candidate_tag_ptr == left;
  result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
  HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
  if (chunk.IsAllocated()) {
    result.heap.is_allocated = true;
    result.heap.begin = chunk.Beg();
    result.heap.end = chunk.End();
    result.heap.thread_id = chunk.GetAllocThreadId();
    result.heap.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

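// Describes the candidate found by FindBufferOverflowCandidate(): either a
// live heap chunk (heap-buffer-overflow) or, failing that, a global variable
// in a loaded module (global-overflow).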
void BaseReport::PrintHeapOrGlobalCandidate() const {
  Decorator d;
  if (candidate.heap.is_allocated) {
    uptr offset;
    const char *whence;
    if (candidate.heap.begin <= untagged_addr &&
        untagged_addr < candidate.heap.end) {
      offset = untagged_addr - candidate.heap.begin;
      whence = "inside";
    } else if (candidate.after) {
      offset = untagged_addr - candidate.heap.end;
      whence = "after";
    } else {
      offset = candidate.heap.begin - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence,
           candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
           candidate.heap.end);
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(candidate.heap.stack_id).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
                                       &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate.after ? untagged_addr - (info.start + info.size)
                          : info.start - untagged_addr,
          candidate.after ? "after" : "before", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before",
            candidate.untagged_addr, module_name, module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before", size,
            candidate.untagged_addr, module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}

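// Prints everything we can infer about the address: shadow, heap chunk,
// potentially referenced stack objects, use-after-free candidates and
// overflow candidates, in decreasing order of likeliness.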
void BaseReport::PrintAddressDescription() const {
  Decorator d;
  int num_descriptions_printed = 0;

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address if it belongs to a
  // heap chunk.
  if (heap.begin) {
    Printf(
        "%s[%p,%p) is a %s %s heap chunk; "
        "size: %zd offset: %zd\n%s",
        d.Location(), heap.begin, heap.begin + heap.size,
        heap.from_small_heap ? "small" : "large",
        heap.is_allocated ? "allocated" : "unallocated", heap.size,
        untagged_addr - heap.begin, d.Default());
  }

  auto announce_by_id = [](u32 thread_id) {
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
      if (thread_id == t->unique_id())
        t->Announce();
    });
  };

  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  for (const auto &sa : allocations.stack) {
    Printf("%s", d.Error());
    Printf("\nCause: stack tag-mismatch\n");
    Printf("%s", d.Location());
    Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
           sa.thread_id());
    Printf("%s", d.Default());
    announce_by_id(sa.thread_id());
    PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
    num_descriptions_printed++;
  }

  if (allocations.stack.empty() && candidate.untagged_addr &&
      candidate.is_close) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  for (const auto &ha : allocations.heap) {
    const HeapAllocationRecord har = ha.har;

    Printf("%s", d.Error());
    Printf("\nCause: use-after-free\n");
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
           untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
           har.requested_size, UntagAddr(har.tagged_addr),
           UntagAddr(har.tagged_addr) + har.requested_size);
    Printf("%s", d.Allocation());
    Printf("freed by thread T%u here:\n", ha.free_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.free_context_id).Print();

    Printf("%s", d.Allocation());
    Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.alloc_context_id).Print();

    // Print a developer note: the index of this heap object
    // in the thread's deallocation ring buffer.
    Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
           flags()->heap_history_size);
    Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
    Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
           ha.num_matching_addrs_4b);

    announce_by_id(ha.free_thread_id);
    // TODO: announce_by_id(har.alloc_thread_id);
    num_descriptions_printed++;
  }

  if (candidate.untagged_addr && num_descriptions_printed == 0) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  // Print the remaining threads as extra information, one line per thread.
  if (flags()->print_live_threads_info) {
    Printf("\n");
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
  }

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likeliness.\n",
        num_descriptions_printed);
  }
}

void BaseReport::PrintTags(uptr addr) const {
  if (shadow.addr) {
    PrintTagsAroundAddr(
        addr, [&](uptr addr) { return GetTagCopy(addr); },
        [&](uptr addr) { return GetShortTagCopy(addr); });
  }
}

class InvalidFreeReport : public BaseReport {
 public:
  InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
  ~InvalidFreeReport();

 private:
};

InvalidFreeReport::~InvalidFreeReport() {
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (shadow.addr) {
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
           GetTagCopy(MemToShadow(untagged_addr)));
  }
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

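// Report emitted when the magic bytes placed after a heap object (within its
// last, short granule) are found overwritten at free/delete time; see the
// free_checks_tail_magic flag referenced in the report text below.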
class TailOverwrittenReport : public BaseReport {
 public:
  explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
                                 uptr orig_size, const u8 *expected)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
        orig_size(orig_size),
        tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
    CHECK_GT(tail_size, 0U);
    CHECK_LT(tail_size, kShadowAlignment);
    internal_memcpy(tail_copy,
                    reinterpret_cast<u8 *>(untagged_addr + orig_size),
                    tail_size);
    internal_memcpy(actual_expected, expected, tail_size);
    // Short granule is stashed in the last byte of the magic string. To avoid
    // confusion, make the expected magic string contain the short granule tag.
    if (orig_size % kShadowAlignment != 0)
      actual_expected[tail_size - 1] = ptr_tag;
  }
  ~TailOverwrittenReport();

 private:
  const uptr orig_size = 0;
  const uptr tail_size = 0;
  u8 actual_expected[kShadowAlignment] = {};
  u8 tail_copy[kShadowAlignment] = {};
};

TailOverwrittenReport::~TailOverwrittenReport() {
  Decorator d;
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  if (heap.begin) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(heap.stack_id).Print();
  }

  InternalScopedString s;
  u8 *tail = tail_copy;
  s.AppendF("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
  s.AppendF("\n");
  s.AppendF("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
  s.AppendF("\n");
  s.AppendF("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.AppendF(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,"
      "\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

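// Report for the common case: a memory access whose pointer tag does not
// match the tag of the granule it touches.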
class TagMismatchReport : public BaseReport {
 public:
  explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
                             uptr access_size, bool is_store, bool fatal,
                             uptr *registers_frame)
      : BaseReport(stack, fatal, tagged_addr, access_size),
        is_store(is_store),
        registers_frame(registers_frame) {}
  ~TagMismatchReport();

 private:
  const bool is_store;
  const uptr *registers_frame;
};

TagMismatchReport::~TagMismatchReport() {
  Decorator d;
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t short_tag =
        GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (mismatch_offset)
    Printf("Invalid access starting at offset %zu\n", mismatch_offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  t->Announce();

  PrintTags(untagged_addr + mismatch_offset);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
}  // namespace

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  InvalidFreeReport R(stack, tagged_addr);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
                      registers_frame);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
  Printf("\nRegisters where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
         frame[3]);
#endif
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx  x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#else
#endif
}

}  // namespace __hwasan

void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}