//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

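// Tags used on the fallback (no-Thread) allocation and deallocation paths,
// where no per-thread random tag generator is available.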
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the chunk metadata
  // is not yet ready, or almost returned to the underlying allocator and the
  // metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

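// Random filler pattern copied into the unused bytes of an allocation's last
// granule in HwasanAllocate and verified in HwasanDeallocate to catch small
// buffer overflows past the requested size.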
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

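// The 64-bit alloc_context_id packs the allocating thread's unique id into the
// upper 32 bits and the StackDepot id of the allocation stack into the lower
// 32 bits; GetAllocThreadId/GetAllocStackId below unpack it.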
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

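// In aliasing mode the allocator places heap memory in a dedicated region at a
// fixed offset from the dynamic shadow base; the CHECKs below verify that the
// whole alias region stays within the taggable address range.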
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms,
      GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }

void AllocatorThreadFinish(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
  allocator.DestroyCache(cache);
}

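// Rounds the requested size up to a multiple of kShadowAlignment (one tag
// granule); a zero-byte request still occupies one granule.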
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSAN and ASAN behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    // The secondary allocator mmaps memory, which is already zero-initialized,
    // so we don't need to clear it explicitly.
    if (allocator.FromPrimary(allocated))
      internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // The last byte of the granule is reserved for the short granule tag and
    // is excluded from the magic tail, so explicitly zero it.
    tail[tail_length - 1] = 0;
  }

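  // Assign a tag to the allocation: generate a random tag (or the fallback
  // tag when no Thread is available), tag all fully used granules with it,
  // and for a partially used trailing granule store the number of used bytes
  // in shadow and the real tag in the granule's last byte (short granule).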
  void *user_ptr = allocated;
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
    uptr tag_size = orig_size ? orig_size : 1;
    uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
    if (full_granule_size != tag_size) {
      u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
      TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                       tag_size % kShadowAlignment);
      short_granule[kShadowAlignment - 1] = tag;
    }
  } else {
    // Tagging cannot be skipped entirely: if it's disabled, we still need to
    // tag the memory with zeros.
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, orig_size);
  return user_ptr;
}

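// Returns true when the tag embedded in the pointer matches the memory tag of
// the granule it points into (including the short granule case); pointers
// outside the taggable region are trivially considered matching.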
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  void *untagged_ptr = UntagPtr(tagged_ptr);

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }

  RunFreeHooks(tagged_ptr);

  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();
  u32 alloc_thread_id = meta->GetAllocThreadId();

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
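  // Retag the freed memory so that stale pointers, which still carry the old
  // tag, are detected on use.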
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
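    // Record the deallocation in the per-thread ring buffer so that reports
    // for dangling pointers can show both the allocation and the free stack.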
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
                alloc_context_id, free_context_id,
                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
    internal_memcpy(untagged_ptr_new, untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

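// Returns the start of the allocation that contains p, retagged with p's
// pointer tag, or null if p does not point into a recognized heap allocation.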
static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}

static uptr AllocationSize(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  return b->GetRequestedSize();
}

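// Fast variant that assumes p points at the beginning of a live allocation
// (see the DCHECKs in __sanitizer_get_allocated_size_fast below) and reads the
// metadata directly, skipping the lookups done in AllocationSize.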
static uptr AllocationSizeFast(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  return meta->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

uptr PointsIntoChunk(void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

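// Reconstructs the tagged user address for a chunk by reading the memory tag
// from shadow and attaching it to the untagged address.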
uptr GetUserAddr(uptr chunk) {
  if (!InTaggableRegion(chunk))
    return chunk;
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }