//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
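// Hard per-architecture cap on a single allocation. The
// max_allocation_size_mb flag may lower this limit but never raise it.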
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif

static Allocator allocator;

static uptr max_malloc_size;

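// Applies allocator-related common flags at startup and computes the
// effective allocation size limit.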
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

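// Returns the LSan metadata stored alongside a heap chunk, or null if p does
// not belong to this allocator.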
static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

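// Fills in the metadata for a new allocation. The 'allocated' flag lives in
// the first metadata byte and is set last, via a relaxed atomic store, so a
// concurrent leak scan never sees a half-initialized chunk as live.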
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

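// Clears the 'allocated' flag so the leak scanner stops treating the chunk
// as live; called before the block is returned to the allocator.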
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

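// Warns and returns null when the allocator may return null; otherwise dies.
// The sanitizer_common ReportAllocationSizeTooBig overload invoked on the
// fall-through path is declared NORETURN, so no return statement is needed
// after it.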
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}

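// Common allocation path shared by all malloc-family entry points.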
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
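  // Chunks from the secondary allocator come directly from mmap and are
  // therefore already zeroed; only primary-allocator memory needs the memset.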
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

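// calloc() path: checks nmemb * size for overflow before delegating to
// Allocate with cleared memory.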
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

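// Runs the free hooks and clears the chunk's metadata before handing the
// block back to the allocator.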
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

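// The old chunk is deregistered up front because the underlying Reallocate
// may free it; the (possibly moved) result is registered as a fresh
// allocation afterwards.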
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > max_malloc_size) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

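// Reports the size that was originally requested, not the allocator's
// rounded-up chunk capacity.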
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

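// The lsan_* entry points below are called from the interceptors; each
// implements the alignment, overflow, and errno contract of the
// corresponding libc function.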
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

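// Given an arbitrary pointer, returns the start address of the live chunk it
// points into, or 0 if it does not point into any live chunk's user region.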
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

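// LsanMetadata accessors used by the common leak-checking machinery; they
// forward to the ChunkMetadata stored next to the chunk.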
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

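// Tags the chunk containing p as kIgnored so it is excluded from leak
// reports. As the name suggests, the allocator is expected to be locked by
// the caller.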
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
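// Public __sanitizer_* allocator interface.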
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

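// LSan does not track freed or unmapped bytes, so these report 0.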
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"