//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_LINUX && \
    (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
     defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
     defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)

#include "sanitizer_stoptheworld.h"

#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"

#include <errno.h>
#include <sched.h>       // for CLONE_* definitions
#include <stddef.h>
#include <sys/prctl.h>   // for PR_* definitions
#include <sys/ptrace.h>  // for PTRACE_* definitions
#include <sys/types.h>   // for pid_t
#include <sys/uio.h>     // for iovec
#include <elf.h>         // for NT_PRSTATUS
#if (defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \
    !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
#include <sys/user.h>  // for user_regs_struct
#if SANITIZER_ANDROID && SANITIZER_MIPS
# include <asm/reg.h>  // for mips SP register in sys/user.h
#endif
#include <sys/wait.h>  // for signal-related stuff

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"

// Sufficiently old kernel headers don't provide this value, but we can still
// call prctl with it. If the runtime kernel is new enough, the prctl call will
// have the desired effect; if the kernel is too old, the call will error and
// we can ignore said error.
#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif

// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
//    deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
//    because clone() does not set up thread-local storage for it. Any
//    thread-local variables used by libc will be shared between the tracer
//    task and the thread which spawned it.
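//
// Illustrative sketch (not code from this module): a sanitizer client
// typically passes StopTheWorld() a callback that walks the suspended
// threads, along the lines of
//
//   static void Callback(const SuspendedThreadsList &threads, void *arg) {
//     for (uptr i = 0; i < threads.ThreadCount(); i++) {
//       // ... inspect threads.GetThreadID(i), its registers, stacks ...
//     }
//   }
//   StopTheWorld(Callback, &my_argument);  // 'my_argument' is hypothetical
//
// See sanitizer_stoptheworld.h for the exact callback signature.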

namespace __sanitizer {

class SuspendedThreadsListLinux final : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }

  tid_t GetThreadID(uptr index) const override;
  uptr ThreadCount() const override;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);

  PtraceRegistersStatus GetRegistersAndSP(uptr index,
                                          InternalMmapVector<uptr> *buffer,
                                          uptr *sp) const override;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};

// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  Mutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  uptr parent_pid;
};

// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
      : arg(arg), pid_(pid) {
    CHECK_GE(pid, 0);
  }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;

 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  bool SuspendThread(tid_t thread_id);
};

bool ThreadSuspender::SuspendThread(tid_t tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.ContainsTid(tid)) return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
            pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %zu.\n", (uptr)tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In such a case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for stopping, because the thread is not stopped yet.
    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
    // as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got an ECHILD error. I don't think this situation is possible, but
        // it doesn't hurt to report it.
        VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
                (uptr)tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void*)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}

void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
    }
  }
}

void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}
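
// SuspendAllThreads below must tolerate threads being created while it runs:
// any thread we have not attached to yet may spawn new threads. So it
// repeatedly re-lists the threads of pid_ and re-attaches until one full pass
// attaches nothing new (or a retry cap is hit); schematically:
//
//   do {
//     threads = ListThreads(pid);
//     progress = false;
//     for (tid : threads)
//       progress |= SuspendThread(tid);  // false if attached already/failed
//   } while (progress && retries_left);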
bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool retry = true;
  InternalMmapVector<tid_t> threads;
  threads.reserve(128);
  for (int i = 0; i < 30 && retry; ++i) {
    retry = false;
    switch (thread_lister.ListThreads(&threads)) {
      case ThreadLister::Error:
        ResumeAllThreads();
        return false;
      case ThreadLister::Incomplete:
        retry = true;
        break;
      case ThreadLister::Ok:
        break;
    }
    for (tid_t tid : threads) {
      if (SuspendThread(tid))
        retry = true;
    }
  }
  return suspended_threads_list_.ThreadCount();
}

// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };

static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}
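
// Exit codes used by the tracer task (summarized here for reference; each
// value appears at its internal__exit()/return site below):
//   0 - success
//   1 - tracer caught SIGABRT
//   2 - tracer caught another synchronous signal
//   3 - SuspendAllThreads() failed
//   4 - the parent died before the tracer finished starting up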
// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}

// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;

// This function will be run as a cloned task.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));

  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);

  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}
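
// The tracer's stack comes from ScopedStackSpaceWithGuard below. Layout of
// the mapping (addresses grow to the right):
//
//   guard_start_                                            Bottom()
//   |-- guard page, PROT_NONE --|-------- usable stack --------|
//
// The stack grows down, so Bottom() (the highest address of the mapping) is
// what gets passed to clone(), and an overflow runs into the inaccessible
// guard page instead of silently corrupting other memory.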
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};

// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;

class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;
};

// When sanitizer output is being redirected to a file (e.g. by using
// log_path), the tracer should write to the parent's log instead of trying to
// open a new file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
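
// Sequence of events in StopTheWorld(), roughly:
//   1. The parent locks the handshake mutex, blocks async signals, and clones
//      TracerThread (sharing the address space, but not signal handlers).
//   2. The parent grants ptrace permission with PR_SET_PTRACER and unlocks
//      the mutex, letting the tracer proceed.
//   3. The tracer attaches to every thread, runs the callback, detaches,
//      sets 'done', and exits.
//   4. The parent spins on 'done' (waitpid would touch the shared errno too
//      early) and then reaps the tracer with waitpid.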
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on Linux sigprocmask is
  // equivalent to pthread_sigmask, with the exception that pthread_sigmask
  // does not allow blocking some signals used internally by the pthread
  // implementation. We are fine with blocking them here; we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case
  // it does, we set up a special handler for sync signals that properly kills
  // the parent as well. Note: we don't pass CLONE_SIGHAND to clone, so
  // handlers in the tracer thread won't interfere with the user program.
  // Double note: if a user does something along the lines of 'kill -11 pid',
  // that can kill the process even if the user has set up their own handler
  // for SEGV.
  // Thing to watch out for: this code should not change the behavior of user
  // code in any observable way. In particular it should not override user
  // signal handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to
    // finish using the spin loop below. The man page for sched_yield() says
    // "In the Linux implementation, sched_yield() always succeeds", so let's
    // hope it does not spoil errno. Note that this spin loop runs only for
    // brief periods before the tracer thread has suspended us and when it
    // starts unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
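
// Each architecture block below defines:
//   regs_struct - the general-purpose register layout that ptrace returns
//       for this target;
//   REG_SP      - the member of regs_struct that holds the stack pointer;
//   ARCH_IOVEC_FOR_GETREGSET / kExtraRegs - defined when the target uses the
//       modern PTRACE_GETREGSET/iovec interface (with optional extra register
//       sets) instead of the legacy PTRACE_GETREGS call.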
// Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif
#define ARCH_IOVEC_FOR_GETREGSET
// Support ptrace extensions even when compiled without required kernel support
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif
// Compiler may use FP registers to store pointers.
static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
# define REG_SP regs[EF_R29]
# else
# define REG_SP regs[EF_REG29]
# endif

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__loongarch__)
typedef struct user_pt_regs regs_struct;
#define REG_SP regs[3]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
#undef REG_SP
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)

tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}

uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}

bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
  for (uptr i = 0; i < thread_ids_.size(); i++) {
    if (thread_ids_[i] == thread_id) return true;
  }
  return false;
}

void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}
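
// On ARCH_IOVEC_FOR_GETREGSET targets, GetRegistersAndSP() reads register
// sets via PTRACE_GETREGSET. The kernel writes at most iov_len bytes and
// stores the number of bytes actually written back into iov_len, so the
// growing-buffer loop below retries until the returned size is comfortably
// below what was offered; schematically:
//
//   struct iovec io = {buf, buf_size};
//   ptrace(PTRACE_GETREGSET, tid, (void *)NT_PRSTATUS, &io);
//   // io.iov_len now holds the size of the regset the kernel returned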
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  constexpr uptr uptr_sz = sizeof(uptr);
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
  auto AppendF = [&](uptr regset) {
    uptr size = buffer->size();
    // NT_X86_XSTATE requires 64bit alignment.
    uptr size_up = RoundUpTo(size, 8 / uptr_sz);
    buffer->reserve(Max<uptr>(1024, size_up));
    struct iovec regset_io;
    for (;; buffer->resize(buffer->capacity() * 2)) {
      buffer->resize(buffer->capacity());
      uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
      regset_io.iov_base = buffer->data() + size_up;
      regset_io.iov_len = available_bytes;
      bool fail =
          internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                           (void *)regset, (void *)&regset_io),
                           &pterrno);
      if (fail) {
        VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
                (void *)regset, tid, pterrno);
        buffer->resize(size);
        return false;
      }

      // Far enough from the buffer size, no need to resize and repeat.
      if (regset_io.iov_len + 64 < available_bytes)
        break;
    }
    buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
    return true;
  };

  buffer->clear();
  bool fail = !AppendF(NT_PRSTATUS);
  if (!fail) {
    // Accept the first available and do not report errors.
    for (uptr regs : kExtraRegs)
      if (regs && AppendF(regs))
        break;
  }
#else
  buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
  bool fail = internal_iserror(
      internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
  if (fail)
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
#endif
  if (fail) {
    // ESRCH means that the given thread is not suspended or already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
    // we should notify the caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }

  *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
  return REGISTERS_AVAILABLE;
}

}  // namespace __sanitizer

#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
        // || defined(__aarch64__) || defined(__powerpc64__)
        // || defined(__s390__) || defined(__i386__) || defined(__arm__)
        // || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64