blob: 403bda1174cc46b7a9998e26be388c2173e93850 [file] [log] [blame]
Nico Weber65492d92019-07-31 18:51:27 +00001//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//
Alexander Potapenko845b5752013-03-15 14:37:21 +00002//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Alexander Potapenko845b5752013-03-15 14:37:21 +00006//
7//===----------------------------------------------------------------------===//
8//
9// See sanitizer_stoptheworld.h for details.
10// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
11//
12//===----------------------------------------------------------------------===//
13
Evgeniy Stepanov0af67232013-03-19 14:33:38 +000014#include "sanitizer_platform.h"
Vedant Kumar552c0112015-09-30 23:52:54 +000015
Alexey Baturo7ce4dfb2020-10-04 12:38:06 +030016#if SANITIZER_LINUX && \
17 (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
18 defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
19 defined(__arm__) || SANITIZER_RISCV64)
Alexander Potapenko845b5752013-03-15 14:37:21 +000020
21#include "sanitizer_stoptheworld.h"
22
Dmitry Vyukovf54835f2013-10-15 12:57:59 +000023#include "sanitizer_platform_limits_posix.h"
Dmitry Vyukov72f16972015-03-05 14:37:28 +000024#include "sanitizer_atomic.h"
Dmitry Vyukovf54835f2013-10-15 12:57:59 +000025
Alexander Potapenko845b5752013-03-15 14:37:21 +000026#include <errno.h>
Sergey Matveev69931c52013-09-02 11:36:19 +000027#include <sched.h> // for CLONE_* definitions
Alexander Potapenko845b5752013-03-15 14:37:21 +000028#include <stddef.h>
29#include <sys/prctl.h> // for PR_* definitions
30#include <sys/ptrace.h> // for PTRACE_* definitions
31#include <sys/types.h> // for pid_t
Adhemerval Zanellad7984712015-08-05 15:17:59 +000032#include <sys/uio.h> // for iovec
33#include <elf.h> // for NT_PRSTATUS
Alexey Baturo7ce4dfb2020-10-04 12:38:06 +030034#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
Renato Golin4481fe02015-08-05 18:34:20 +000035// GLIBC 2.20+ sys/user does not include asm/ptrace.h
Maxim Ostapenko35460602017-04-18 07:22:26 +000036# include <asm/ptrace.h>
37#endif
38#include <sys/user.h> // for user_regs_struct
39#if SANITIZER_ANDROID && SANITIZER_MIPS
40# include <asm/reg.h> // for mips SP register in sys/user.h
Alexey Samsonov8d18cc32013-04-03 07:06:10 +000041#endif
Alexander Potapenko845b5752013-03-15 14:37:21 +000042#include <sys/wait.h> // for signal-related stuff
43
Dmitry Vyukovf54835f2013-10-15 12:57:59 +000044#ifdef sa_handler
45# undef sa_handler
46#endif
47
48#ifdef sa_sigaction
49# undef sa_sigaction
50#endif
51
Alexander Potapenko845b5752013-03-15 14:37:21 +000052#include "sanitizer_common.h"
Dmitry Vyukov21e99312013-10-15 15:35:56 +000053#include "sanitizer_flags.h"
Alexander Potapenko845b5752013-03-15 14:37:21 +000054#include "sanitizer_libc.h"
55#include "sanitizer_linux.h"
56#include "sanitizer_mutex.h"
Dmitry Vyukov6f7ca812013-03-18 08:09:42 +000057#include "sanitizer_placement_new.h"
Alexander Potapenko845b5752013-03-15 14:37:21 +000058
Sylvestre Ledrua8c54602017-11-08 07:25:19 +000059// Sufficiently old kernel headers don't provide this value, but we can still
60// call prctl with it. If the runtime kernel is new enough, the prctl call will
61// have the desired effect; if the kernel is too old, the call will error and we
62// can ignore said error.
63#ifndef PR_SET_PTRACER
64#define PR_SET_PTRACER 0x59616d61
65#endif
66
Alexander Potapenko845b5752013-03-15 14:37:21 +000067// This module works by spawning a Linux task which then attaches to every
68// thread in the caller process with ptrace. This suspends the threads, and
69// PTRACE_GETREGS can then be used to obtain their register state. The callback
70// supplied to StopTheWorld() is run in the tracer task while the threads are
71// suspended.
72// The tracer task must be placed in a different thread group for ptrace to
73// work, so it cannot be spawned as a pthread. Instead, we use the low-level
74// clone() interface (we want to share the address space with the caller
75// process, so we prefer clone() over fork()).
76//
Sergey Matveev14b99242013-10-15 11:54:38 +000077// We don't use any libc functions, relying instead on direct syscalls. There
78// are two reasons for this:
Alexander Potapenko845b5752013-03-15 14:37:21 +000079// 1. calling a library function while threads are suspended could cause a
//    deadlock, if one of the threads happens to be holding a libc lock;
81// 2. it's generally not safe to call libc functions from the tracer task,
82// because clone() does not set up a thread-local storage for it. Any
83// thread-local variables used by libc will be shared between the tracer task
84// and the thread which spawned it.
Alexander Potapenko845b5752013-03-15 14:37:21 +000085
Alexander Potapenko845b5752013-03-15 14:37:21 +000086namespace __sanitizer {
Dmitry Vyukov72f16972015-03-05 14:37:28 +000087
// Linux implementation of the suspended-threads container: records the tids
// of all threads the tracer managed to attach to with ptrace.
class SuspendedThreadsListLinux final : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }

  // Returns the tid stored at |index|; |index| must be < ThreadCount().
  tid_t GetThreadID(uptr index) const override;
  // Number of suspended threads recorded so far.
  uptr ThreadCount() const override;
  // Linear membership test; acceptable because the list is usually small.
  bool ContainsTid(tid_t thread_id) const;
  // Records a newly suspended thread.
  void Append(tid_t tid);

  // Reads the register state of the thread at |index| into |buffer| and its
  // stack pointer into |sp|; see the definition below for the two ptrace
  // strategies (PTRACE_GETREGSET vs PTRACE_GETREGS).
  PtraceRegistersStatus GetRegistersAndSP(uptr index,
                                          InternalMmapVector<uptr> *buffer,
                                          uptr *sp) const override;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};
104
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  // User callback to run while the world is stopped, and its opaque argument.
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  Mutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  // pid of the process that spawned the tracer; used by the tracer to detect
  // that the parent died before it even started.
  uptr parent_pid;
};
116
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
    : arg(arg)
    , pid_(pid) {
      CHECK_GE(pid, 0);
    }
  // Attaches to all threads of pid_; returns true if at least one thread was
  // suspended.
  bool SuspendAllThreads();
  // Detaches from every suspended thread, letting them run again.
  void ResumeAllThreads();
  // Last-resort cleanup: forcibly terminates every suspended thread.
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  // Back-pointer to the tracer argument, used by the signal handler to set
  // arg->done.
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;  // pid of the traced (parent) process
  bool SuspendThread(tid_t thread_id);
};
137
// Attaches to |tid| with PTRACE_ATTACH and waits until the thread has actually
// stopped. Returns true only if the thread was newly suspended by this call.
bool ThreadSuspender::SuspendThread(tid_t tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.ContainsTid(tid)) return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
            pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %zu.\n", (uptr)tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In such case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for stopping, because the thread is not stopped yet.
    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
    // as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      // __WALL: wait for clone "threads" as well as ordinary children.
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got a ECHILD error. I don't think this situation is possible, but it
        // doesn't hurt to report it.
        VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
                (uptr)tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        // Forward the pending (non-SIGSTOP) signal and keep waiting for the
        // actual stop notification.
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void*)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}
186
187void ThreadSuspender::ResumeAllThreads() {
Francis Ricci5989dd22017-04-17 20:29:38 +0000188 for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
Alexander Potapenko845b5752013-03-15 14:37:21 +0000189 pid_t tid = suspended_threads_list_.GetThreadID(i);
Peter Collingbourne6f4be192013-05-08 14:43:49 +0000190 int pterrno;
Vedant Kumar552c0112015-09-30 23:52:54 +0000191 if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
Peter Collingbourne6f4be192013-05-08 14:43:49 +0000192 &pterrno)) {
Dmitry Vyukovb79ac882015-03-02 17:36:02 +0000193 VReport(2, "Detached from thread %d.\n", tid);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000194 } else {
195 // Either the thread is dead, or we are already detached.
196 // The latter case is possible, for instance, if this function was called
197 // from a signal handler.
Sergey Matveev9be70fb2013-12-05 12:04:51 +0000198 VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000199 }
200 }
201}
202
203void ThreadSuspender::KillAllThreads() {
Francis Ricci5989dd22017-04-17 20:29:38 +0000204 for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
Alexander Potapenko845b5752013-03-15 14:37:21 +0000205 internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
Vedant Kumar552c0112015-09-30 23:52:54 +0000206 nullptr, nullptr);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000207}
208
// Repeatedly lists the threads of pid_ and attaches to any that are not yet
// suspended. Iterates because new threads can be spawned while we attach to
// the ones already listed; gives up after 30 rounds. Returns true if at least
// one thread ended up suspended.
bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool retry = true;
  InternalMmapVector<tid_t> threads;
  threads.reserve(128);
  for (int i = 0; i < 30 && retry; ++i) {
    retry = false;
    switch (thread_lister.ListThreads(&threads)) {
      case ThreadLister::Error:
        // Could not enumerate threads at all: undo what we did and bail out.
        ResumeAllThreads();
        return false;
      case ThreadLister::Incomplete:
        // The listing raced with thread creation/exit; do another round.
        retry = true;
        break;
      case ThreadLister::Ok:
        break;
    }
    for (tid_t tid : threads) {
      // A successful SuspendThread means we found a thread we had not seen
      // before, so the listing may already be stale -- retry.
      if (SuspendThread(tid))
        retry = true;
    }
  }
  // Implicit conversion: non-zero thread count means success.
  return suspended_threads_list_.ThreadCount();
}
233
234// Pointer to the ThreadSuspender instance for use in signal handler.
Vedant Kumar552c0112015-09-30 23:52:54 +0000235static ThreadSuspender *thread_suspender_instance = nullptr;
Alexander Potapenko845b5752013-03-15 14:37:21 +0000236
Dmitry Vyukov72f16972015-03-05 14:37:28 +0000237// Synchronous signals that should not be blocked.
238static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
239 SIGXCPU, SIGXFSZ };
Alexander Potapenko845b5752013-03-15 14:37:21 +0000240
// Die callback installed while the tracer runs.
static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  // Only act when we are actually running inside the tracer task.
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}
254
// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      // On abort, take the whole process down (threads share our address
      // space, which may now be corrupted).
      inst->KillAllThreads();
    else
      // For other sync signals, let the suspended threads run again before we
      // exit.
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    // Unblock the parent, which spins on arg->done (see StopTheWorld).
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}
273
Alexander Potapenko845b5752013-03-15 14:37:21 +0000274// Size of alternative stack for signal handlers in the tracer thread.
Vitaly Bukaac03fb62017-10-15 04:18:29 +0000275static const int kHandlerStackSize = 8192;
Alexander Potapenko845b5752013-03-15 14:37:21 +0000276
277// This function will be run as a cloned task.
Alexey Samsonovf9dbbda2013-03-18 06:27:13 +0000278static int TracerThread(void* argument) {
Alexander Potapenko845b5752013-03-15 14:37:21 +0000279 TracerThreadArgument *tracer_thread_argument =
280 (TracerThreadArgument *)argument;
281
Sergey Matveevadef7542013-10-08 18:01:03 +0000282 internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
283 // Check if parent is already dead.
Sergey Matveev6f7fb432013-10-09 13:36:20 +0000284 if (internal_getppid() != tracer_thread_argument->parent_pid)
Sergey Matveevadef7542013-10-08 18:01:03 +0000285 internal__exit(4);
286
Alexander Potapenko845b5752013-03-15 14:37:21 +0000287 // Wait for the parent thread to finish preparations.
Alexey Samsonovf9dbbda2013-03-18 06:27:13 +0000288 tracer_thread_argument->mutex.Lock();
289 tracer_thread_argument->mutex.Unlock();
Alexander Potapenko845b5752013-03-15 14:37:21 +0000290
Alexey Samsonovb92aa0f2015-08-24 22:21:44 +0000291 RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
Sergey Matveevef7db732013-08-26 13:20:31 +0000292
Dmitry Vyukov72f16972015-03-05 14:37:28 +0000293 ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000294 // Global pointer for the signal handler.
295 thread_suspender_instance = &thread_suspender;
296
297 // Alternate stack for signal handling.
Vitaly Buka2a209552018-05-07 05:56:36 +0000298 InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
Kostya Serebryanyc56d4442017-07-13 21:59:01 +0000299 stack_t handler_stack;
Alexander Potapenko845b5752013-03-15 14:37:21 +0000300 internal_memset(&handler_stack, 0, sizeof(handler_stack));
301 handler_stack.ss_sp = handler_stack_memory.data();
302 handler_stack.ss_size = kHandlerStackSize;
Vedant Kumar552c0112015-09-30 23:52:54 +0000303 internal_sigaltstack(&handler_stack, nullptr);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000304
Dmitry Vyukov72f16972015-03-05 14:37:28 +0000305 // Install our handler for synchronous signals. Other signals should be
306 // blocked by the mask we inherited from the parent thread.
307 for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
308 __sanitizer_sigaction act;
309 internal_memset(&act, 0, sizeof(act));
310 act.sigaction = TracerThreadSignalHandler;
311 act.sa_flags = SA_ONSTACK | SA_SIGINFO;
312 internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000313 }
314
315 int exit_code = 0;
316 if (!thread_suspender.SuspendAllThreads()) {
Sergey Matveev9be70fb2013-12-05 12:04:51 +0000317 VReport(1, "Failed suspending threads.\n");
Alexander Potapenko845b5752013-03-15 14:37:21 +0000318 exit_code = 3;
319 } else {
320 tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
321 tracer_thread_argument->callback_argument);
322 thread_suspender.ResumeAllThreads();
323 exit_code = 0;
324 }
Alexey Samsonovb92aa0f2015-08-24 22:21:44 +0000325 RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
Vedant Kumar552c0112015-09-30 23:52:54 +0000326 thread_suspender_instance = nullptr;
Dmitry Vyukov72f16972015-03-05 14:37:28 +0000327 atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
Alexander Potapenko845b5752013-03-15 14:37:21 +0000328 return exit_code;
329}
330
// RAII wrapper around an mmap-ed stack for the tracer task, with an
// inaccessible guard region at the low end to catch stack overflow.
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  // The stack grows down, so clone() is given the highest address of the
  // usable region.
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;  // lowest mapped address; the guard region starts here
};
354
Sergey Matveevef7db732013-08-26 13:20:31 +0000355// We have a limitation on the stack frame size, so some stuff had to be moved
356// into globals.
Alexander Potapenkod5802fe2014-01-31 11:29:51 +0000357static __sanitizer_sigset_t blocked_sigset;
358static __sanitizer_sigset_t old_sigset;
Dmitry Vyukov6f7ca812013-03-18 08:09:42 +0000359
// RAII helper that makes the process ptrace-attachable for the duration of
// StopTheWorld() and restores the previous state afterwards.
class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;  // saved PR_GET_DUMPABLE value
};
379
// When sanitizer output is being redirected to file (i.e. by using log_path),
// the tracer should write to the parent's log instead of trying to open a new
// file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    // Clear the globals once the tracer is gone.
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
393
// Suspends all threads of the current process, runs |callback| with the list
// of suspended threads, then resumes them. The heavy lifting happens in a
// cloned tracer task (see TracerThread above).
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporary modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow to block some signals used internally in pthread
  // implementation. We are fine with blocking them here, we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we setup a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with user program. Double note: if a
  // user does something along the lines of 'kill -11 pid', that can kill the
  // process even if user setup own handler for SEGV.
  // Thing to watch out for: this code should not change behavior of user code
  // in any observable way. In particular it should not override user signal
  // handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  // Spawn the tracer as a separate task sharing our address space.
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. Man page for sched_yield() says "In the Linux
    // implementation, sched_yield() always succeeds", so let's hope it does not
    // spoil errno. Note that this spin loop runs only for brief periods before
    // the tracer thread has suspended us and when it starts unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
471
Alexander Potapenko9cc2e432013-04-01 13:36:42 +0000472// Platform-specific methods from SuspendedThreadsList.
Sergey Matveev37432e82013-05-13 10:35:20 +0000473#if SANITIZER_ANDROID && defined(__arm__)
Alexey Samsonov8d18cc32013-04-03 07:06:10 +0000474typedef pt_regs regs_struct;
Sergey Matveev37432e82013-05-13 10:35:20 +0000475#define REG_SP ARM_sp
476
477#elif SANITIZER_LINUX && defined(__arm__)
478typedef user_regs regs_struct;
479#define REG_SP uregs[13]
480
481#elif defined(__i386__) || defined(__x86_64__)
Alexey Samsonov8d18cc32013-04-03 07:06:10 +0000482typedef user_regs_struct regs_struct;
Sergey Matveev37432e82013-05-13 10:35:20 +0000483#if defined(__i386__)
484#define REG_SP esp
485#else
486#define REG_SP rsp
Alexey Samsonov8d18cc32013-04-03 07:06:10 +0000487#endif
Vitaly Buka5813fca2020-09-17 12:15:00 -0700488#define ARCH_IOVEC_FOR_GETREGSET
Vitaly Bukaf97ca482020-10-19 22:44:34 -0700489// Support ptrace extensions even when compiled without required kernel support
490#ifndef NT_X86_XSTATE
491#define NT_X86_XSTATE 0x202
492#endif
Jeroen Dobbelaere8d33f082020-12-09 00:59:47 -0800493#ifndef PTRACE_GETREGSET
494#define PTRACE_GETREGSET 0x4204
495#endif
Vitaly Buka5813fca2020-09-17 12:15:00 -0700496// Compiler may use FP registers to store pointers.
497static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};
Alexey Samsonov8d18cc32013-04-03 07:06:10 +0000498
Kostya Serebryany2b427162013-05-15 12:36:29 +0000499#elif defined(__powerpc__) || defined(__powerpc64__)
500typedef pt_regs regs_struct;
501#define REG_SP gpr[PT_R1]
502
Kostya Serebryanyc1aa0e82013-06-03 14:49:25 +0000503#elif defined(__mips__)
504typedef struct user regs_struct;
Mohit K. Bhakkadbeb155b2016-03-16 08:23:10 +0000505# if SANITIZER_ANDROID
506# define REG_SP regs[EF_R29]
507# else
508# define REG_SP regs[EF_REG29]
509# endif
Kostya Serebryanyc1aa0e82013-06-03 14:49:25 +0000510
Adhemerval Zanellad7984712015-08-05 15:17:59 +0000511#elif defined(__aarch64__)
512typedef struct user_pt_regs regs_struct;
513#define REG_SP sp
Vitaly Buka03358be2020-09-17 17:42:33 -0700514static constexpr uptr kExtraRegs[] = {0};
Adhemerval Zanellad7984712015-08-05 15:17:59 +0000515#define ARCH_IOVEC_FOR_GETREGSET
516
Alexey Baturo7ce4dfb2020-10-04 12:38:06 +0300517#elif SANITIZER_RISCV64
518typedef struct user_regs_struct regs_struct;
Luís Marques1bc85cb2020-11-25 00:03:34 +0000519// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
520#undef REG_SP
Alexey Baturo7ce4dfb2020-10-04 12:38:06 +0300521#define REG_SP sp
522static constexpr uptr kExtraRegs[] = {0};
523#define ARCH_IOVEC_FOR_GETREGSET
524
Marcin Koscielnicki7ecdeb72016-04-26 10:41:30 +0000525#elif defined(__s390__)
526typedef _user_regs_struct regs_struct;
527#define REG_SP gprs[15]
Vitaly Buka03358be2020-09-17 17:42:33 -0700528static constexpr uptr kExtraRegs[] = {0};
Marcin Koscielnicki7ecdeb72016-04-26 10:41:30 +0000529#define ARCH_IOVEC_FOR_GETREGSET
530
Sergey Matveev37432e82013-05-13 10:35:20 +0000531#else
532#error "Unsupported architecture"
533#endif // SANITIZER_ANDROID && defined(__arm__)
534
// Returns the tid recorded at |index|; aborts if |index| is out of range.
tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}
539
// Number of threads recorded as suspended.
uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}
543
544bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
545 for (uptr i = 0; i < thread_ids_.size(); i++) {
546 if (thread_ids_[i] == thread_id) return true;
547 }
548 return false;
549}
550
// Records |tid| as suspended. The caller (SuspendThread) checks ContainsTid
// first, so no duplicate check is done here.
void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}
554
// Fetches the register state of the thread at |index| into |buffer| and its
// stack pointer into *|sp|. On architectures defining
// ARCH_IOVEC_FOR_GETREGSET, registers are read with PTRACE_GETREGSET into a
// dynamically grown buffer (NT_PRSTATUS first, then the first available
// regset from kExtraRegs); otherwise a single fixed-size PTRACE_GETREGS call
// is used.
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  constexpr uptr uptr_sz = sizeof(uptr);
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
  // Appends regset |regset| of |tid| to |buffer|, growing the buffer until the
  // kernel reports a size comfortably below capacity. Returns false on ptrace
  // failure (buffer restored to its previous size).
  auto append = [&](uptr regset) {
    uptr size = buffer->size();
    // NT_X86_XSTATE requires 64bit alignment.
    uptr size_up = RoundUpTo(size, 8 / uptr_sz);
    buffer->reserve(Max<uptr>(1024, size_up));
    struct iovec regset_io;
    for (;; buffer->resize(buffer->capacity() * 2)) {
      buffer->resize(buffer->capacity());
      uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
      regset_io.iov_base = buffer->data() + size_up;
      regset_io.iov_len = available_bytes;
      bool fail =
          internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                           (void *)regset, (void *)&regset_io),
                           &pterrno);
      if (fail) {
        VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
                (void *)regset, tid, pterrno);
        buffer->resize(size);
        return false;
      }

      // Far enough from the buffer size, no need to resize and repeat.
      if (regset_io.iov_len + 64 < available_bytes)
        break;
    }
    buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
    return true;
  };

  buffer->clear();
  bool fail = !append(NT_PRSTATUS);
  if (!fail) {
    // Accept the first available and do not report errors.
    for (uptr regs : kExtraRegs)
      if (regs && append(regs))
        break;
  }
#else
  buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
  bool fail = internal_iserror(
      internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
  if (fail)
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
#endif
  if (fail) {
    // ESRCH means that the given thread is not suspended or already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
    // we should notify caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }

  // The general-purpose registers are at the start of the buffer; extract the
  // architecture-specific stack-pointer field (REG_SP macro above).
  *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
  return REGISTERS_AVAILABLE;
}
618
Vedant Kumar552c0112015-09-30 23:52:54 +0000619} // namespace __sanitizer
Alexander Potapenko845b5752013-03-15 14:37:21 +0000620
Bill Schmidt29791622015-12-08 21:54:39 +0000621#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
622 // || defined(__aarch64__) || defined(__powerpc64__)
Maxim Ostapenkode3b9a22017-04-11 14:58:26 +0000623 // || defined(__s390__) || defined(__i386__) || defined(__arm__)