[GWP-ASan] Add locking around unwinder for atfork protection.

Unwinders (like libc's backtrace()) may acquire locks of their own
(such as the libdl lock). To be fork-safe, we must ensure that any
thread in the middle of an unwind has released those locks before the
process forks. Wrap a new lock around every unwinder call so the atfork
handlers can hold it across fork().
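For context, this follows the standard pthread_atfork discipline:
acquire every relevant lock in the prepare handler and release it in
both the parent and child handlers. A minimal sketch of the pattern
that installAtFork() sets up (handler bodies and the getSingleton()
accessor are illustrative, not a verbatim copy of the GWP-ASan code):

  // Sketch: pair the atfork handlers with disable()/enable() so the
  // allocator's locks are held across fork().
  #include <pthread.h>

  static void installAtForkSketch() {
    pthread_atfork(
        /* prepare */ [] { GuardedPoolAllocator::getSingleton()->disable(); },
        /* parent  */ [] { GuardedPoolAllocator::getSingleton()->enable(); },
        /* child   */ [] { GuardedPoolAllocator::getSingleton()->enable(); });
  }

With this patch, disable() also acquires BacktraceMutex, so the prepare
handler blocks until any in-flight unwind has finished and dropped the
libdl lock before the fork proceeds.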

Reviewed By: eugenis

Differential Revision: https://reviews.llvm.org/D95889

GitOrigin-RevId: 30973f6fe01cc0a9624147466f0c54b91a1b61d7
diff --git a/guarded_pool_allocator.cpp b/guarded_pool_allocator.cpp
index 86304d9..5e3455e 100644
--- a/guarded_pool_allocator.cpp
+++ b/guarded_pool_allocator.cpp
@@ -103,9 +103,15 @@
     installAtFork();
 }
 
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+  PoolMutex.lock();
+  BacktraceMutex.lock();
+}
 
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+  PoolMutex.unlock();
+  BacktraceMutex.unlock();
+}
 
 void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                    void *Arg) {
@@ -232,7 +238,10 @@
       roundUpTo(Size, PageSize));
 
   Meta->RecordAllocation(UserPtr, Size);
-  Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  {
+    ScopedLock UL(BacktraceMutex);
+    Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  }
 
   return reinterpret_cast<void *>(UserPtr);
 }
@@ -281,6 +290,7 @@
     // otherwise non-reentrant unwinders may deadlock.
     if (!getThreadLocals()->RecursiveGuard) {
       ScopedRecursiveGuard SRG;
+      ScopedLock UL(BacktraceMutex);
       Meta->DeallocationTrace.RecordBacktrace(Backtrace);
     }
   }
diff --git a/guarded_pool_allocator.h b/guarded_pool_allocator.h
index 86521f9..26a4599 100644
--- a/guarded_pool_allocator.h
+++ b/guarded_pool_allocator.h
@@ -196,6 +196,10 @@
 
   // A mutex to protect the guarded slot and metadata pool for this class.
   Mutex PoolMutex;
+  // Some unwinders can grab the libdl lock internally. To provide atfork
+  // protection, this mutex is held around every unwinder call so that a
+  // thread mid-unwind releases the libdl lock before the process forks.
+  Mutex BacktraceMutex;
  // Record the number of allocations that we've sampled. We store this
  // amount so that we don't randomly choose to recycle a slot that previously
  // had an allocation before all the slots have been utilised.
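For reference, here is a hypothetical standalone repro of the hazard
this mutex guards against (a sketch, not part of the patch): fork()
snapshots only the calling thread, so if another thread is inside
backtrace() holding the libdl lock at that instant, the child inherits
a lock that no thread will ever release.

  // Sketch: child deadlocks on a libdl lock inherited mid-unwind.
  #include <execinfo.h>
  #include <pthread.h>
  #include <sys/wait.h>
  #include <unistd.h>

  static void *unwindLoop(void *) {
    void *Frames[16];
    for (;;)
      backtrace(Frames, 16); // may take the libdl lock internally
    return nullptr;
  }

  int main() {
    pthread_t T;
    pthread_create(&T, nullptr, unwindLoop, nullptr);
    pid_t Pid = fork(); // may land while unwindLoop holds the libdl lock
    if (Pid == 0) {
      void *Frames[16];
      backtrace(Frames, 16); // child can hang on the inherited lock
      _exit(0);
    }
    waitpid(Pid, nullptr, 0);
    return 0;
  }

Taking BacktraceMutex in disable() closes this window: the atfork
prepare handler waits for the in-flight unwind to finish (and drop the
libdl lock) before fork() is allowed to run.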