[scudo] releaseToOSMaybe can fail if it can't allocate PageMap

PageMap is allocated with MAP_ALLOWNOMEM if there's no static buffer
left, so the allocation can fail and return nullptr without triggering
any assertion. Instead of crashing in the middle of releaseToOSMaybe,
just return and let the program handle the failure.
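For illustration only (not part of the patch), the resulting control flow
is roughly the sketch below; the names mirror the scudo functions touched
here, but the types and the tryAllocatePageMap helper are simplified,
hypothetical stand-ins:

    #include <cstdint>

    using uptr = uintptr_t;

    // Hypothetical stand-in for the MAP_ALLOWNOMEM-backed buffer mapping;
    // in scudo this can legitimately report "no memory" instead of asserting.
    static bool tryAllocatePageMap() { return false; /* simulate OOM */ }

    struct PageReleaseContext {
      bool PageMapAllocated = false;

      // Now returns false when the backing buffer could not be mapped, e.g.
      // the static buffers are exhausted and the fallback mapping fails.
      bool ensurePageMapAllocated() {
        if (PageMapAllocated)
          return true;
        PageMapAllocated = tryAllocatePageMap();
        return PageMapAllocated;
      }
    };

    // Caller in a releaseToOSMaybe-style path: report 0 bytes released
    // instead of crashing when the PageMap cannot be allocated.
    static uptr releasePagesMaybe(PageReleaseContext &Context) {
      if (!Context.ensurePageMapAllocated())
        return 0; // rare OOM-like case; this release attempt is skipped
      // ... mark free blocks in the PageMap and release qualifying ranges ...
      return 0; // bytes released
    }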

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D151379

GitOrigin-RevId: 11ea40cff5413057d823a4b3ac5ac419b674dc56
diff --git a/primary32.h b/primary32.h
index 726db75..b3d6e53 100644
--- a/primary32.h
+++ b/primary32.h
@@ -871,6 +871,11 @@
                                        RegionIndex, AllocatedGroupSize,
                                        /*MayContainLastBlockInRegion=*/true);
       }
+
+      // We may not be able to do the page release in the rare case that
+      // the PageMap allocation fails.
+      if (UNLIKELY(!Context.hasBlockMarked()))
+        return 0;
     }
 
     if (!Context.hasBlockMarked())
diff --git a/primary64.h b/primary64.h
index 3924837..d3a1aea 100644
--- a/primary64.h
+++ b/primary64.h
@@ -1105,6 +1105,10 @@
                                             ReleaseOffset);
     PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                ReleaseRangeSize, ReleaseOffset);
+    // We may not be able to do the page release in the rare case that the
+    // PageMap allocation fails.
+    if (UNLIKELY(!Context.ensurePageMapAllocated()))
+      return 0;
 
     for (BatchGroup &BG : GroupToRelease) {
       const uptr BatchGroupBase =
diff --git a/release.h b/release.h
index dadf652..9ffc88d 100644
--- a/release.h
+++ b/release.h
@@ -235,7 +235,6 @@
         PackingRatioLog;
     BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
     Buffer = Buffers.getBuffer(BufferSize);
-    DCHECK_NE(Buffer, nullptr);
   }
 
   bool isAllocated() const { return !!Buffer; }
@@ -423,25 +422,27 @@
     return PageMap.isAllocated();
   }
 
-  void ensurePageMapAllocated() {
+  bool ensurePageMapAllocated() {
     if (PageMap.isAllocated())
-      return;
+      return true;
     PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
-    DCHECK(PageMap.isAllocated());
+    // TODO: Log a message when the PageMap allocation fails.
+    return PageMap.isAllocated();
   }
 
   // Mark all the blocks in the given range [From, to). Instead of visiting all
   // the blocks, we will just mark the page as all counted. Note the `From` and
   // `To` has to be page aligned but with one exception, if `To` is equal to the
   // RegionSize, it's not necessary to be aligned with page size.
-  void markRangeAsAllCounted(uptr From, uptr To, uptr Base,
+  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                              const uptr RegionIndex, const uptr RegionSize) {
     DCHECK_LT(From, To);
     DCHECK_LE(To, Base + RegionSize);
     DCHECK_EQ(From % PageSize, 0U);
     DCHECK_LE(To - From, RegionSize);
 
-    ensurePageMapAllocated();
+    if (!ensurePageMapAllocated())
+      return false;
 
     uptr FromInRegion = From - Base;
     uptr ToInRegion = To - Base;
@@ -449,7 +450,7 @@
 
     // The straddling block sits across entire range.
     if (FirstBlockInRange >= ToInRegion)
-      return;
+      return true;
 
     // First block may not sit at the first pape in the range, move
     // `FromInRegion` to the first block page.
@@ -516,14 +517,17 @@
       PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                    getPageIndex(ToInRegion - 1));
     }
+
+    return true;
   }
 
   template <class TransferBatchT, typename DecompactPtrT>
-  void markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
+  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                               DecompactPtrT DecompactPtr, const uptr Base,
                               const uptr RegionIndex, const uptr RegionSize,
                               bool MayContainLastBlockInRegion) {
-    ensurePageMapAllocated();
+    if (!ensurePageMapAllocated())
+      return false;
 
     if (MayContainLastBlockInRegion) {
       const uptr LastBlockInRegion =
@@ -582,6 +586,8 @@
         }
       }
     }
+
+    return true;
   }
 
   uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
diff --git a/tests/release_test.cpp b/tests/release_test.cpp
index b6ec9fc..41f0b16 100644
--- a/tests/release_test.cpp
+++ b/tests/release_test.cpp
@@ -22,13 +22,16 @@
   for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
     // Various valid counter's max values packed into one word.
     scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
+    ASSERT_TRUE(PageMap2N.isAllocated());
     EXPECT_EQ(sizeof(scudo::uptr), PageMap2N.getBufferSize());
     // Check the "all bit set" values too.
     scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
+    ASSERT_TRUE(PageMap2N1_1.isAllocated());
     EXPECT_EQ(sizeof(scudo::uptr), PageMap2N1_1.getBufferSize());
     // Verify the packing ratio, the counter is Expected to be packed into the
     // closest power of 2 bits.
     scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
+    ASSERT_TRUE(PageMap.isAllocated());
     EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpPowerOfTwo(I + 1),
               PageMap.getBufferSize());
   }
@@ -40,6 +43,7 @@
         (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
     scudo::RegionPageMap PageMap(1U, NumCounters,
                                        1UL << ((1UL << I) - 1));
+    ASSERT_TRUE(PageMap.isAllocated());
     PageMap.inc(0U, 0U);
     for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
       EXPECT_EQ(0UL, PageMap.get(0U, C));