[scudo] Fix one missing block in range marking

When a range contains only one block, we may fail to mark the pages touched
by that block as can-be-released. This happens when the last group contains
only a single block.
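
As a minimal sketch of the failing case (hypothetical numbers, not tied to
any particular size class): with PageSize = 4096 and BlockSize = 8704,
suppose the last group covers [FromInRegion, ToInRegion) = [12288, 17408),
and the only block overlapping that range spans [8704, 17408). Then

  uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize); // = 8704

and LastBlockInRange (8704) < FromInRegion (12288), so the old early return
bailed out before the pages at offsets 12288 and 16384 were marked, even
though the single block in the range touches them.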

Also enhance the existing tests and add a new test covering the last
block.

Differential Revision: https://reviews.llvm.org/D149866

GitOrigin-RevId: 6fb70a8a607ebb5ceb27115f9e61665264e1cddb
diff --git a/release.h b/release.h
index b7a0131..dadf652 100644
--- a/release.h
+++ b/release.h
@@ -480,8 +480,9 @@
     }
 
     uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);
-    if (LastBlockInRange < FromInRegion)
-      return;
+
+    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
+    // point because the range may contain only one block.
 
     // When the last block sits across `To`, we can't just mark the pages
     // occupied by the last block as all counted. Instead, we increment the
@@ -540,13 +541,18 @@
       //   last block
       const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
       const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
-      // Only the last page touched by the last block needs to mark the trailing
-      // blocks. If the difference between `RoundedRegionSize` and
+      // If the difference between `RoundedRegionSize` and
       // `TrailingBlockBase` is larger than a page, that implies the reported
       // `RegionSize` may not be accurate.
       DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);
+
+      // The trailing blocks only need to be accounted for on the last page
+      // touched by the last block. Note that if the last "pretend" block
+      // straddles the page boundary, we still have to count it so that the
+      // per-page block counting stays consistent.
       uptr NumTrailingBlocks =
-          roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) /
+          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
+           BlockSize - 1) /
           BlockSize;
       if (NumTrailingBlocks > 0) {
         PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
diff --git a/tests/release_test.cpp b/tests/release_test.cpp
index 1fc4284..b6ec9fc 100644
--- a/tests/release_test.cpp
+++ b/tests/release_test.cpp
@@ -315,12 +315,13 @@
     const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
 
     std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = 0; Block + BlockSize <= RoundedRegionSize;
-         Block += BlockSize) {
-      for (scudo::uptr page = Block / PageSize;
-           page <= (Block + BlockSize - 1) / PageSize; ++page) {
-        ASSERT_LT(page, Pages.size());
-        ++Pages[page];
+    for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) {
+      for (scudo::uptr Page = Block / PageSize;
+           Page <= (Block + BlockSize - 1) / PageSize &&
+           Page < RoundedRegionSize / PageSize;
+           ++Page) {
+        ASSERT_LT(Page, Pages.size());
+        ++Pages[Page];
       }
     }
 
@@ -410,8 +411,6 @@
 template <class SizeClassMap> void testReleasePartialRegion() {
   typedef FreeBatch<SizeClassMap> Batch;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
-  const scudo::uptr ReleaseBase = PageSize;
-  const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
 
   for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
     // In the following, we want to ensure the region includes at least 2 pages
@@ -419,8 +418,11 @@
     // the last block is tricky, so we always test the case that includes the
     // last block.
     const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
+    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
     const scudo::uptr RegionSize =
-        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) * 2, BlockSize) +
+        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
+                           BlockSize) +
         BlockSize;
     const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
 
@@ -429,7 +431,7 @@
 
     // Skip the blocks in the first page and add the remaining.
     std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
          Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
       for (scudo::uptr Page = Block / PageSize;
            Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
@@ -444,7 +446,7 @@
       ++Pages.back();
 
     Batch *CurrentBatch = nullptr;
-    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
          Block < RegionSize; Block += BlockSize) {
       if (CurrentBatch == nullptr ||
           CurrentBatch->getCount() == Batch::MaxCount) {
@@ -459,7 +461,7 @@
       auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
       ReleasedPagesRecorder Recorder(ReleaseBase);
       releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-      const scudo::uptr FirstBlock = scudo::roundUpSlow(PageSize, BlockSize);
+      const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);
 
       for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
         if (P < FirstBlock) {
@@ -563,6 +565,69 @@
   testReleasePartialRegion<scudo::SvelteSizeClassMap>();
 }
 
+template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+
+  // We want to test that a memory group containing only a single block is
+  // handled properly. The case looks like:
+  //
+  //   From                     To
+  //     +----------------------+
+  //  +------------+------------+
+  //  |            |            |
+  //  +------------+------------+
+  //                            ^
+  //                        RegionSize
+  //
+  // Note that `From` will be page aligned.
+  //
+  // If the second-to-last block is aligned at `From`, then we expect all the
+  // pages after `From` to be marked as can-be-released. Otherwise, only the
+  // pages touched by the last block will be marked as can-be-released.
+  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
+    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
+    const scudo::uptr To =
+        From % BlockSize == 0
+            ? From + BlockSize
+            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
+    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);
+
+    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
+    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
+         Block += BlockSize) {
+      for (scudo::uptr Page = Block / PageSize;
+           Page <= (Block + BlockSize - 1) / PageSize &&
+           Page < RoundedRegionSize / PageSize;
+           ++Page) {
+        ASSERT_LT(Page, Pages.size());
+        ++Pages[Page];
+      }
+    }
+
+    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+                                      /*ReleaseSize=*/To,
+                                      /*ReleaseBase=*/0U);
+    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
+                                  /*RegionSize=*/To);
+
+    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
+      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
+          Pages[Page / PageSize]) {
+        EXPECT_TRUE(
+            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
+      }
+    }
+  } // for each size class
+}
+
+TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
+  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
+  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
+  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
+  testReleaseRangeWithSingleBlock<scudo::SvelteSizeClassMap>();
+}
+
 TEST(ScudoReleaseTest, BufferPool) {
   constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
   constexpr scudo::uptr StaticBufferSize = 512U;