[NFC][sanitizer] Add entry point for compression

Add a Compression::Test type which only pretends to pack but does
nothing useful. For now it is called only from tests.
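
For illustration, here is a minimal standalone sketch (not part of this
patch) of the transform the Test type applies: "packing" flips every
frame with bitwise NOT and "unpacking" flips it back, so the data
survives a round trip while the pack/unpack state machine is exercised.
FakeTransform, frames, and n are hypothetical names; in the patch the
same loop lives in BlockInfo::Pack() and BlockInfo::GetOrUnpack():

  // Hypothetical sketch of the fake compression behind Compression::Test.
  static void FakeTransform(uptr *frames, uptr n) {
    // Bitwise NOT is its own inverse, so running the same loop on
    // Pack() and again on the first Load() restores the original frames.
    for (uptr i = 0; i < n; ++i) frames[i] = ~frames[i];
  }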

Depends on D114493.

Reviewed By: kstoimenov

Differential Revision: https://reviews.llvm.org/D114494
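
Usage sketch (hypothetical caller, not in this patch): Store() reports
via its pack out-parameter how many blocks the call completed, the
caller then decides whether to Pack(), and the first Load() that
touches a packed block transparently unpacks it. Here trace stands for
any StackTrace:

  StackStore store;
  uptr pack = 0;
  StackStore::Id id = store.Store(trace, &pack);
  if (pack)  // This Store() completed one or more blocks.
    store.Pack(StackStore::Compression::Test);  // Returns released bytes.
  StackTrace loaded = store.Load(id);  // Unpacks the block if needed.
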
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 0f880fd..b1c15d8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -46,13 +46,13 @@
   return OffsetToId(idx);
 }
 
-StackTrace StackStore::Load(Id id) const {
+StackTrace StackStore::Load(Id id) {
   if (!id)
     return {};
   uptr idx = IdToOffset(id);
   uptr block_idx = GetBlockIdx(idx);
   CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
-  const uptr *stack_trace = blocks_[block_idx].Get();
+  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack();
   if (!stack_trace)
     return {};
   stack_trace += GetInBlockIdx(idx);
@@ -61,9 +61,11 @@
 }
 
 uptr StackStore::Allocated() const {
-  return RoundUpTo(atomic_load_relaxed(&total_frames_) * sizeof(uptr),
-                   GetPageSizeCached()) +
-         sizeof(*this);
+  uptr next_block = GetBlockIdx(
+      RoundUpTo(atomic_load_relaxed(&total_frames_), kBlockSizeFrames));
+  uptr res = 0;
+  for (uptr i = 0; i < next_block; ++i) res += blocks_[i].Allocated();
+  return res + sizeof(*this);
 }
 
 uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
@@ -90,8 +92,10 @@
   }
 }
 
-void StackStore::Pack() {
-  // TODO
+uptr StackStore::Pack(Compression type) {
+  uptr res = 0;
+  for (BlockInfo &b : blocks_) res += b.Pack(type);
+  return res;
 }
 
 void StackStore::TestOnlyUnmap() {
@@ -124,6 +128,60 @@
   return Create();
 }
 
+uptr *StackStore::BlockInfo::GetOrUnpack() {
+  SpinMutexLock l(&mtx_);
+  switch (state) {
+    case State::Storing:
+      state = State::Unpacked;
+      FALLTHROUGH;
+    case State::Unpacked:
+      return Get();
+    case State::Packed:
+      break;
+  }
+
+  uptr *ptr = Get();
+  CHECK_NE(nullptr, ptr);
+  // Fake unpacking: flipping the bits again restores the original frames.
+  for (uptr i = 0; i < kBlockSizeFrames; ++i) ptr[i] = ~ptr[i];
+  state = State::Unpacked;
+  return Get();
+}
+
+uptr StackStore::BlockInfo::Pack(Compression type) {
+  if (type == Compression::None)
+    return 0;
+
+  SpinMutexLock l(&mtx_);
+  switch (state) {
+    case State::Unpacked:
+    case State::Packed:
+      return 0;
+    case State::Storing:
+      break;
+  }
+
+  uptr *ptr = Get();
+  if (!ptr || !Stored(0))
+    return 0;
+
+  // Fake packing: flip bits in place and pretend 9/10 of the block is freed.
+  for (uptr i = 0; i < kBlockSizeFrames; ++i) ptr[i] = ~ptr[i];
+  state = State::Packed;
+  return kBlockSizeBytes - kBlockSizeBytes / 10;
+}
+
+uptr StackStore::BlockInfo::Allocated() const {
+  SpinMutexLock l(&mtx_);
+  switch (state) {
+    case State::Packed:
+      return kBlockSizeBytes / 10;
+    case State::Unpacked:
+    case State::Storing:
+      return kBlockSizeBytes;
+  }
+}
+
 void StackStore::BlockInfo::TestOnlyUnmap() {
   if (uptr *ptr = Get())
     UnmapOrDie(ptr, StackStore::kBlockSizeBytes);
@@ -134,4 +192,9 @@
          kBlockSizeFrames;
 }
 
+bool StackStore::BlockInfo::IsPacked() const {
+  SpinMutexLock l(&mtx_);
+  return state == State::Packed;
+}
+
 }  // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index a5b457b0..e0bc4e9 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -23,6 +23,11 @@
   static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr);
 
  public:
+  enum class Compression : u8 {
+    None = 0,
+    Test,
+  };
+
   constexpr StackStore() = default;
 
   using Id = u32;  // Enough for 2^32 * sizeof(uptr) bytes of traces.
@@ -31,10 +36,14 @@
 
   Id Store(const StackTrace &trace,
            uptr *pack /* number of blocks completed by this call */);
-  StackTrace Load(Id id) const;
+  StackTrace Load(Id id);
   uptr Allocated() const;
 
-  void Pack();
+  // Packs all blocks which do not expect any more writes. A block is packed
+  // only once. As soon as a trace from a packed block is requested, the
+  // block is unpacked and stays unpacked after that.
+  // Returns the number of released bytes.
+  uptr Pack(Compression type);
 
   void TestOnlyUnmap();
 
@@ -71,16 +80,28 @@
     // Counter to track store progress to know when we can Pack() the block.
     atomic_uint32_t stored_;
     // Protects alloc of new blocks.
-    StaticSpinMutex mtx_;
+    mutable StaticSpinMutex mtx_;
+
+    enum class State : u8 {
+      Storing = 0,
+      Packed,
+      Unpacked,
+    };
+    State state GUARDED_BY(mtx_);
 
     uptr *Create();
 
    public:
     uptr *Get() const;
     uptr *GetOrCreate();
+    uptr *GetOrUnpack();
+    uptr Pack(Compression type);
+    uptr Allocated() const;
     void TestOnlyUnmap();
     bool Stored(uptr n);
+    bool IsPacked() const;
   };
+
   BlockInfo blocks_[kBlockCount] = {};
 };
 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 2579c1b..527221b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -76,6 +76,8 @@
   stack_hash = hash;
   uptr pack = 0;
   store_id = stackStore.Store(args, &pack);
+  if (pack)
+    stackStore.Pack(StackStore::Compression::None);
 }
 
 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
index 50f0614..ddc7ba0 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
@@ -55,9 +55,16 @@
     return res;
   }
 
+  uptr CountPackedBlocks() const {
+    uptr res = 0;
+    for (const BlockInfo& b : store_.blocks_) res += b.IsPacked();
+    return res;
+  }
+
   uptr IdToOffset(StackStore::Id id) const { return store_.IdToOffset(id); }
 
   static constexpr uptr kBlockSizeFrames = StackStore::kBlockSizeFrames;
+  static constexpr uptr kBlockSizeBytes = StackStore::kBlockSizeBytes;
 
   StackStore store_ = {};
 };
@@ -121,4 +128,51 @@
   EXPECT_EQ(GetTotalFramesCount() / kBlockSizeFrames, total_ready);
 }
 
+struct StackStorePackTest : public StackStoreTest,
+                            public ::testing::WithParamInterface<
+                                std::pair<StackStore::Compression, uptr>> {};
+
+INSTANTIATE_TEST_SUITE_P(
+    PackUnpacks, StackStorePackTest,
+    ::testing::ValuesIn({
+        StackStorePackTest::ParamType(StackStore::Compression::Test, 4),
+    }));
+
+TEST_P(StackStorePackTest, PackUnpack) {
+  std::vector<StackStore::Id> ids;
+  StackStore::Compression type = GetParam().first;
+  uptr expected_ratio = GetParam().second;
+  ForEachTrace([&](const StackTrace& s) {
+    uptr pack = 0;
+    ids.push_back(store_.Store(s, &pack));
+    if (pack) {
+      uptr before = store_.Allocated();
+      uptr diff = store_.Pack(type);
+      uptr after = store_.Allocated();
+      EXPECT_EQ(before - after, diff);
+      EXPECT_LT(after, before);
+      EXPECT_GE(kBlockSizeBytes / (kBlockSizeBytes - (before - after)),
+                expected_ratio);
+    }
+  });
+  uptr packed_blocks = CountPackedBlocks();
+  // Unpack one arbitrary block.
+  store_.Load(kBlockSizeFrames * 7 + 123);
+  EXPECT_EQ(packed_blocks - 1, CountPackedBlocks());
+
+  // Unpack all blocks.
+  auto id = ids.begin();
+  ForEachTrace([&](const StackTrace& s) {
+    StackTrace trace = store_.Load(*(id++));
+    EXPECT_EQ(s.size, trace.size);
+    EXPECT_EQ(s.tag, trace.tag);
+    EXPECT_EQ(std::vector<uptr>(s.trace, s.trace + s.size),
+              std::vector<uptr>(trace.trace, trace.trace + trace.size));
+  });
+  EXPECT_EQ(0u, CountPackedBlocks());
+
+  EXPECT_EQ(0u, store_.Pack(type));
+  EXPECT_EQ(0u, CountPackedBlocks());
+}
+
 }  // namespace __sanitizer