//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
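  // 1021 slots plus the three header fields below make sizeof(QuarantineBatch)
  // exactly 4 KiB on 32-bit and 8 KiB on 64-bit targets.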
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KiB.

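// The Callback type supplies the memory-management and recycling hooks used
// below. A minimal sketch of the interface this class expects (hypothetical
// example; each tool, e.g. ASan's allocator, provides its own implementation):
//
//   struct ExampleCallback {
//     void *Allocate(uptr size);           // Backing storage for batches.
//     void Deallocate(void *ptr);          // Release a drained batch.
//     void PreQuarantine(Node *ptr);       // Node is about to be quarantined.
//     void Recycle(Node *ptr);             // Node leaves the quarantine.
//     void RecyclePassThrough(Node *ptr);  // Quarantine is disabled.
//   };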
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this lets us perform just one atomic read per
    // Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetMaxSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetMaxCacheSize() const { return atomic_load_relaxed(&max_cache_size_); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr max_cache_size = GetMaxCacheSize();
    if (max_cache_size && size <= GetMaxSize()) {
      cb.PreQuarantine(ptr);
      c->Enqueue(cb, ptr, size);
    } else {
      // GetMaxCacheSize() == 0 only when GetMaxSize() == 0 (see Init).
      cb.RecyclePassThrough(ptr);
    }
    // Check the cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > max_cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetMaxSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
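    // Recycle() releases recycle_mutex_ before invoking the recycle callbacks.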
    Recycle(0, cb);
  }

  void PrintStats() const {
    // This assumes that the world is stopped, just like the allocator's
    // PrintStats() does.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetMaxSize() >> 20, GetMaxCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb)
      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (since the memory they use
      // is counted against the quarantine limit) can outweigh the user's
      // actual quarantined chunks, which defeats the purpose of the
      // quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the quarantined batch list is unlikely to contain batches suitable
      // for merging.
      const uptr kOverheadThresholdPercents = 100;
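      // The check below is algebraically equivalent to:
      //   overhead_size * 100 > quarantined_payload * kOverheadThresholdPercents
      // where quarantined_payload == cache_size - overhead_size, i.e. merge
      // once the batch headers exceed kOverheadThresholdPercents percent of
      // the user's quarantined memory (with 100, once the overhead outgrows
      // the payload itself).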
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
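      // Software-prefetch the chunks: warm up the first kPrefetch entries,
      // then stay kPrefetch entries ahead of the recycle loop to hide
      // cache-miss latency.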
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
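
// A minimal usage sketch with hypothetical names (each tool defines its own
// callback and chunk types, and must call Init() before the first Put()):
//
//   typedef Quarantine<ExampleCallback, ExampleChunk> ExampleQuarantine;
//   static ExampleQuarantine quarantine(LINKER_INITIALIZED);
//   static THREADLOCAL ExampleQuarantine::Cache quarantine_cache;
//
//   quarantine.Init(kGlobalQuarantineSize, kThreadLocalQuarantineSize);
//   quarantine.Put(&quarantine_cache, ExampleCallback(), chunk, chunk_size);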

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
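    // Open a new batch when the list is empty or the last batch is full;
    // otherwise append to the last batch.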
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the emptied batch from the list and account for its size;
        // after merge() its size is just sizeof(QuarantineBatch), so only the
        // header overhead is subtracted (the payload stays in current).
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

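  // Not an atomic read-modify-write: writers either own this cache (the
  // per-thread case) or hold cache_mutex_ (the global cache), so a plain
  // load-then-store suffices; unsynchronized readers of Size() only need an
  // approximate value.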
  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H