//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAT_MAP_H
#define SANITIZER_FLAT_MAP_H

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_local_address_space_view.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps integers in range [0, kSize) to values.
template <typename T, u64 kSize,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class FlatMap {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() { internal_memset(map_, 0, sizeof(map_)); }

  constexpr uptr size() const { return kSize; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize);
    return true;
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

 private:
  T map_[kSize];
};
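
// Example (illustrative only, not part of the interface): FlatMap is a
// fixed-size, eagerly allocated lookup table.
//   FlatMap<u32, 1024> counters;
//   counters.Init();               // zero-fills the whole array
//   counters[42] = 7;              // direct indexing; storage already exists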

// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
// It is implemented as a two-dimensional array: an array of kSize1 pointers
// to arrays of kSize2 values each. The second-level arrays are mmapped on
// demand. Each value is initially zero and can be set to something else only
// once. Setting and getting values from multiple threads is safe without
// extra locking.
template <typename T, u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class TwoLevelMap {
  static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");

 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() {
    mu_.Init();
    internal_memset(map1_, 0, sizeof(map1_));
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      UnmapOrDie(p, MmapSize());
    }
    Init();
  }

  uptr MemoryUsage() const {
    uptr res = 0;
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      res += MmapSize();
    }
    return res;
  }

  constexpr uptr size() const { return kSize1 * kSize2; }
  constexpr uptr size1() const { return kSize1; }
  constexpr uptr size2() const { return kSize2; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    return Get(idx / kSize2);
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::Load(&map2[idx % kSize2]);
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
  }

  void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Lock(); }

  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Unlock(); }

 private:
  constexpr uptr MmapSize() const {
    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
  }

  T *Get(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    return reinterpret_cast<T *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  T *GetOrCreate(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    // This load should use memory_order_acquire/consume, but we use
    // memory_order_relaxed for performance reasons (it matters on arm64). We
    // expect memory_order_relaxed to be effectively equivalent to
    // memory_order_consume here for all relevant architectures: all dependent
    // data is reachable only by dereferencing the resulting pointer. If the
    // relaxed load fails to see the stored pointer, the code falls back to
    // Create(), which reloads the value under the locked mutex; the mutex
    // acts as the memory barrier.
    T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
    if (LIKELY(res))
      return res;
    return Create(idx);
  }

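  // Slow path of GetOrCreate(): double-checked locking. Re-check under the
  // mutex so that at most one thread mmaps the second-level array for a
  // given index; losers of the race reuse the winner's array.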
  NOINLINE T *Create(uptr idx) const {
    SpinMutexLock l(&mu_);
    T *res = Get(idx);
    if (!res) {
      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
      atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                   memory_order_release);
    }
    return res;
  }

  mutable StaticSpinMutex mu_;
  mutable atomic_uintptr_t map1_[kSize1];
};
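
// Example (illustrative sketch, not part of this header): an index maps onto
// the two levels as map1_[idx / kSize2][idx % kSize2], so with, say,
// kSize1 = 128 and kSize2 = 512, index 1000 lands in second-level array 1,
// slot 488, and that array is mmapped only on first use:
//   TwoLevelMap<u32, 128, 512> m;
//   m.Init();
//   m[1000] = 7;                   // mmaps second-level array 1, then stores
//   CHECK(m.contains(1000));       // true: second-level array 1 now exists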

template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;

template <u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
using TwoLevelByteMap = TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy>;
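
// Example (illustrative only): TwoLevelByteMap<1 << 12, 1 << 13> addresses
// 2^25 byte-sized entries while committing second-level pages only for the
// index ranges actually touched.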
}  // namespace __sanitizer

#endif  // SANITIZER_FLAT_MAP_H