//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H

#include <stdio.h>

#include "sanitizer_atomic.h"
#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
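  // Each element of `tab` heads a singly linked chain of Nodes that hash to
  // the same bucket. The low kIdSizeLog bits of a bucket value hold the id of
  // the chain's head node; the remaining high bits (kLockMask) serve as a
  // per-bucket spin lock while the chain is being modified.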
  static constexpr u32 kIdSizeLog =
      sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. */);
  static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;
  static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
  static constexpr int kTabSize = 1 << kTabSizeLog;  // Hash table size.
  static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;
  static constexpr u32 kLockMask = ~kUnlockMask;

 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  typedef typename Node::hash_type hash_type;

  static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;
  static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;

  // Maps a stack trace to a unique id.
  u32 Put(args_type args, bool *inserted = nullptr);
  // Retrieves a stored stack trace by its id.
  args_type Get(u32 id);

  StackDepotStats GetStats() const {
    return {
        atomic_load_relaxed(&n_uniq_ids),
        nodes.MemoryUsage() + Node::allocated(),
    };
  }

  void LockBeforeFork();
  void UnlockAfterFork(bool fork_child);
  void PrintAll();

  void TestOnlyUnmap() {
    nodes.TestOnlyUnmap();
    internal_memset(this, 0, sizeof(*this));
  }

 private:
  friend Node;
  u32 find(u32 s, args_type args, hash_type hash) const;
  static u32 lock(atomic_uint32_t *p);
  static void unlock(atomic_uint32_t *p, u32 s);
  atomic_uint32_t tab[kTabSize];  // Hash table of Nodes.

  atomic_uint32_t n_uniq_ids;  // Number of unique ids issued so far.

  // Node storage, indexed by id.
  TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;

  friend class StackDepotReverseMap;
};

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(
    u32 s, args_type args, hash_type hash) const {
  // Searches the chain starting at node s for the stack; returns its id, or 0
  // if it is not present.
  for (; s;) {
    const Node &node = nodes[s];
    if (node.eq(hash, args))
      return s;
    s = node.link;
  }
  return 0;
}

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(atomic_uint32_t *p) {
  // Uses the high (kLockMask) bits of the bucket value as a spin lock and
  // returns the previous value with the lock bits cleared.
  for (int i = 0;; i++) {
    u32 cmp = atomic_load(p, memory_order_relaxed);
    if ((cmp & kLockMask) == 0 &&
        atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,
                                     memory_order_acquire))
      return cmp;
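    // Back off: spin with pause instructions first, then yield the thread.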
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
    atomic_uint32_t *p, u32 s) {
  DCHECK_EQ(s & kLockMask, 0);
  atomic_store(p, s, memory_order_release);
}

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                          bool *inserted) {
  if (inserted)
    *inserted = false;
  if (!LIKELY(Node::is_valid(args)))
    return 0;
  hash_type h = Node::hash(args);
  atomic_uint32_t *p = &tab[h % kTabSize];
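  // Lock-free fast path: the consume load pairs with the release store in
  // unlock(), so any node reachable through the loaded id is fully published.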
  u32 v = atomic_load(p, memory_order_consume);
  u32 s = v & kUnlockMask;
  // First, try to find the existing stack.
  u32 node = find(s, args, h);
  if (LIKELY(node))
    return node;

  // If not found, lock the bucket, re-check in case the chain changed, and
  // insert a new node.
  u32 s2 = lock(p);
  if (s2 != s) {
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node;
    }
  }
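  // Allocate the next unique id (ids start at 1), check that it fits into the
  // unreserved bits, and prepend the new node to the bucket's chain. unlock()
  // publishes the new chain head with release ordering.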
  s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
  CHECK_EQ(s & kUnlockMask, s);
  CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
  Node &new_node = nodes[s];
  new_node.store(s, args, h);
  new_node.link = s2;
  unlock(p, s);
  if (inserted) *inserted = true;
  return s;
}

template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
  if (id == 0)
    return args_type();
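  // A valid id never has any of the kReservedBits high bits set.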
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  if (!nodes.contains(id))
    return args_type();
  const Node &node = nodes[id];
  return node.load(id);
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockBeforeFork() {
  // Do not lock the hash table. Locking it is very expensive and not really
  // needed: the parent process will neither lock nor unlock it. The child
  // process risks deadlocking on buckets that were locked at the time of the
  // fork, so `UnlockAfterFork` force-unlocks every locked bucket in the child.
  // This may leave the hash table slightly inconsistent, but the only
  // consequence is that a few items inserted by the parent will not be found
  // by the child, which may insert them again, wasting some space in
  // `stackStore`.

  // We still need to lock nodes.
  nodes.Lock();
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAfterFork(
    bool fork_child) {
  nodes.Unlock();

  // Only unlock in the child process to avoid deadlock. See `LockBeforeFork`.
  if (!fork_child)
    return;

  for (int i = 0; i < kTabSize; ++i) {
    atomic_uint32_t *p = &tab[i];
    u32 s = atomic_load(p, memory_order_relaxed);
    if (s & kLockMask)
      unlock(p, s & kUnlockMask);
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uint32_t *p = &tab[i];
    u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;
    for (; s;) {
      const Node &node = nodes[s];
      Printf("Stack for id %u:\n", s);
      node.load(s).Print();
      s = node.link;
    }
  }
}

} // namespace __sanitizer

#endif // SANITIZER_STACKDEPOTBASE_H