//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID && !defined(__aarch64__)
// 32-bit Android only has 4k pages.
inline uptr GetPageSize() { return 4096; }
inline uptr GetPageSizeCached() { return 4096; }
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif

uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
                          uptr *tls_begin, uptr *tls_end);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);

inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size, bool raw_report = false);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition and
// returns nullptr in that case.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition
// and returns nullptr in that case.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of two.
// Dies on all errors except out of memory, in which case it returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped no-access.
// high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end,
                      uptr granularity);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address.  Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end) address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
int RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
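
// Illustrative usage (a sketch; kSize, kOffset, and kChunk are hypothetical
// constants):
//   ReservedAddressRange range;
//   uptr base = range.Init(kSize, "example");   // Reserve address space.
//   range.Map(base + kOffset, kChunk, "part");  // Commit part of it later.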

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
//
// NOTE: Users should use the singleton provided via
// `GetGlobalLowLevelAllocator()` rather than create a new one. This reduces
// mmap fragmentation, since allocations share the contiguous mappings owned
// by that singleton.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Registers a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

LowLevelAllocator &GetGlobalLowLevelAllocator();

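// Illustrative usage (a sketch; `MyPOD` is a hypothetical POD type, and the
// external lock required by Allocate() must be held by the caller):
//   LowLevelAllocator &alloc = GetGlobalLowLevelAllocator();
//   MyPOD *p = new (alloc) MyPOD();  // Uses the placement operator new
//                                    // defined at the end of this file.
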
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                     \
  do {                                          \
    if (UNLIKELY((uptr)Verbosity() >= (level))) \
      Report(__VA_ARGS__);                      \
  } while (0)
#define VPrintf(level, ...)                     \
  do {                                          \
    if (UNLIKELY((uptr)Verbosity() >= (level))) \
      Printf(__VA_ARGS__);                      \
  } while (0)
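
// Illustrative usage (a sketch; `shadow` and `npages` are hypothetical
// values; verbosity is controlled by the common "verbosity" flag):
//   VReport(1, "%s: shadow memory at %p\n", SanitizerToolName, (void *)shadow);
//   VPrintf(2, "reserved %d pages\n", npages);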

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Returns true if the entire range can be read.
bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Attempts to copy `n` bytes from memory range starting at `src` to `dest`.
// Returns true if the entire range can be read. Returns `false` if any part of
// the source range cannot be read, in which case the contents of `dest` are
// undefined.
bool TryMemCpy(void *dest, const void *src, uptr n);
// Copies accessible memory and zero-fills the inaccessible parts.
void MemCpyAccessible(void *dest, const void *src, uptr n);
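
// Illustrative usage (a sketch; `addr` is a hypothetical untrusted address):
//   char buf[64];
//   if (!TryMemCpy(buf, (const void *)addr, sizeof(buf)))
//     Report("Range at %p is not fully readable.\n", (void *)addr);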

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// in this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly encouraged to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to users.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
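
// Illustrative values for the helpers above (a quick sanity sketch):
//   MostSignificantSetBitIndex(12) == 3, LeastSignificantSetBitIndex(12) == 2,
//   RoundUpToPowerOfTwo(12) == 16, RoundUpTo(13, 8) == 16,
//   RoundDownTo(13, 8) == 8, IsAligned(16, 8) == true, Log2(16) == 4.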

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T, bool raw_report = false>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = nullptr;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_, raw_report); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const { return size_; }
  const T *data() const { return data_; }
  T *data() { return data_; }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const { return data(); }
  T *begin() { return data(); }
  const T *end() const { return data() + size(); }
  T *end() { return data() + size(); }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data =
        (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector", raw_report);
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_, raw_report);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
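
// Illustrative usage (a sketch; element types must be POD):
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   CHECK_EQ(addrs.size(), 2);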

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
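
// Illustrative usage (a sketch; `tid` is a hypothetical value):
//   InternalScopedString str;
//   str.AppendF("thread T%d: ", tid);
//   str.Append("done");
//   Printf("%s\n", str.data());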

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements into the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
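
// Illustrative usage (a sketch): sorting in descending order with a custom
// comparator.
//   uptr vals[] = {1, 3, 2};
//   Sort(vals, 3, [](uptr a, uptr b) { return a > b; });
//   // vals is now {3, 2, 1}.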

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
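
// Illustrative usage (a sketch; the container must already be sorted by
// `comp`):
//   InternalMmapVector<uptr> v;  // Assume it holds {10, 20, 30}.
//   uptr idx = InternalLowerBound(v, (uptr)20);  // idx == 1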

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
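
// Illustrative usage (a sketch):
//   InternalMmapVector<uptr> v;  // Assume it holds {3, 1, 3, 2}.
//   SortAndDedup(v);             // v becomes {1, 2, 3}.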

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

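// Illustrative usage (a sketch):
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &m : modules)
//     VPrintf(1, "%s loaded at %p\n", m.full_name(), (void *)m.base_address());
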
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows distinguishing between those cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};
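
// Illustrative usage (a sketch), e.g. from inside a deadly-signal handler
// that received `siginfo` and `context` from the OS:
//   SignalContext sig(siginfo, context);
//   Report("Fault at %p (pc %p, %s)\n", (void *)sig.addr, (void *)sig.pc,
//          sig.Describe());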

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup(); });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::usize size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H