Commit 854b88fda0
Changed files (213)
lib/tsan/interception/interception.h
@@ -14,9 +14,10 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
+#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
+#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
!SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
!SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system."
@@ -67,28 +68,54 @@ typedef __sanitizer::OFF64_T OFF64_T;
// for more details). To intercept such functions you need to use the
// INTERCEPTOR_WITH_SUFFIX(...) macro.
-// How it works:
-// To replace system functions on Linux we just need to declare functions
-// with same names in our library and then obtain the real function pointers
+// How it works on Linux
+// ---------------------
+//
+// To replace system functions on Linux we just need to declare functions with
+// the same names in our library and then obtain the real function pointers
// using dlsym().
-// There is one complication. A user may also intercept some of the functions
-// we intercept. To resolve this we declare our interceptors with __interceptor_
-// prefix, and then make actual interceptors weak aliases to __interceptor_
-// functions.
//
-// This is not so on Mac OS, where the two-level namespace makes
-// our replacement functions invisible to other libraries. This may be overcomed
-// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
-// libraries in Chromium were noticed when doing so.
+// There is one complication: a user may also intercept some of the functions we
+// intercept. To allow for up to 3 interceptors (including ours) of a given
+// function "func", the interceptor implementation is in ___interceptor_func,
+// which is aliased by a weak function __interceptor_func, which in turn is
+// aliased (via a trampoline) by weak wrapper function "func".
+//
+// Most user interceptors should define a foreign interceptor as follows:
+//
+// - provide a non-weak function "func" that performs interception;
+// - if __interceptor_func exists, call it to perform the real functionality;
+// - if it does not exist, figure out the real function and call it instead.
+//
+// In rare cases, a foreign interceptor (of another dynamic analysis runtime)
+// may be defined as follows (on supported architectures):
+//
+// - provide a non-weak function __interceptor_func that performs interception;
+// - if ___interceptor_func exists, call it to perform the real functionality;
+// - if it does not exist, figure out the real function and call it instead;
+// - provide a weak function "func" that is an alias to __interceptor_func.
+//
+// With this protocol, sanitizer interceptors, foreign user interceptors, and
+// foreign interceptors of other dynamic analysis runtimes, or any combination
+// thereof, may co-exist simultaneously.
+//
+// How it works on Mac OS
+// ----------------------
+//
+// This is not so on Mac OS, where the two-level namespace makes our replacement
+// functions invisible to other libraries. This can be overcome by using
+// DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared libraries in
+// Chromium were noticed when doing so.
+//
// Instead we create a dylib containing a __DATA,__interpose section that
// associates library functions with their wrappers. When this dylib is
-// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
-// the calls to interposed functions done through stubs to the wrapper
-// functions.
+// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all the
+// calls to interposed functions done through stubs to the wrapper functions.
+//
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
// Just a pair of pointers.
@@ -100,53 +127,102 @@ struct interpose_substitution {
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
-#define INTERPOSER(func_name) __attribute__((used)) \
+#define INTERPOSER(func_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
- { reinterpret_cast<const uptr>(WRAP(func_name)), \
- reinterpret_cast<const uptr>(func_name) } \
+ { reinterpret_cast<const uptr>(WRAP(func_name)), \
+ reinterpret_cast<const uptr>(func_name) } \
}
// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
-const interpose_substitution substitution_##func_name[] \
- __attribute__((section("__DATA, __interpose"))) = { \
- { reinterpret_cast<const uptr>(wrapper_name), \
- reinterpret_cast<const uptr>(func_name) } \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(wrapper_name), \
+ reinterpret_cast<const uptr>(func_name) } \
}
# define WRAP(x) wrap_##x
-# define WRAPPER_NAME(x) "wrap_"#x
+# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif SANITIZER_WINDOWS
# define WRAP(x) __asan_wrap_##x
-# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
-# define DECLARE_WRAPPER(ret_type, func, ...) \
+# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
-# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
+# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
-# define WRAP(x) __interceptor_ ## x
-# define WRAPPER_NAME(x) "__interceptor_" #x
+#elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
+// Weak aliases of weak aliases do not work, therefore we need to set up a
+// trampoline function. The function "func" is a weak alias to the trampoline
+// (so that we may check if "func" was overridden), which calls the weak
+// function __interceptor_func, which in turn aliases the actual interceptor
+// implementation ___interceptor_func:
+//
+// [wrapper "func": weak] --(alias)--> [TRAMPOLINE(func)]
+// |
+// +--------(tail call)-------+
+// |
+// v
+// [__interceptor_func: weak] --(alias)--> [WRAP(func)]
+//
+// We use inline assembly to define most of this, because not all compilers
+// support functions with the "naked" attribute on every architecture.
+# define WRAP(x) ___interceptor_ ## x
+# define TRAMPOLINE(x) __interceptor_trampoline_ ## x
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
// priority than weak ones so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
-# define DECLARE_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) \
- __attribute__((alias("__interceptor_" #func), visibility("default")));
-#elif !SANITIZER_FUCHSIA
-# define WRAP(x) __interceptor_ ## x
-# define WRAPPER_NAME(x) "__interceptor_" #x
-# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
-# define DECLARE_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) \
- __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
+# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
+# else
+# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
+# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__); \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
+ extern "C" ret_type __interceptor_##func(__VA_ARGS__) \
+ INTERCEPTOR_ATTRIBUTE __attribute__((weak)) ALIAS(WRAP(func)); \
+ asm( \
+ ".text\n" \
+ __ASM_WEAK_WRAPPER(func) \
+ ".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ ".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ ".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", %function\n" \
+ SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
+ SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
+ SANITIZER_STRINGIFY(ASM_TAIL_CALL) " __interceptor_" \
+ SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func)) "\n" \
+ SANITIZER_STRINGIFY(CFI_ENDPROC) "\n" \
+ ".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
+ ".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ );
+# else // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
+// Some architectures cannot implement efficient interceptor trampolines with
+// just a plain jump due to complexities of resolving a preemptible symbol. In
+// those cases, revert to just this scheme:
+//
+// [wrapper "func": weak] --(alias)--> [WRAP(func)]
+//
+# define WRAP(x) __interceptor_ ## x
+# define TRAMPOLINE(x) WRAP(x)
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define __ATTRIBUTE_WEAK_WRAPPER
+# else
+# define __ATTRIBUTE_WEAK_WRAPPER __attribute__((weak))
+# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ INTERCEPTOR_ATTRIBUTE __ATTRIBUTE_WEAK_WRAPPER ALIAS(WRAP(func));
+# endif // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
#endif
#if SANITIZER_FUCHSIA
@@ -157,33 +233,35 @@ const interpose_substitution substitution_##func_name[] \
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define REAL(x) __unsanitized_##x
# define DECLARE_REAL(ret_type, func, ...)
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
# define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_type
-# define DECLARE_REAL(ret_type, func, ...) \
+# define DECLARE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- extern FUNC_TYPE(func) PTR_TO_REAL(func); \
+ namespace __interception { \
+ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
#if !SANITIZER_FUCHSIA
-# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
extern "C" ret_type WRAP(func)(__VA_ARGS__);
// Declare an interceptor and its wrapper defined in a different translation
// unit (ex. asm).
-# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type WRAP(func)(__VA_ARGS__); \
- extern "C" ret_type func(__VA_ARGS__);
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ extern "C" ret_type func(__VA_ARGS__);
#else
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
@@ -193,7 +271,7 @@ const interpose_substitution substitution_##func_name[] \
// macros does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for other function.
-#if !SANITIZER_MAC && !SANITIZER_FUCHSIA
+#if !SANITIZER_APPLE && !SANITIZER_FUCHSIA
# define DEFINE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
@@ -213,25 +291,23 @@ const interpose_substitution substitution_##func_name[] \
__interceptor_##func(__VA_ARGS__); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
-#define INTERCEPTOR(ret_type, func, ...) \
- DEFINE_REAL(ret_type, func, __VA_ARGS__) \
- DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
- extern "C" \
- INTERCEPTOR_ATTRIBUTE \
- ret_type WRAP(func)(__VA_ARGS__)
+#define INTERCEPTOR(ret_type, func, ...) \
+ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR(ret_type, func, __VA_ARGS__)
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
-#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) suffix; \
- extern "C" ret_type WRAP(func)(__VA_ARGS__); \
- INTERPOSER(func); \
+#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) suffix; \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ INTERPOSER(func); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
@@ -246,14 +322,12 @@ const interpose_substitution substitution_##func_name[] \
#endif
#if SANITIZER_WINDOWS
-# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
+# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- FUNC_TYPE(func) PTR_TO_REAL(func); \
- } \
- extern "C" \
- INTERCEPTOR_ATTRIBUTE \
- ret_type __stdcall WRAP(func)(__VA_ARGS__)
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ } \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type __stdcall WRAP(func)(__VA_ARGS__)
#endif
// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
@@ -278,7 +352,7 @@ typedef unsigned long uptr;
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
-#elif SANITIZER_MAC
+#elif SANITIZER_APPLE
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
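
The foreign-interceptor protocol documented in the interception.h comment above can be illustrated with a short sketch. This is not part of the commit: the __interceptor_ symbol name follows the convention described there, while the strlen wrapper body and the dlsym(RTLD_NEXT, ...) fallback are assumptions about how a user-side tool would typically hook a libc function.

// Hypothetical foreign (user) interceptor for strlen(), following the
// protocol documented in interception.h above.
#include <dlfcn.h>   // dlsym, RTLD_NEXT
#include <stddef.h>  // size_t

// Weak declaration: resolves to a non-null address only if a sanitizer
// runtime providing __interceptor_strlen is loaded.
extern "C" size_t __interceptor_strlen(const char *s) __attribute__((weak));

// Non-weak wrapper: overrides both libc's strlen and the sanitizer's weak one.
extern "C" size_t strlen(const char *s) {
  // ... user-side instrumentation would go here ...
  if (__interceptor_strlen)          // sanitizer present: let it do the work
    return __interceptor_strlen(s);
  // No sanitizer loaded: fall back to the next strlen in lookup order.
  typedef size_t (*strlen_fn)(const char *);
  static strlen_fn real = (strlen_fn)dlsym(RTLD_NEXT, "strlen");
  return real(s);
}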
lib/tsan/interception/interception_linux.cpp
@@ -33,7 +33,7 @@ static int StrCmp(const char *s1, const char *s2) {
}
#endif
-static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
+static void *GetFuncAddr(const char *name, uptr trampoline) {
#if SANITIZER_NETBSD
// FIXME: Find a better way to handle renames
if (StrCmp(name, "sigaction"))
@@ -50,17 +50,17 @@ static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
// In case `name' is not loaded, dlsym ends up finding the actual wrapper.
// We don't want to intercept the wrapper and have it point to itself.
- if ((uptr)addr == wrapper_addr)
+ if ((uptr)addr == trampoline)
addr = nullptr;
}
return addr;
}
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
- uptr wrapper) {
- void *addr = GetFuncAddr(name, wrapper);
+ uptr trampoline) {
+ void *addr = GetFuncAddr(name, trampoline);
*ptr_to_real = (uptr)addr;
- return addr && (func == wrapper);
+ return addr && (func == trampoline);
}
// dlvsym is a GNU extension supported by some other platforms.
@@ -70,12 +70,12 @@ static void *GetFuncAddr(const char *name, const char *ver) {
}
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
- uptr func, uptr wrapper) {
+ uptr func, uptr trampoline) {
void *addr = GetFuncAddr(name, ver);
*ptr_to_real = (uptr)addr;
- return addr && (func == wrapper);
+ return addr && (func == trampoline);
}
-#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
+# endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception
lib/tsan/interception/interception_linux.h
@@ -15,7 +15,7 @@
SANITIZER_SOLARIS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
-# error "interception_linux.h should be included from interception library only"
+# error interception_linux.h should be included from interception library only
#endif
#ifndef INTERCEPTION_LINUX_H
@@ -23,26 +23,26 @@
namespace __interception {
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
- uptr wrapper);
+ uptr trampoline);
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
- uptr func, uptr wrapper);
+ uptr func, uptr trampoline);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, \
- (::__interception::uptr *) & REAL(func), \
- (::__interception::uptr) & (func), \
- (::__interception::uptr) & WRAP(func))
+ (::__interception::uptr *)&REAL(func), \
+ (::__interception::uptr)&(func), \
+ (::__interception::uptr)&TRAMPOLINE(func))
// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \
#func, symver, \
- (::__interception::uptr *) & REAL(func), \
- (::__interception::uptr) & (func), \
- (::__interception::uptr) & WRAP(func))
+ (::__interception::uptr *)&REAL(func), \
+ (::__interception::uptr)&(func), \
+ (::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
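
For context, here is a hedged sketch of how these macros are usually consumed inside a sanitizer runtime translation unit that includes interception.h. The macro names are from the headers above; the malloc body and the InitializeInterceptors function are illustrative, not part of this commit.

// Defines real_malloc, the ___interceptor_malloc implementation, the weak
// __interceptor_malloc alias, and the weak "malloc" wrapper/trampoline.
INTERCEPTOR(void *, malloc, uptr size) {
  // ... sanitizer bookkeeping would go here ...
  return REAL(malloc)(size);  // call the pointer captured via dlsym()
}

static void InitializeInterceptors() {
  // Resolves REAL(malloc) with dlsym() and succeeds only if the "malloc"
  // symbol seen by the dynamic linker is our trampoline, i.e. nothing else
  // overrode the wrapper.
  CHECK(INTERCEPT_FUNCTION(malloc));
}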
lib/tsan/interception/interception_mac.cpp
@@ -13,6 +13,6 @@
#include "interception.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/interception/interception_mac.h
@@ -11,7 +11,7 @@
// Mac-specific interception methods.
//===----------------------------------------------------------------------===//
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_mac.h should be included from interception.h only"
@@ -24,4 +24,4 @@
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
#endif // INTERCEPTION_MAC_H
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/interception/interception_type_test.cpp
@@ -13,7 +13,7 @@
#include "interception.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
#include <sys/types.h>
#include <stddef.h>
@@ -24,9 +24,9 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
-#if !SANITIZER_MAC
+# if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
-#endif
+# endif
// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
lib/tsan/interception/interception_win.cpp
@@ -56,7 +56,7 @@
// tramp: jmp QWORD [addr]
// addr: .bytes <hook>
//
-// Note: <real> is equilavent to <label>.
+// Note: <real> is equivalent to <label>.
//
// 3) HotPatch
//
@@ -141,8 +141,29 @@ static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;
+# if defined(_MSC_VER)
+# define INTERCEPTION_FORMAT(f, a)
+# else
+# define INTERCEPTION_FORMAT(f, a) __attribute__((format(printf, f, a)))
+# endif
+
+static void (*ErrorReportCallback)(const char *format, ...)
+ INTERCEPTION_FORMAT(1, 2);
+
+void SetErrorReportCallback(void (*callback)(const char *format, ...)) {
+ ErrorReportCallback = callback;
+}
+
+# define ReportError(...) \
+ do { \
+ if (ErrorReportCallback) \
+ ErrorReportCallback(__VA_ARGS__); \
+ } while (0)
+
static void InterceptionFailed() {
- // Do we have a good way to abort with an error message here?
+ ReportError("interception_win: failed due to an unrecoverable error.\n");
+ // This acts like an abort when no debugger is attached. According to an old
+ // comment, calling abort() leads to an infinite recursion in CheckFailed.
__debugbreak();
}
@@ -249,8 +270,13 @@ static void WritePadding(uptr from, uptr size) {
}
static void WriteJumpInstruction(uptr from, uptr target) {
- if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
+ if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target)) {
+ ReportError(
+ "interception_win: cannot write jmp further than 2GB away, from %p to "
+ "%p.\n",
+ (void *)from, (void *)target);
InterceptionFailed();
+ }
ptrdiff_t offset = target - from - kJumpInstructionLength;
*(u8*)from = 0xE9;
*(u32*)(from + 1) = offset;
@@ -274,6 +300,10 @@ static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
int offset = indirect_target - from - kIndirectJumpInstructionLength;
if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
indirect_target)) {
+ ReportError(
+ "interception_win: cannot write indirect jmp with target further than "
+ "2GB away, from %p to %p.\n",
+ (void *)from, (void *)indirect_target);
InterceptionFailed();
}
*(u16*)from = 0x25FF;
@@ -398,8 +428,44 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
return allocated_space;
}
+// The following prologues cannot be patched because of the short jump
+// jumping to the patching region.
+
+#if SANITIZER_WINDOWS64
+// ntdll!wcslen in Win11
+// 488bc1 mov rax,rcx
+// 0fb710 movzx edx,word ptr [rax]
+// 4883c002 add rax,2
+// 6685d2 test dx,dx
+// 75f4 jne -12
+static const u8 kPrologueWithShortJump1[] = {
+ 0x48, 0x8b, 0xc1, 0x0f, 0xb7, 0x10, 0x48, 0x83,
+ 0xc0, 0x02, 0x66, 0x85, 0xd2, 0x75, 0xf4,
+};
+
+// ntdll!strrchr in Win11
+// 4c8bc1 mov r8,rcx
+// 8a01 mov al,byte ptr [rcx]
+// 48ffc1 inc rcx
+// 84c0 test al,al
+// 75f7 jne -9
+static const u8 kPrologueWithShortJump2[] = {
+ 0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1,
+ 0x84, 0xc0, 0x75, 0xf7,
+};
+#endif
+
// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
+#if SANITIZER_WINDOWS64
+ if (memcmp((u8*)address, kPrologueWithShortJump1,
+ sizeof(kPrologueWithShortJump1)) == 0 ||
+ memcmp((u8*)address, kPrologueWithShortJump2,
+ sizeof(kPrologueWithShortJump2)) == 0) {
+ return 0;
+ }
+#endif
+
switch (*(u64*)address) {
case 0x90909090909006EB: // stub: jmp over 6 x nop.
return 8;
@@ -456,6 +522,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xFF8B: // 8B FF : mov edi, edi
case 0xEC8B: // 8B EC : mov ebp, esp
case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xE589: // 89 E5 : mov ebp, esp
case 0xC18B: // 8B C1 : mov eax, ecx
case 0xC033: // 33 C0 : xor eax, eax
case 0xC933: // 33 C9 : xor ecx, ecx
@@ -477,6 +544,14 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
return 9;
+
+ case 0x83:
+ const u8 next_byte = *(u8*)(address + 1);
+ const u8 mod = next_byte >> 6;
+ const u8 rm = next_byte & 7;
+ if (mod == 1 && rm == 4)
+ return 5; // 83 ModR/M SIB Disp8 Imm8
+ // add|or|adc|sbb|and|sub|xor|cmp [r+disp8], imm8
}
switch (*(u16*)address) {
@@ -493,6 +568,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x5641: // push r14
case 0x5741: // push r15
case 0x9066: // Two-byte NOP
+ case 0xc084: // test al, al
+ case 0x018a: // mov al, byte ptr [rcx]
return 2;
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
@@ -509,6 +586,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
case 0xc98548: // 48 85 C9 : test rcx, rcx
+ case 0xd28548: // 48 85 d2 : test rdx, rdx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
@@ -522,6 +600,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 4d 0b c0 : or r8, r8
+ case 0xc08b41: // 41 8b c0 : mov eax, r8d
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
@@ -556,6 +635,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
+ case 0x247c8948: // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9
@@ -592,6 +672,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
+ case 0x245C8B: // 8B 5C 24 XX : mov ebx, dword ptr [esp + XX]
+ case 0x246C8B: // 8B 6C 24 XX : mov ebp, dword ptr [esp + XX]
case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
return 4;
@@ -603,12 +685,20 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
#endif
- // Unknown instruction!
- // FIXME: Unknown instruction failures might happen when we add a new
- // interceptor or a new compiler version. In either case, they should result
- // in visible and readable error messages. However, merely calling abort()
- // leads to an infinite recursion in CheckFailed.
- InterceptionFailed();
+ // Unknown instruction! This might happen when we add a new interceptor, use
+ // a new compiler version, or if Windows changed how some functions are
+ // compiled. In either case, we print the address and 8 bytes of instructions
+ // to notify the user about the error and to help identify the unknown
+ // instruction. Don't treat this as a fatal error, though we can break the
+ // debugger if one has been attached.
+ u8 *bytes = (u8 *)address;
+ ReportError(
+ "interception_win: unhandled instruction at %p: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x\n",
+ (void *)address, bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
+ bytes[5], bytes[6], bytes[7]);
+ if (::IsDebuggerPresent())
+ __debugbreak();
return 0;
}
@@ -629,6 +719,8 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
while (cursor != size) {
size_t rel_offset = 0;
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
+ if (!instruction_size)
+ return false;
_memcpy((void*)(to + cursor), (void*)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
@@ -689,7 +781,7 @@ bool OverrideFunctionWithRedirectJump(
return false;
if (orig_old_func) {
- uptr relative_offset = *(u32*)(old_func + 1);
+ sptr relative_offset = *(s32 *)(old_func + 1);
uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
*orig_old_func = absolute_target;
}
@@ -846,6 +938,10 @@ static void **InterestingDLLsAvailable() {
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
+#if (defined(__MINGW32__) && defined(__i386__))
+ "libc++.dll", // libc++
+ "libunwind.dll", // libunwind
+#endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll", NULL};
@@ -1019,4 +1115,4 @@ bool OverrideImportedFunction(const char *module_to_patch,
} // namespace __interception
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/interception/interception_win.h
@@ -41,6 +41,11 @@ bool OverrideImportedFunction(const char *module_to_patch,
const char *function_name, uptr new_function,
uptr *orig_old_func);
+// Sets a callback to be used for reporting errors by interception_win. The
+// callback will be called with printf-like arguments. Intended to be used with
+// __sanitizer::Report. Pass nullptr to disable error reporting (default).
+void SetErrorReportCallback(void (*callback)(const char *format, ...));
+
#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(
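
A plausible call site for the new hook, assuming a sanitizer's Windows initialization path (the function name below is illustrative; __sanitizer::Report matches the printf-like signature the callback expects):

#include "interception/interception_win.h"
#include "sanitizer_common/sanitizer_common.h"  // __sanitizer::Report

static void InitializePlatformInterceptors() {
  // Route interception_win failures (unknown instructions, out-of-range
  // jumps, ...) through the regular sanitizer reporting channel.
  __interception::SetErrorReportCallback(__sanitizer::Report);
}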
lib/tsan/sanitizer_common/sancov_flags.inc
@@ -14,7 +14,7 @@
#endif
SANCOV_FLAG(bool, symbolize, true,
- "If set, converage information will be symbolized by sancov tool "
+ "If set, coverage information will be symbolized by sancov tool "
"after dumping.")
SANCOV_FLAG(bool, help, false, "Print flags help.")
lib/tsan/sanitizer_common/sanitizer_addrhashmap.h
@@ -39,6 +39,11 @@ namespace __sanitizer {
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
+// {
+// Map::Handle h(&m, addr, false, true);
+// this will create a new element or return a handle to an existing element
+// if !h.created() this thread does *not* have exclusive access to the data
+// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
@@ -56,7 +61,7 @@ class AddrHashMap {
static const uptr kBucketSize = 3;
struct Bucket {
- RWMutex mtx;
+ Mutex mtx;
atomic_uintptr_t add;
Cell cells[kBucketSize];
};
@@ -89,6 +94,12 @@ class AddrHashMap {
bool create_;
};
+ typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);
+ // ForEach acquires a lock on each bucket while iterating over
+ // elements. Note that this only ensures that the structure of the hashmap is
+ // unchanged, there may be a data race to the element itself.
+ void ForEach(ForEachCallback cb, void *arg);
+
private:
friend class Handle;
Bucket *table_;
@@ -98,6 +109,33 @@ class AddrHashMap {
uptr calcHash(uptr addr);
};
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {
+ for (uptr n = 0; n < kSize; n++) {
+ Bucket *bucket = &table_[n];
+
+ ReadLock lock(&bucket->mtx);
+
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &bucket->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 != 0)
+ cb(addr1, c->val, arg);
+ }
+
+ // Iterate over any additional cells.
+ if (AddBucket *add =
+ (AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 != 0)
+ cb(addr1, c->val, arg);
+ }
+ }
+ }
+}
+
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
map_ = map;
@@ -163,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
}
template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+void AddrHashMap<T, kSize>::acquire(Handle *h)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -292,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
}
template <typename T, uptr kSize>
- void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+ void AddrHashMap<T, kSize>::release(Handle *h)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (!h->cell_)
return;
Bucket *b = h->bucket_;
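
To show the intended shape of the new ForEach API, here is a small hypothetical user. The AllocInfo payload, map size, and helper names are made up for illustration; the callback signature matches ForEachCallback above.

#include "sanitizer_common/sanitizer_addrhashmap.h"

namespace __sanitizer {

struct AllocInfo {
  uptr size;
};
typedef AddrHashMap<AllocInfo, 31051> AllocMap;

// Matches ForEachCallback: invoked once per live cell, with the owning bucket
// held under a read lock.
static void AddSize(const uptr key, const AllocInfo &val, void *arg) {
  *static_cast<uptr *>(arg) += val.size;
}

static uptr TotalAllocatedBytes(AllocMap *m) {
  uptr total = 0;
  m->ForEach(AddSize, &total);  // structure is stable; values may still race
  return total;
}

}  // namespace __sanitizer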
lib/tsan/sanitizer_common/sanitizer_allocator.cpp
@@ -17,6 +17,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
@@ -24,66 +25,6 @@ namespace __sanitizer {
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";
-// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_USE_MALLOC)
-# if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern "C" void *__libc_malloc(uptr size);
-# if !SANITIZER_GO
-extern "C" void *__libc_memalign(uptr alignment, uptr size);
-# endif
-extern "C" void *__libc_realloc(void *ptr, uptr size);
-extern "C" void __libc_free(void *ptr);
-# else
-# include <stdlib.h>
-# define __libc_malloc malloc
-# if !SANITIZER_GO
-static void *__libc_memalign(uptr alignment, uptr size) {
- void *p;
- uptr error = posix_memalign(&p, alignment, size);
- if (error) return nullptr;
- return p;
-}
-# endif
-# define __libc_realloc realloc
-# define __libc_free free
-# endif
-
-static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
- uptr alignment) {
- (void)cache;
-#if !SANITIZER_GO
- if (alignment == 0)
- return __libc_malloc(size);
- else
- return __libc_memalign(alignment, size);
-#else
- // Windows does not provide __libc_memalign/posix_memalign. It provides
- // __aligned_malloc, but the allocated blocks can't be passed to free,
- // they need to be passed to __aligned_free. InternalAlloc interface does
- // not account for such requirement. Alignemnt does not seem to be used
- // anywhere in runtime, so just call __libc_malloc for now.
- DCHECK_EQ(alignment, 0);
- return __libc_malloc(size);
-#endif
-}
-
-static void *RawInternalRealloc(void *ptr, uptr size,
- InternalAllocatorCache *cache) {
- (void)cache;
- return __libc_realloc(ptr, size);
-}
-
-static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
- (void)cache;
- __libc_free(ptr);
-}
-
-InternalAllocator *internal_allocator() {
- return 0;
-}
-
-#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
-
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
@@ -135,8 +76,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
-
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@@ -187,6 +126,16 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
RawInternalFree(addr, cache);
}
+void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ internal_allocator_cache_mu.Lock();
+ internal_allocator()->ForceLock();
+}
+
+void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ internal_allocator()->ForceUnlock();
+ internal_allocator_cache_mu.Unlock();
+}
+
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
@@ -197,12 +146,10 @@ void *LowLevelAllocator::Allocate(uptr size) {
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
- allocated_current_ =
- (char*)MmapOrDie(size_to_allocate, __func__);
+ allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
- low_level_alloc_callback((uptr)allocated_current_,
- size_to_allocate);
+ low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
}
}
CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
@@ -247,4 +194,14 @@ void PrintHintAllocatorCannotReturnNull() {
"allocator_may_return_null=1\n");
}
+static atomic_uint8_t rss_limit_exceeded;
+
+bool IsRssLimitExceeded() {
+ return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+}
+
+void SetRssLimitExceeded(bool limit_exceeded) {
+ atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+}
+
} // namespace __sanitizer
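
A sketch of how the new RSS-limit flag is meant to be consumed: some background watcher calls SetRssLimitExceeded() when the soft RSS limit is crossed, and the allocation path checks it. The surrounding function and RealAllocate are hypothetical; AllocatorMayReturnNull and ReportRssLimitExceeded are assumed to be the existing sanitizer_common helpers.

void *SanitizerAllocate(uptr size, BufferedStackTrace *stack) {  // hypothetical
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);  // reports and dies; does not return
  }
  // ... normal allocation path ...
  return RealAllocate(size);  // hypothetical
}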
lib/tsan/sanitizer_common/sanitizer_allocator.h
@@ -14,6 +14,7 @@
#define SANITIZER_ALLOCATOR_H
#include "sanitizer_common.h"
+#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_lfstack.h"
#include "sanitizer_libc.h"
@@ -43,12 +44,6 @@ void SetAllocatorOutOfMemory();
void PrintHintAllocatorCannotReturnNull();
-// Allocators call these callbacks on mmap/munmap.
-struct NoOpMapUnmapCallback {
- void OnMap(uptr p, uptr size) const { }
- void OnUnmap(uptr p, uptr size) const { }
-};
-
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
@@ -67,15 +62,24 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
*rand_state = state;
}
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {}
+ void OnUnmap(uptr p, uptr size) const {}
+};
+
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
-#include "sanitizer_allocator_bytemap.h"
#include "sanitizer_allocator_primary32.h"
#include "sanitizer_allocator_local_cache.h"
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"
+bool IsRssLimitExceeded();
+void SetRssLimitExceeded(bool limit_exceeded);
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
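
The NoOpMapUnmapCallback above now carries an OnMapSecondary hook. A minimal sketch of a non-trivial callback (purely illustrative; the struct name and bodies are made up) shows how the extra parameters are intended to be read:

struct StatsMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { /* account a primary region */ }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {
    // p/size span the whole mmap()ed block (header page included), while
    // user_begin/user_size span only the bytes handed back to the caller.
  }
  void OnUnmap(uptr p, uptr size) const { /* undo the accounting */ }
};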
lib/tsan/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -1,107 +0,0 @@
-//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Part of the Sanitizer Allocator.
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ALLOCATOR_H
-#error This file must be included inside sanitizer_allocator.h
-#endif
-
-// Maps integers in rage [0, kSize) to u8 values.
-template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
-class FlatByteMap {
- public:
- using AddressSpaceView = AddressSpaceViewTy;
- void Init() {
- internal_memset(map_, 0, sizeof(map_));
- }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize);
- CHECK_EQ(0U, map_[idx]);
- map_[idx] = val;
- }
- u8 operator[] (uptr idx) {
- CHECK_LT(idx, kSize);
- // FIXME: CHECK may be too expensive here.
- return map_[idx];
- }
- private:
- u8 map_[kSize];
-};
-
-// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
-// It is implemented as a two-dimensional array: array of kSize1 pointers
-// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
-// Each value is initially zero and can be set to something else only once.
-// Setting and getting values from multiple threads is safe w/o extra locking.
-template <u64 kSize1, u64 kSize2,
- typename AddressSpaceViewTy = LocalAddressSpaceView,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class TwoLevelByteMap {
- public:
- using AddressSpaceView = AddressSpaceViewTy;
- void Init() {
- internal_memset(map1_, 0, sizeof(map1_));
- mu_.Init();
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kSize1; i++) {
- u8 *p = Get(i);
- if (!p) continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
- UnmapOrDie(p, kSize2);
- }
- }
-
- uptr size() const { return kSize1 * kSize2; }
- uptr size1() const { return kSize1; }
- uptr size2() const { return kSize2; }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = GetOrCreate(idx / kSize2);
- CHECK_EQ(0U, map2[idx % kSize2]);
- map2[idx % kSize2] = val;
- }
-
- u8 operator[] (uptr idx) const {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = Get(idx / kSize2);
- if (!map2) return 0;
- auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);
- return *value_ptr;
- }
-
- private:
- u8 *Get(uptr idx) const {
- CHECK_LT(idx, kSize1);
- return reinterpret_cast<u8 *>(
- atomic_load(&map1_[idx], memory_order_acquire));
- }
-
- u8 *GetOrCreate(uptr idx) {
- u8 *res = Get(idx);
- if (!res) {
- SpinMutexLock l(&mu_);
- if (!(res = Get(idx))) {
- res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
- atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
- memory_order_release);
- }
- }
- return res;
- }
-
- atomic_uintptr_t map1_[kSize1];
- StaticSpinMutex mu_;
-};
-
lib/tsan/sanitizer_common/sanitizer_allocator_combined.h
@@ -29,9 +29,9 @@ class CombinedAllocator {
LargeMmapAllocatorPtrArray,
typename PrimaryAllocator::AddressSpaceView>;
- void InitLinkerInitialized(s32 release_to_os_interval_ms) {
- stats_.InitLinkerInitialized();
- primary_.Init(release_to_os_interval_ms);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms,
+ uptr heap_start = 0) {
+ primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.InitLinkerInitialized();
}
@@ -112,15 +112,13 @@ class CombinedAllocator {
return new_p;
}
- bool PointerIsMine(void *p) {
+ bool PointerIsMine(const void *p) const {
if (primary_.PointerIsMine(p))
return true;
return secondary_.PointerIsMine(p);
}
- bool FromPrimary(void *p) {
- return primary_.PointerIsMine(p);
- }
+ bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }
void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
@@ -136,7 +134,7 @@ class CombinedAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *p) {
+ void *GetBlockBeginFastLocked(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBeginFastLocked(p);
@@ -177,12 +175,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}
lib/tsan/sanitizer_common/sanitizer_allocator_dlsym.h
@@ -0,0 +1,79 @@
+//===-- sanitizer_allocator_dlsym.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hack: Sanitizer initializer calls dlsym which may need to allocate and call
+// back into uninitialized sanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_DLSYM_H
+#define SANITIZER_ALLOCATOR_DLSYM_H
+
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+template <typename Details>
+struct DlSymAllocator {
+ static bool Use() {
+ // Fuchsia doesn't use dlsym-based interceptors.
+ return !SANITIZER_FUCHSIA && UNLIKELY(Details::UseImpl());
+ }
+
+ static bool PointerIsMine(const void *ptr) {
+ // Fuchsia doesn't use dlsym-based interceptors.
+ return !SANITIZER_FUCHSIA &&
+ UNLIKELY(internal_allocator()->FromPrimary(ptr));
+ }
+
+ static void *Allocate(uptr size_in_bytes) {
+ void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize);
+ CHECK(internal_allocator()->FromPrimary(ptr));
+ Details::OnAllocate(ptr,
+ internal_allocator()->GetActuallyAllocatedSize(ptr));
+ return ptr;
+ }
+
+ static void *Callocate(SIZE_T nmemb, SIZE_T size) {
+ void *ptr = InternalCalloc(nmemb, size);
+ CHECK(internal_allocator()->FromPrimary(ptr));
+ Details::OnAllocate(ptr,
+ internal_allocator()->GetActuallyAllocatedSize(ptr));
+ return ptr;
+ }
+
+ static void Free(void *ptr) {
+ uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+ Details::OnFree(ptr, size);
+ InternalFree(ptr);
+ }
+
+ static void *Realloc(void *ptr, uptr new_size) {
+ if (!ptr)
+ return Allocate(new_size);
+ CHECK(internal_allocator()->FromPrimary(ptr));
+ if (!new_size) {
+ Free(ptr);
+ return nullptr;
+ }
+ uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+ uptr memcpy_size = Min(new_size, size);
+ void *new_ptr = Allocate(new_size);
+ if (new_ptr)
+ internal_memcpy(new_ptr, ptr, memcpy_size);
+ Free(ptr);
+ return new_ptr;
+ }
+
+ static void OnAllocate(const void *ptr, uptr size) {}
+ static void OnFree(const void *ptr, uptr size) {}
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_DLSYM_H
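
A hedged sketch of the intended use: a sanitizer derives a DlsymAlloc type from this template and routes very early calloc/free calls (made by dlsym() itself during interceptor setup) into the internal allocator. The UseImpl condition and the interceptor bodies below are illustrative, not part of this commit.

struct DlsymAlloc : public __sanitizer::DlSymAllocator<DlsymAlloc> {
  // Hypothetical readiness check; a real runtime tests its own init state.
  static bool UseImpl() { return !interceptors_initialized; }
};

INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
  if (DlsymAlloc::Use())
    return DlsymAlloc::Callocate(nmemb, size);
  // ... normal sanitizer calloc path ...
  return REAL(calloc)(nmemb, size);
}

INTERCEPTOR(void, free, void *ptr) {
  if (DlsymAlloc::PointerIsMine(ptr))
    return DlsymAlloc::Free(ptr);
  // ... normal sanitizer free path ...
  REAL(free)(ptr);
}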
lib/tsan/sanitizer_common/sanitizer_allocator_interface.h
@@ -21,8 +21,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
+ const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size_fast(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
lib/tsan/sanitizer_common/sanitizer_allocator_internal.h
@@ -48,8 +48,9 @@ void *InternalReallocArray(void *p, uptr count, uptr size,
void *InternalCalloc(uptr count, uptr size,
InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
+void InternalAllocatorLock();
+void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();
-
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
lib/tsan/sanitizer_common/sanitizer_allocator_primary32.h
@@ -189,7 +189,7 @@ class SizeClassAllocator32 {
sci->free_list.push_front(b);
}
- bool PointerIsMine(const void *p) {
+ bool PointerIsMine(const void *p) const {
uptr mem = reinterpret_cast<uptr>(p);
if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
mem &= (kSpaceSize - 1);
@@ -198,8 +198,9 @@ class SizeClassAllocator32 {
return GetSizeClass(p) != 0;
}
- uptr GetSizeClass(const void *p) {
- return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ uptr GetSizeClass(const void *p) const {
+ uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));
+ return possible_regions.contains(id) ? possible_regions[id] : 0;
}
void *GetBlockBegin(const void *p) {
@@ -237,13 +238,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
@@ -251,9 +252,9 @@ class SizeClassAllocator32 {
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) const {
for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (possible_regions[region]) {
+ if (possible_regions.contains(region) && possible_regions[region]) {
uptr chunk_size = ClassIdToSize(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
@@ -292,9 +293,7 @@ class SizeClassAllocator32 {
return res;
}
- uptr ComputeRegionBeg(uptr mem) {
- return mem & ~(kRegionSize - 1);
- }
+ uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
DCHECK_LT(class_id, kNumClasses);
@@ -305,7 +304,7 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
CHECK(IsAligned(res, kRegionSize));
- possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ possible_regions[ComputeRegionId(res)] = class_id;
return res;
}
@@ -354,7 +353,7 @@ class SizeClassAllocator32 {
DCHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
constexpr uptr kShuffleArraySize = 48;
- uptr shuffle_array[kShuffleArraySize];
+ UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
uptr count = 0;
for (uptr i = region; i < region + n_chunks * size; i += size) {
shuffle_array[count++] = i;
lib/tsan/sanitizer_common/sanitizer_allocator_primary64.h
@@ -161,7 +161,7 @@ class SizeClassAllocator64 {
void ForceReleaseToOS() {
MemoryMapperT memory_mapper(*this);
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+ Lock l(&GetRegionInfo(class_id)->mutex);
MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
}
}
@@ -178,7 +178,7 @@ class SizeClassAllocator64 {
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
- BlockingMutexLock l(®ion->mutex);
+ Lock l(®ion->mutex);
uptr old_num_chunks = region->num_freed_chunks;
uptr new_num_freed_chunks = old_num_chunks + n_chunks;
// Failure to allocate free array space while releasing memory is non
@@ -204,7 +204,7 @@ class SizeClassAllocator64 {
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
- BlockingMutexLock l(®ion->mutex);
+ Lock l(®ion->mutex);
#if SANITIZER_WINDOWS
/* On Windows unmapping of memory during __sanitizer_purge_allocator is
explicit and immediate, so unmapped regions must be explicitly mapped back
@@ -282,6 +282,8 @@ class SizeClassAllocator64 {
CHECK(kMetadataSize);
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
+ if (!size)
+ return nullptr;
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
@@ -300,9 +302,8 @@ class SizeClassAllocator64 {
UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
}
- static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
- uptr stats_size) {
- for (uptr class_id = 0; class_id < stats_size; class_id++)
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
if (stats[class_id] == start)
stats[class_id] = rss;
}
@@ -315,7 +316,7 @@ class SizeClassAllocator64 {
Printf(
"%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
"num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
- "last released: %6zdK region: 0x%zx\n",
+ "last released: %6lldK region: 0x%zx\n",
region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
region->mapped_user >> 10, region->stats.n_allocated,
region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
@@ -328,7 +329,7 @@ class SizeClassAllocator64 {
uptr rss_stats[kNumClasses];
for (uptr class_id = 0; class_id < kNumClasses; class_id++)
rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
- GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+ GetMemoryProfile(FillMemoryProfile, rss_stats);
uptr total_mapped = 0;
uptr total_rss = 0;
@@ -353,13 +354,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}
@@ -623,7 +624,7 @@ class SizeClassAllocator64 {
static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
// FreeArray is the array of free-d chunks (stored as 4-byte offsets).
- // In the worst case it may reguire kRegionSize/SizeClassMap::kMinSize
+ // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
// elements, but in reality this will not happen. For simplicity we
// dedicate 1/8 of the region's virtual space to FreeArray.
static const uptr kFreeArraySize = kRegionSize / 8;
@@ -634,8 +635,8 @@ class SizeClassAllocator64 {
return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
}
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize should be able to satisfy the largest size class.
+ static_assert(kRegionSize >= SizeClassMap::kMaxSize);
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
// Call mmap for user memory with at least this size.
@@ -665,7 +666,7 @@ class SizeClassAllocator64 {
};
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
- BlockingMutex mutex;
+ Mutex mutex;
uptr num_freed_chunks; // Number of elements in the freearray.
uptr mapped_free_array; // Bytes mapped for freearray.
uptr allocated_user; // Bytes allocated for user memory.
lib/tsan/sanitizer_common/sanitizer_allocator_report.cpp
@@ -128,8 +128,7 @@ void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
{
ScopedAllocatorErrorReport report("out-of-memory", stack);
- Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
- "bytes\n", SanitizerToolName, requested_size);
+ ERROR_OOM("allocator is trying to allocate 0x%zx bytes\n", requested_size);
}
Die();
}
lib/tsan/sanitizer_common/sanitizer_allocator_secondary.h
@@ -82,7 +82,7 @@ class LargeMmapAllocator {
InitLinkerInitialized();
}
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
@@ -99,11 +99,11 @@ class LargeMmapAllocator {
if (!map_beg)
return nullptr;
CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
+ MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
CHECK(IsAligned(res, alignment));
CHECK(IsAligned(res, page_size_));
CHECK_GE(res + size, map_beg);
@@ -161,7 +161,7 @@ class LargeMmapAllocator {
return res;
}
- bool PointerIsMine(const void *p) {
+ bool PointerIsMine(const void *p) const {
return GetBlockBegin(p) != nullptr;
}
@@ -179,7 +179,7 @@ class LargeMmapAllocator {
return GetHeader(p) + 1;
}
- void *GetBlockBegin(const void *ptr) {
+ void *GetBlockBegin(const void *ptr) const {
uptr p = reinterpret_cast<uptr>(ptr);
SpinMutexLock l(&mutex_);
uptr nearest_chunk = 0;
@@ -215,7 +215,7 @@ class LargeMmapAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *ptr) {
+ void *GetBlockBeginFastLocked(const void *ptr) {
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
@@ -267,9 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
+ void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
- void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
+ void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
@@ -301,7 +301,7 @@ class LargeMmapAllocator {
return GetHeader(reinterpret_cast<uptr>(p));
}
- void *GetUser(const Header *h) {
+ void *GetUser(const Header *h) const {
CHECK(IsAligned((uptr)h, page_size_));
return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
}
@@ -318,5 +318,5 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
- StaticSpinMutex mutex_;
+ mutable StaticSpinMutex mutex_;
};
lib/tsan/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -193,13 +193,13 @@ class SizeClassMap {
uptr cached = MaxCachedHint(s) * s;
if (i == kBatchClassID)
d = p = l = 0;
- Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
- "cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
+ Printf(
+ "c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n",
+ i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
total_cached += cached;
prev_s = s;
}
- Printf("Total cached: %zd\n", total_cached);
+ Printf("Total cached: %zu\n", total_cached);
}
static void Validate() {
lib/tsan/sanitizer_common/sanitizer_allocator_stats.h
@@ -25,19 +25,13 @@ typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
+ void Init() { internal_memset(this, 0, sizeof(*this)); }
void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
+ atomic_fetch_add(&stats_[i], v, memory_order_relaxed);
}
void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
+ atomic_fetch_sub(&stats_[i], v, memory_order_relaxed);
}
void Set(AllocatorStat i, uptr v) {
@@ -58,17 +52,13 @@ class AllocatorStats {
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
}
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
+ LazyInit();
s->next_ = next_;
s->prev_ = this;
next_->prev_ = s;
@@ -87,7 +77,7 @@ class AllocatorGlobalStats : public AllocatorStats {
internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
- for (;;) {
+ for (; stats;) {
for (int i = 0; i < AllocatorStatCount; i++)
s[i] += stats->Get(AllocatorStat(i));
stats = stats->next_;
@@ -100,6 +90,13 @@ class AllocatorGlobalStats : public AllocatorStats {
}
private:
+ void LazyInit() {
+ if (!next_) {
+ next_ = this;
+ prev_ = this;
+ }
+ }
+
mutable StaticSpinMutex mu_;
};
lib/tsan/sanitizer_common/sanitizer_array_ref.h
@@ -0,0 +1,123 @@
+//===-- sanitizer_array_ref.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ARRAY_REF_H
+#define SANITIZER_ARRAY_REF_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+/// ArrayRef - Represent a constant reference to an array (0 or more elements
+/// consecutively in memory), i.e. a start pointer and a length. It allows
+/// various APIs to take consecutive elements easily and conveniently.
+///
+/// This class does not own the underlying data, it is expected to be used in
+/// situations where the data resides in some other buffer, whose lifetime
+/// extends past that of the ArrayRef. For this reason, it is not in general
+/// safe to store an ArrayRef.
+///
+/// This is intended to be trivially copyable, so it should be passed by
+/// value.
+template <typename T>
+class ArrayRef {
+ public:
+ constexpr ArrayRef() {}
+ constexpr ArrayRef(const T *begin, const T *end) : begin_(begin), end_(end) {
+ DCHECK(empty() || begin);
+ }
+ constexpr ArrayRef(const T *data, uptr length)
+ : ArrayRef(data, data + length) {}
+ template <uptr N>
+ constexpr ArrayRef(const T (&src)[N]) : ArrayRef(src, src + N) {}
+ template <typename C>
+ constexpr ArrayRef(const C &src)
+ : ArrayRef(src.data(), src.data() + src.size()) {}
+ ArrayRef(const T &one_elt) : ArrayRef(&one_elt, &one_elt + 1) {}
+
+ const T *data() const { return empty() ? nullptr : begin_; }
+
+ const T *begin() const { return begin_; }
+ const T *end() const { return end_; }
+
+ bool empty() const { return begin_ == end_; }
+
+ uptr size() const { return end_ - begin_; }
+
+ /// equals - Check for element-wise equality.
+ bool equals(ArrayRef rhs) const {
+ if (size() != rhs.size())
+ return false;
+ auto r = rhs.begin();
+ for (auto &l : *this) {
+ if (!(l == *r))
+ return false;
+ ++r;
+ }
+ return true;
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ ArrayRef<T> slice(uptr N, uptr M) const {
+ DCHECK_LE(N + M, size());
+ return ArrayRef<T>(data() + N, M);
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ ArrayRef<T> slice(uptr N) const { return slice(N, size() - N); }
+
+ /// Drop the first \p N elements of the array.
+ ArrayRef<T> drop_front(uptr N = 1) const {
+ DCHECK_GE(size(), N);
+ return slice(N, size() - N);
+ }
+
+ /// Drop the last \p N elements of the array.
+ ArrayRef<T> drop_back(uptr N = 1) const {
+ DCHECK_GE(size(), N);
+ return slice(0, size() - N);
+ }
+
+ /// Return a copy of *this with only the first \p N elements.
+ ArrayRef<T> take_front(uptr N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// Return a copy of *this with only the last \p N elements.
+ ArrayRef<T> take_back(uptr N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ const T &operator[](uptr index) const {
+ DCHECK_LT(index, size());
+ return begin_[index];
+ }
+
+ private:
+ const T *begin_ = nullptr;
+ const T *end_ = nullptr;
+};
+
+template <typename T>
+inline bool operator==(ArrayRef<T> lhs, ArrayRef<T> rhs) {
+ return lhs.equals(rhs);
+}
+
+template <typename T>
+inline bool operator!=(ArrayRef<T> lhs, ArrayRef<T> rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ARRAY_REF_H
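A minimal usage sketch for the new __sanitizer::ArrayRef (not part of the commit above; SumTail and Example are hypothetical names):

    // Hypothetical usage of __sanitizer::ArrayRef; views never copy or own data.
    #include "sanitizer_array_ref.h"

    using namespace __sanitizer;

    static uptr SumTail(ArrayRef<uptr> values) {
      uptr sum = 0;
      for (uptr v : values.drop_front())  // everything after the first element
        sum += v;
      return sum;
    }

    static void Example() {
      uptr buf[4] = {1, 2, 3, 4};
      ArrayRef<uptr> all(buf);                       // implicit from a C array
      ArrayRef<uptr> first_two = all.take_front(2);  // view of {1, 2}
      CHECK_EQ(first_two.size(), 2);
      CHECK_EQ(SumTail(all), 9);                     // 2 + 3 + 4
    }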
lib/tsan/sanitizer_common/sanitizer_asm.h
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Various support for assemebler.
+// Various support for assembler.
//
//===----------------------------------------------------------------------===//
@@ -42,13 +42,57 @@
# define CFI_RESTORE(reg)
#endif
+#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
+# define ASM_TAIL_CALL jmp
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__powerpc__) || defined(__loongarch_lp64)
+# define ASM_TAIL_CALL b
+#elif defined(__s390__)
+# define ASM_TAIL_CALL jg
+#elif defined(__riscv)
+# define ASM_TAIL_CALL tail
+#endif
+
+#if defined(__ELF__) && defined(__x86_64__) || defined(__i386__) || \
+ defined(__riscv)
+# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
+#else
+# define ASM_PREEMPTIBLE_SYM(sym) sym
+#endif
+
#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
-# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+# if defined(__i386__) || defined(__powerpc__) || defined(__s390__) || \
+ defined(__sparc__)
+// For details, see interception.h
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
+ .weak symbol; \
+ .set symbol, ASM_WRAPPER_NAME(name)
+# define ASM_INTERCEPTOR_TRAMPOLINE(name)
+# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 0
+# else // Architecture supports interceptor trampoline
+// Keep trampoline implementation in sync with interception/interception.h
+# define ASM_WRAPPER_NAME(symbol) ___interceptor_##symbol
+# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
+ .weak symbol; \
+ .set symbol, __interceptor_trampoline_##name
+# define ASM_INTERCEPTOR_TRAMPOLINE(name) \
+ .weak __interceptor_##name; \
+ .set __interceptor_##name, ASM_WRAPPER_NAME(name); \
+ .globl __interceptor_trampoline_##name; \
+ ASM_TYPE_FUNCTION(__interceptor_trampoline_##name); \
+ __interceptor_trampoline_##name: \
+ CFI_STARTPROC; \
+ ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
+ CFI_ENDPROC; \
+ ASM_SIZE(__interceptor_trampoline_##name)
+# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
+# endif // Architecture supports interceptor trampoline
#else
# define ASM_HIDDEN(symbol)
# define ASM_TYPE_FUNCTION(symbol)
@@ -61,8 +105,15 @@
#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
defined(__Fuchsia__) || defined(__linux__))
// clang-format off
-#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits // NOLINT
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
// clang-format on
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif
+
+#if (defined(__x86_64__) || defined(__i386__)) && defined(__has_include) && __has_include(<cet.h>)
+#include <cet.h>
+#endif
+#ifndef _CET_ENDBR
+#define _CET_ENDBR
+#endif
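To make the trampoline machinery above concrete: on an ELF x86_64 target, ASM_INTERCEPTOR_TRAMPOLINE(pthread_create) expands, CFI directives aside, to roughly the following (a sketch derived from the macros, not text from the commit):

        .weak __interceptor_pthread_create
        .set __interceptor_pthread_create, ___interceptor_pthread_create
        .globl __interceptor_trampoline_pthread_create
        .type __interceptor_trampoline_pthread_create, %function
    __interceptor_trampoline_pthread_create:
        jmp __interceptor_pthread_create@plt
        .size __interceptor_trampoline_pthread_create, .-__interceptor_trampoline_pthread_create

ASM_TRAMPOLINE_ALIAS(pthread_create, pthread_create) then makes the public "pthread_create" symbol a weak alias of this trampoline, which tail-calls the preemptible __interceptor_pthread_create, itself a weak alias of ___interceptor_pthread_create.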
lib/tsan/sanitizer_common/sanitizer_atomic_clang.h
@@ -74,13 +74,12 @@ template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
- typedef typename T::Type Type;
- Type cmpv = *cmp;
- Type prev;
- prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
- if (prev == cmpv) return true;
- *cmp = prev;
- return false;
+ // Transitioned from __sync_val_compare_and_swap to support targets like
+ // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
+ // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
+ // match the __sync builtin memory order.
+ return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
template<typename T>
@@ -96,8 +95,8 @@ inline bool atomic_compare_exchange_weak(volatile T *a,
// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8 byte atomics. It has to
// follow the template definitions above.
-#if defined(_MIPS_SIM) && defined(_ABIO32)
- #include "sanitizer_atomic_clang_mips.h"
+#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
+# include "sanitizer_atomic_clang_mips.h"
#endif
#undef ATOMIC_ORDER
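The caller-visible contract of atomic_compare_exchange_strong is unchanged by the switch above: on failure, the observed value is written back through *cmp. A small retry-loop sketch (illustrative, not part of the commit):

    // Hypothetical helper using the __sanitizer atomic wrappers.
    static void IncrementIfPositive(atomic_uintptr_t *a) {
      uptr cur = atomic_load(a, memory_order_relaxed);
      while (cur > 0 &&
             !atomic_compare_exchange_strong(a, &cur, cur + 1,
                                             memory_order_acq_rel)) {
        // On failure the current value was stored into cur, so just retry.
      }
    }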
lib/tsan/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -18,7 +18,7 @@ namespace __sanitizer {
// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
-// internal spin lock mechanism to emulate atomic oprations when the size is
+// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
lib/tsan/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -0,0 +1,148 @@
+//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_chained_origin_depot.h"
+
+#include "sanitizer_stackdepotbase.h"
+
+namespace __sanitizer {
+
+namespace {
+struct ChainedOriginDepotDesc {
+ u32 here_id;
+ u32 prev_id;
+};
+
+struct ChainedOriginDepotNode {
+ using hash_type = u32;
+ u32 link;
+ u32 here_id;
+ u32 prev_id;
+
+ typedef ChainedOriginDepotDesc args_type;
+
+ bool eq(hash_type hash, const args_type &args) const;
+
+ static uptr allocated() { return 0; }
+
+ static hash_type hash(const args_type &args);
+
+ static bool is_valid(const args_type &args);
+
+ void store(u32 id, const args_type &args, hash_type other_hash);
+
+ args_type load(u32 id) const;
+
+ struct Handle {
+ const ChainedOriginDepotNode *node_ = nullptr;
+ u32 id_ = 0;
+ Handle(const ChainedOriginDepotNode *node, u32 id) : node_(node), id_(id) {}
+ bool valid() const { return node_; }
+ u32 id() const { return id_; }
+ int here_id() const { return node_->here_id; }
+ int prev_id() const { return node_->prev_id; }
+ };
+
+ static Handle get_handle(u32 id);
+
+ typedef Handle handle_type;
+};
+
+} // namespace
+
+static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
+
+bool ChainedOriginDepotNode::eq(hash_type hash, const args_type &args) const {
+ return here_id == args.here_id && prev_id == args.prev_id;
+}
+
+/* This is murmur2 hash for the 64->32 bit case.
+ It does not behave all that well because the keys have a very biased
+ distribution (I've seen 7-element buckets with the table only 14% full).
+
+ here_id is built of
+ * (1 bits) Reserved, zero.
+ * (23 bits) Sequential number (each part has its own sequence).
+ * (23 bits) Sequential number (each part has each own sequence).
+
+ prev_id has either the same distribution as here_id (but with a 3:8:21
+ split), or one of two reserved values (-1) or (-2). Either case can
+ dominate depending on the workload.
+*/
+ChainedOriginDepotNode::hash_type ChainedOriginDepotNode::hash(
+ const args_type &args) {
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed;
+ u32 k = args.here_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ k = args.prev_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+}
+
+bool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }
+
+void ChainedOriginDepotNode::store(u32 id, const args_type &args,
+ hash_type other_hash) {
+ here_id = args.here_id;
+ prev_id = args.prev_id;
+}
+
+ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {
+ args_type ret = {here_id, prev_id};
+ return ret;
+}
+
+ChainedOriginDepotNode::Handle ChainedOriginDepotNode::get_handle(u32 id) {
+ return Handle(&depot.nodes[id], id);
+}
+
+ChainedOriginDepot::ChainedOriginDepot() {}
+
+StackDepotStats ChainedOriginDepot::GetStats() const {
+ return depot.GetStats();
+}
+
+bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
+ ChainedOriginDepotDesc desc = {here_id, prev_id};
+ bool inserted;
+ *new_id = depot.Put(desc, &inserted);
+ return inserted;
+}
+
+u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
+ ChainedOriginDepotDesc desc = depot.Get(id);
+ *other = desc.prev_id;
+ return desc.here_id;
+}
+
+void ChainedOriginDepot::LockAll() { depot.LockAll(); }
+
+void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
+
+void ChainedOriginDepot::TestOnlyUnmap() { depot.TestOnlyUnmap(); }
+
+} // namespace __sanitizer
lib/tsan/sanitizer_common/sanitizer_chained_origin_depot.h
@@ -0,0 +1,46 @@
+//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H
+#define SANITIZER_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class ChainedOriginDepot {
+ public:
+ ChainedOriginDepot();
+
+ // Gets the statistic of the origin chain storage.
+ StackDepotStats GetStats() const;
+
+ // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
+ // If successful, returns true and the new chain id new_id.
+ // If the same element already exists, returns false and sets new_id to the
+ // existing ID.
+ bool Put(u32 here_id, u32 prev_id, u32 *new_id);
+
+ // Retrieves the stored StackDepot ID for the given origin ID.
+ u32 Get(u32 id, u32 *other);
+
+ void LockAll();
+ void UnlockAll();
+ void TestOnlyUnmap();
+
+ private:
+ ChainedOriginDepot(const ChainedOriginDepot &) = delete;
+ void operator=(const ChainedOriginDepot &) = delete;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_CHAINED_ORIGIN_DEPOT_H
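A rough sketch of the Put/Get round trip (illustrative only; in a real tool the ids would come from the stack depot):

    static ChainedOriginDepot chained_origins;

    static u32 ChainOrigin(u32 stack_id, u32 prev_origin) {
      u32 chained_id;
      // Put returns false if an identical (stack_id, prev_origin) chain
      // already existed; either way chained_id names the stored chain.
      chained_origins.Put(stack_id, prev_origin, &chained_id);
      return chained_id;
    }

    static u32 UnchainOrigin(u32 chained_id, u32 *prev_origin) {
      // Returns the stored stack id and writes the previous chain id.
      return chained_origins.Get(chained_id, prev_origin);
    }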
lib/tsan/sanitizer_common/sanitizer_common.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+
#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
@@ -44,15 +46,41 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Die();
}
recursion_count++;
- Report("ERROR: %s failed to "
- "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
- SanitizerToolName, mmap_type, size, size, mem_type, err);
+ if (ErrorIsOOM(err)) {
+ ERROR_OOM("failed to %s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ mmap_type, size, size, mem_type, err);
+ } else {
+ Report(
+ "ERROR: %s failed to "
+ "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, mmap_type, size, size, mem_type, err);
+ }
#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
}
+void NORETURN ReportMunmapFailureAndDie(void *addr, uptr size, error_t err,
+ bool raw_report) {
+ static int recursion_count;
+ if (raw_report || recursion_count) {
+ // If raw report is requested or we went into recursion, just die. The
+ // Report() and CHECK calls below may call munmap recursively and fail.
+ RawWrite("ERROR: Failed to munmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report(
+ "ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p (error "
+ "code: %d)\n",
+ SanitizerToolName, size, size, addr, err);
+#if !SANITIZER_GO
+ DumpProcessMap();
+#endif
+ UNREACHABLE("unable to unmap");
+}
+
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
@@ -138,13 +166,21 @@ void LoadedModule::set(const char *module_name, uptr base_address,
set(module_name, base_address);
arch_ = arch;
internal_memcpy(uuid_, uuid, sizeof(uuid_));
+ uuid_size_ = kModuleUUIDSize;
instrumented_ = instrumented;
}
+void LoadedModule::setUuid(const char *uuid, uptr size) {
+ if (size > kModuleUUIDSize)
+ size = kModuleUUIDSize;
+ internal_memcpy(uuid_, uuid, size);
+ uuid_size_ = size;
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
base_address_ = 0;
- max_executable_address_ = 0;
+ max_address_ = 0;
full_name_ = nullptr;
arch_ = kModuleArchUnknown;
internal_memset(uuid_, 0, kModuleUUIDSize);
@@ -162,8 +198,7 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
AddressRange *r =
new(mem) AddressRange(beg, end, executable, writable, name);
ranges_.push_back(r);
- if (executable && end > max_executable_address_)
- max_executable_address_ = end;
+ max_address_ = Max(max_address_, end);
}
bool LoadedModule::containsAddress(uptr address) const {
@@ -301,18 +336,22 @@ struct MallocFreeHook {
static MallocFreeHook MFHooks[kMaxMallocFreeHooks];
-void RunMallocHooks(const void *ptr, uptr size) {
+void RunMallocHooks(void *ptr, uptr size) {
+ __sanitizer_malloc_hook(ptr, size);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].malloc_hook;
- if (!hook) return;
+ if (!hook)
+ break;
hook(ptr, size);
}
}
-void RunFreeHooks(const void *ptr) {
+void RunFreeHooks(void *ptr) {
+ __sanitizer_free_hook(ptr);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].free_hook;
- if (!hook) return;
+ if (!hook)
+ break;
hook(ptr);
}
}
@@ -338,6 +377,13 @@ void SleepForSeconds(unsigned seconds) {
}
void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }
+void WaitForDebugger(unsigned seconds, const char *label) {
+ if (seconds) {
+ Report("Sleeping for %u second(s) %s\n", seconds, label);
+ SleepForSeconds(seconds);
+ }
+}
+
} // namespace __sanitizer
using namespace __sanitizer;
@@ -360,4 +406,16 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
+
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
+ uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
+ (void)ptr;
+}
+
} // extern "C"
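Given the weak no-op defaults above, a client can either define strong versions of __sanitizer_malloc_hook/__sanitizer_free_hook or register hooks at runtime. A sketch of the runtime route, following the declaration of __sanitizer_install_malloc_and_free_hooks shown above (hook bodies are hypothetical):

    static void MyMallocHook(const void *ptr, uptr size) {
      // e.g. account the allocation against the current call site
    }
    static void MyFreeHook(const void *ptr) {}

    static void InstallMyHooks() {
      // A zero result is assumed here to mean the fixed-size hook table is full.
      if (!__sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook))
        Report("WARNING: failed to install malloc/free hooks\n");
    }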
lib/tsan/sanitizer_common/sanitizer_common.h
@@ -16,7 +16,6 @@
#define SANITIZER_COMMON_H
#include "sanitizer_flags.h"
-#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
@@ -118,9 +117,15 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
+bool MprotectReadWrite(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
+#if SANITIZER_WINDOWS
+// Zero previously mmap'd memory. Currently used only on Windows.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
+#endif
+
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
@@ -171,8 +176,8 @@ void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
-void RunMallocHooks(const void *ptr, uptr size);
-void RunFreeHooks(const void *ptr);
+void RunMallocHooks(void *ptr, uptr size);
+void RunFreeHooks(void *ptr);
class ReservedAddressRange {
public:
@@ -192,12 +197,13 @@ class ReservedAddressRange {
};
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
- /*out*/uptr *stats, uptr stats_size);
+ /*out*/ uptr *stats);
// Parse the contents of /proc/self/smaps and generate a memory profile.
-// |cb| is a tool-specific callback that fills the |stats| array containing
-// |stats_size| elements.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
+// |cb| is a tool-specific callback that fills the |stats| array.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats);
+void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
+ uptr smaps_len);
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// a constructor, so all instances of LowLevelAllocator should be
@@ -206,6 +212,7 @@ class LowLevelAllocator {
public:
// Requires an external lock.
void *Allocate(uptr size);
+
private:
char *allocated_end_;
char *allocated_current_;
@@ -222,8 +229,8 @@ void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
-void Printf(const char *format, ...);
-void Report(const char *format, ...);
+void Printf(const char *format, ...) FORMAT(1, 2);
+void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \
do { \
@@ -237,12 +244,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
- ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
- ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
+ ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
+ ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
- static void Lock() ACQUIRE(mutex_);
- static void Unlock() RELEASE(mutex_);
- static void CheckLocked() CHECK_LOCKED(mutex_);
+ static void Lock() SANITIZER_ACQUIRE(mutex_);
+ static void Unlock() SANITIZER_RELEASE(mutex_);
+ static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
private:
static atomic_uintptr_t reporting_thread_;
@@ -285,7 +292,7 @@ void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());
void InitializeCoverage(bool enabled, const char *coverage_dir);
@@ -294,6 +301,7 @@ void InitTlsSize();
uptr GetTlsSize();
// Other
+void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
@@ -309,6 +317,20 @@ CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report = false);
+void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
+ bool raw_report = false);
+
+// Returns true if the platform-specific error reported is an OOM error.
+bool ErrorIsOOM(error_t err);
+
+// This reports an error in the form:
+//
+// `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
+//
+// Downstream tools that read sanitizer output will know that errors starting
+// in this format are specifically OOM errors.
+#define ERROR_OOM(err_msg, ...) \
+ Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
// Specific tools may override behavior of "Die" function to do tool-specific
// job.
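For example, the ReportOutOfMemory call earlier in this commit, ERROR_OOM("allocator is trying to allocate 0x%zx bytes\n", requested_size), produces a line of the form below (the size is made up; SanitizerToolName is "ThreadSanitizer" here):

    ERROR: ThreadSanitizer: out of memory: allocator is trying to allocate 0x10000000000 bytes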
@@ -325,12 +347,6 @@ void SetUserDieCallback(DieCallbackType callback);
void SetCheckUnwindCallback(void (*callback)());
-// Callback will be called if soft_rss_limit_mb is given and the limit is
-// exceeded (exceeded==true) or if rss went down below the limit
-// (exceeded==false).
-// The callback should be registered once at the tool init time.
-void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
-
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
@@ -371,7 +387,7 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
const char *alt_tool_name = nullptr);
-void ReportMmapWriteExec(int prot);
+void ReportMmapWriteExec(int prot, int mflags);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -419,9 +435,7 @@ inline uptr LeastSignificantSetBitIndex(uptr x) {
return up;
}
-inline bool IsPowerOfTwo(uptr x) {
- return (x & (x - 1)) == 0;
-}
+inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }
inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
@@ -433,16 +447,16 @@ inline uptr RoundUpToPowerOfTwo(uptr size) {
return 1ULL << (up + 1);
}
-inline uptr RoundUpTo(uptr size, uptr boundary) {
+inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
-inline uptr RoundDownTo(uptr x, uptr boundary) {
+inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
-inline bool IsAligned(uptr a, uptr alignment) {
+inline constexpr bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
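A few worked values for the rounding helpers above (a sketch, not from the commit); the boundary/alignment argument must be a power of two:

    static void RoundingExamples() {
      CHECK(IsPowerOfTwo(4096));
      CHECK_EQ(RoundUpTo(17, 16), 32);    // next multiple of 16
      CHECK_EQ(RoundDownTo(17, 16), 16);  // previous multiple of 16
      CHECK(IsAligned(RoundUpTo(1000, 4096), 4096));
    }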
@@ -461,6 +475,10 @@ template <class T>
constexpr T Max(T a, T b) {
return a > b ? a : b;
}
+template <class T>
+constexpr T Abs(T a) {
+ return a < 0 ? -a : a;
+}
template<class T> void Swap(T& a, T& b) {
T tmp = a;
a = b;
@@ -502,8 +520,8 @@ class InternalMmapVectorNoCtor {
return data_[i];
}
void push_back(const T &element) {
- CHECK_LE(size_, capacity());
- if (size_ == capacity()) {
+ if (UNLIKELY(size_ >= capacity())) {
+ CHECK_EQ(size_, capacity());
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
Realloc(new_capacity);
}
@@ -563,7 +581,7 @@ class InternalMmapVectorNoCtor {
}
private:
- void Realloc(uptr new_capacity) {
+ NOINLINE void Realloc(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
CHECK_LE(size_, new_capacity);
uptr new_capacity_bytes =
@@ -618,7 +636,7 @@ class InternalScopedString {
buffer_.resize(1);
buffer_[0] = '\0';
}
- void append(const char *format, ...);
+ void append(const char *format, ...) FORMAT(2, 3);
const char *data() const { return buffer_.data(); }
char *data() { return buffer_.data(); }
@@ -670,11 +688,9 @@ void Sort(T *v, uptr size, Compare comp = {}) {
// Works like std::lower_bound: finds the first element that is not less
// than the val.
-template <class Container,
+template <class Container, class T,
class Compare = CompareLess<typename Container::value_type>>
-uptr InternalLowerBound(const Container &v,
- const typename Container::value_type &val,
- Compare comp = {}) {
+uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
uptr first = 0;
uptr last = v.size();
while (last > first) {
@@ -697,7 +713,9 @@ enum ModuleArch {
kModuleArchARMV7S,
kModuleArchARMV7K,
kModuleArchARM64,
- kModuleArchRISCV64
+ kModuleArchLoongArch64,
+ kModuleArchRISCV64,
+ kModuleArchHexagon
};
// Sorts and removes duplicates from the container.
@@ -721,12 +739,15 @@ void SortAndDedup(Container &v, Compare comp = {}) {
v.resize(last + 1);
}
+constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);
+
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
InternalMmapVectorNoCtor<char> *buff,
- uptr max_len = 1 << 26, error_t *errno_p = nullptr);
+ uptr max_len = kDefaultFileMaxSize,
+ error_t *errno_p = nullptr);
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
@@ -737,9 +758,12 @@ bool ReadFileToVector(const char *file_name,
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
- uptr *read_len, uptr max_len = 1 << 26,
+ uptr *read_len, uptr max_len = kDefaultFileMaxSize,
error_t *errno_p = nullptr);
+int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
+ uptr *pc_offset);
+
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
@@ -762,14 +786,22 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "armv7k";
case kModuleArchARM64:
return "arm64";
+ case kModuleArchLoongArch64:
+ return "loongarch64";
case kModuleArchRISCV64:
return "riscv64";
+ case kModuleArchHexagon:
+ return "hexagon";
}
CHECK(0 && "Invalid module arch");
return "";
}
+#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
+#else
+const uptr kModuleUUIDSize = 32;
+#endif
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
@@ -779,8 +811,9 @@ class LoadedModule {
LoadedModule()
: full_name_(nullptr),
base_address_(0),
- max_executable_address_(0),
+ max_address_(0),
arch_(kModuleArchUnknown),
+ uuid_size_(0),
instrumented_(false) {
internal_memset(uuid_, 0, kModuleUUIDSize);
ranges_.clear();
@@ -788,6 +821,7 @@ class LoadedModule {
void set(const char *module_name, uptr base_address);
void set(const char *module_name, uptr base_address, ModuleArch arch,
u8 uuid[kModuleUUIDSize], bool instrumented);
+ void setUuid(const char *uuid, uptr size);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
const char *name = nullptr);
@@ -795,9 +829,10 @@ class LoadedModule {
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
- uptr max_executable_address() const { return max_executable_address_; }
+ uptr max_address() const { return max_address_; }
ModuleArch arch() const { return arch_; }
const u8 *uuid() const { return uuid_; }
+ uptr uuid_size() const { return uuid_size_; }
bool instrumented() const { return instrumented_; }
struct AddressRange {
@@ -824,8 +859,9 @@ class LoadedModule {
private:
char *full_name_; // Owned.
uptr base_address_;
- uptr max_executable_address_;
+ uptr max_address_;
ModuleArch arch_;
+ uptr uuid_size_;
u8 uuid_[kModuleUUIDSize];
bool instrumented_;
IntrusiveList<AddressRange> ranges_;
@@ -883,13 +919,13 @@ void WriteToSyslog(const char *buffer);
#define SANITIZER_WIN_TRACE 0
#endif
-#if SANITIZER_MAC || SANITIZER_WIN_TRACE
+#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
@@ -951,7 +987,7 @@ struct SignalContext {
uptr sp;
uptr bp;
bool is_memory_access;
- enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
+ enum WriteFlag { Unknown, Read, Write } write_flag;
// In some cases the kernel cannot provide the true faulting address; `addr`
// will be zero then. This field allows distinguishing between these cases
@@ -996,7 +1032,6 @@ struct SignalContext {
};
void InitializePlatformEarly();
-void MaybeReexec();
template <typename Fn>
class RunOnDestruction {
@@ -1049,31 +1084,10 @@ inline u32 GetNumberOfCPUsCached() {
return NumberOfCPUsCached;
}
-template <typename T>
-class ArrayRef {
- public:
- ArrayRef() {}
- ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
-
- T *begin() { return begin_; }
- T *end() { return end_; }
-
- private:
- T *begin_ = nullptr;
- T *end_ = nullptr;
-};
-
-#define PRINTF_128(v) \
- (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
- (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)), \
- (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)), \
- (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)), \
- (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
-
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
- __sanitizer::LowLevelAllocator &alloc) { // NOLINT
+ __sanitizer::LowLevelAllocator &alloc) {
return alloc.Allocate(size);
}
lib/tsan/sanitizer_common/sanitizer_common_interceptors.inc
@@ -21,19 +21,13 @@
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_FD_ACCESS
// COMMON_INTERCEPTOR_SET_THREAD_NAME
-// COMMON_INTERCEPTOR_ON_DLOPEN
+// COMMON_INTERCEPTOR_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
-// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
-// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
-// COMMON_INTERCEPTOR_MUTEX_UNLOCK
-// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
-// COMMON_INTERCEPTOR_MEMSET_IMPL
-// COMMON_INTERCEPTOR_MEMMOVE_IMPL
-// COMMON_INTERCEPTOR_MEMCPY_IMPL
// COMMON_INTERCEPTOR_MMAP_IMPL
+// COMMON_INTERCEPTOR_MUNMAP_IMPL
// COMMON_INTERCEPTOR_COPY_STRING
// COMMON_INTERCEPTOR_STRNDUP_IMPL
// COMMON_INTERCEPTOR_STRERROR
@@ -132,14 +126,75 @@ extern const short *_toupper_tab_;
extern const short *_tolower_tab_;
#endif
-// Platform-specific options.
-#if SANITIZER_MAC
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
-#elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
-#else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
-#endif // SANITIZER_MAC
+#if SANITIZER_MUSL && \
+ (defined(__i386__) || defined(__arm__) || SANITIZER_MIPS32 || SANITIZER_PPC32)
+// musl 1.2.0 on existing 32-bit architectures uses new symbol names for the
+// time-related functions that take 64-bit time_t values. See
+// https://musl.libc.org/time64.html
+#define adjtime __adjtime64
+#define adjtimex __adjtimex_time64
+#define aio_suspend __aio_suspend_time64
+#define clock_adjtime __clock_adjtime64
+#define clock_getres __clock_getres_time64
+#define clock_gettime __clock_gettime64
+#define clock_nanosleep __clock_nanosleep_time64
+#define clock_settime __clock_settime64
+#define cnd_timedwait __cnd_timedwait_time64
+#define ctime __ctime64
+#define ctime_r __ctime64_r
+#define difftime __difftime64
+#define dlsym __dlsym_time64
+#define fstatat __fstatat_time64
+#define fstat __fstat_time64
+#define ftime __ftime64
+#define futimens __futimens_time64
+#define futimesat __futimesat_time64
+#define futimes __futimes_time64
+#define getitimer __getitimer_time64
+#define getrusage __getrusage_time64
+#define gettimeofday __gettimeofday_time64
+#define gmtime __gmtime64
+#define gmtime_r __gmtime64_r
+#define localtime __localtime64
+#define localtime_r __localtime64_r
+#define lstat __lstat_time64
+#define lutimes __lutimes_time64
+#define mktime __mktime64
+#define mq_timedreceive __mq_timedreceive_time64
+#define mq_timedsend __mq_timedsend_time64
+#define mtx_timedlock __mtx_timedlock_time64
+#define nanosleep __nanosleep_time64
+#define ppoll __ppoll_time64
+#define pselect __pselect_time64
+#define pthread_cond_timedwait __pthread_cond_timedwait_time64
+#define pthread_mutex_timedlock __pthread_mutex_timedlock_time64
+#define pthread_rwlock_timedrdlock __pthread_rwlock_timedrdlock_time64
+#define pthread_rwlock_timedwrlock __pthread_rwlock_timedwrlock_time64
+#define pthread_timedjoin_np __pthread_timedjoin_np_time64
+#define recvmmsg __recvmmsg_time64
+#define sched_rr_get_interval __sched_rr_get_interval_time64
+#define select __select_time64
+#define semtimedop __semtimedop_time64
+#define sem_timedwait __sem_timedwait_time64
+#define setitimer __setitimer_time64
+#define settimeofday __settimeofday_time64
+#define sigtimedwait __sigtimedwait_time64
+#define stat __stat_time64
+#define stime __stime64
+#define thrd_sleep __thrd_sleep_time64
+#define timegm __timegm_time64
+#define timerfd_gettime __timerfd_gettime64
+#define timerfd_settime __timerfd_settime64
+#define timer_gettime __timer_gettime64
+#define timer_settime __timer_settime64
+#define timespec_get __timespec_get_time64
+#define time __time64
+#define utimensat __utimensat_time64
+#define utimes __utimes_time64
+#define utime __utime64
+#define wait3 __wait3_time64
+#define wait4 __wait4_time64
+#endif
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
@@ -153,26 +208,6 @@ extern const short *_tolower_tab_;
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
#endif
-#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
-#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
-#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
-#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR
-#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID
-#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}
-#endif
-
#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif
@@ -204,11 +239,11 @@ extern const short *_tolower_tab_;
#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
- common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
+ common_flags()->strict_string_checks ? (internal_strlen(s)) + 1 : (n) )
-#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
- CheckNoDeepBind(filename, flag);
+#ifndef COMMON_INTERCEPTOR_DLOPEN
+#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ CheckNoDeepBind(filename, flag); REAL(dlopen)(filename, flag); })
#endif
#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
@@ -256,53 +291,17 @@ extern const short *_tolower_tab_;
COMMON_INTERCEPT_FUNCTION(fn)
#endif
-#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
- return internal_memset(dst, v, size); \
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
- if (common_flags()->intercept_intrin) \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- return REAL(memset)(dst, v, size); \
- }
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
- return internal_memmove(dst, src, size); \
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
- if (common_flags()->intercept_intrin) { \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
- } \
- return REAL(memmove)(dst, src, size); \
- }
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
- return internal_memmove(dst, src, size); \
- } \
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
- if (common_flags()->intercept_intrin) { \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
- } \
- return REAL(memcpy)(dst, src, size); \
- }
-#endif
-
#ifndef COMMON_INTERCEPTOR_MMAP_IMPL
#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
off) \
{ return REAL(mmap)(addr, sz, prot, flags, fd, off); }
#endif
+#ifndef COMMON_INTERCEPTOR_MUNMAP_IMPL
+#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
+ { return REAL(munmap)(addr, sz); }
+#endif
+
#ifndef COMMON_INTERCEPTOR_COPY_STRING
#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
#endif
@@ -315,9 +314,11 @@ extern const short *_tolower_tab_;
if (common_flags()->intercept_strndup) { \
COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \
} \
- COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
- internal_memcpy(new_mem, s, copy_length); \
- new_mem[copy_length] = '\0'; \
+ if (new_mem) { \
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
+ internal_memcpy(new_mem, s, copy_length); \
+ new_mem[copy_length] = '\0'; \
+ } \
return new_mem;
#endif
@@ -435,7 +436,7 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
char *domain = REAL(textdomain)(domainname);
if (domain) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, internal_strlen(domain) + 1);
}
return domain;
}
@@ -575,8 +576,8 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
static inline void StrstrCheck(void *ctx, char *r, const char *s1,
const char *s2) {
- uptr len1 = REAL(strlen)(s1);
- uptr len2 = REAL(strlen)(s2);
+ uptr len1 = internal_strlen(s1);
+ uptr len2 = internal_strlen(s2);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
}
@@ -640,10 +641,10 @@ INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
// for subsequent calls). We do not need to check strtok's result.
// As the delimiters can change, we check them every call.
if (str != nullptr) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
}
COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
- REAL(strlen)(delimiters) + 1);
+ internal_strlen(delimiters) + 1);
return REAL(strtok)(str, delimiters);
} else {
// However, when strict_string_checks is disabled we cannot check the
@@ -657,11 +658,11 @@ INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
char *result = REAL(strtok)(str, delimiters);
if (result != nullptr) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, result, internal_strlen(result) + 1);
} else if (str != nullptr) {
// No delimiters were found; it's safe to assume that the entire str was
// scanned.
- COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
}
return result;
}
@@ -706,7 +707,7 @@ INTERCEPTOR(char*, strchr, const char *s, int c) {
if (common_flags()->intercept_strchr) {
// Keep strlen as macro argument, as macro may ignore it.
COMMON_INTERCEPTOR_READ_STRING(ctx, s,
- (result ? result - s : REAL(strlen)(s)) + 1);
+ (result ? result - s : internal_strlen(s)) + 1);
}
return result;
}
@@ -737,7 +738,7 @@ INTERCEPTOR(char*, strrchr, const char *s, int c) {
return internal_strrchr(s, c);
COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
if (common_flags()->intercept_strchr)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
return REAL(strrchr)(s, c);
}
#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
@@ -751,7 +752,7 @@ INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);
SIZE_T r = REAL(strspn)(s1, s2);
if (common_flags()->intercept_strspn) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
}
return r;
@@ -762,7 +763,7 @@ INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {
COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);
SIZE_T r = REAL(strcspn)(s1, s2);
if (common_flags()->intercept_strspn) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
}
return r;
@@ -781,9 +782,9 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);
char *r = REAL(strpbrk)(s1, s2);
if (common_flags()->intercept_strpbrk) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1,
- r ? r - s1 + 1 : REAL(strlen)(s1) + 1);
+ r ? r - s1 + 1 : internal_strlen(s1) + 1);
}
return r;
}
@@ -793,57 +794,6 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#define INIT_STRPBRK
#endif
-#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
-}
-
-#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
-#else
-#define INIT_MEMSET
-#endif
-
-#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-}
-
-#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
-#else
-#define INIT_MEMMOVE
-#endif
-
-#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- // N.B.: If we switch this to internal_ we'll have to use internal_memmove
- // due to memcpy being an alias of memmove on OS X.
- void *ctx;
-#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
-#else
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-#endif
-}
-
-#define INIT_MEMCPY \
- do { \
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
- COMMON_INTERCEPT_FUNCTION(memcpy); \
- } else { \
- ASSIGN_REAL(memcpy, memmove); \
- } \
- CHECK(REAL(memcpy)); \
- } while (false)
-
-#else
-#define INIT_MEMCPY
-#endif
-
#if SANITIZER_INTERCEPT_MEMCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
const void *s1, const void *s2, uptr n,
@@ -1251,7 +1201,7 @@ INTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) {
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(fgets)(s, size, file);
if (res)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
return res;
}
#define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets)
@@ -1264,8 +1214,8 @@ INTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) {
// libc file streams can call user-supplied functions, see fopencookie.
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file);
- if (!SANITIZER_MAC || s) { // `fputs(NULL, file)` is supported on Darwin.
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (!SANITIZER_APPLE || s) { // `fputs(NULL, file)` is supported on Darwin.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
}
return REAL(fputs)(s, file);
}
@@ -1279,8 +1229,8 @@ INTERCEPTOR(int, puts, char *s) {
// libc file streams can call user-supplied functions, see fopencookie.
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
- if (!SANITIZER_MAC || s) { // `puts(NULL)` is supported on Darwin.
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (!SANITIZER_APPLE || s) { // `puts(NULL)` is supported on Darwin.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
}
return REAL(puts)(s);
}
@@ -1295,12 +1245,21 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
- int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
+ static const int PR_SET_VMA = 0x53564d41;
+ static const int PR_SCHED_CORE = 62;
+ static const int PR_SCHED_CORE_GET = 0;
+ if (option == PR_SET_VMA && arg2 == 0UL) {
+ char *name = (char *)arg5;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+ }
+ int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
if (option == PR_SET_NAME) {
char buff[16];
internal_strncpy(buff, (char *)arg2, 15);
buff[15] = 0;
COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ } else if (res != -1 && option == PR_SCHED_CORE && arg2 == PR_SCHED_CORE_GET) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64*)(arg5), sizeof(u64));
}
return res;
}
@@ -1334,7 +1293,7 @@ static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
// Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
// can point to shared memory and tsan would report a data race.
COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
- REAL(strlen(tm->tm_zone)) + 1);
+ internal_strlen(tm->tm_zone) + 1);
}
#endif
}
@@ -1387,7 +1346,7 @@ INTERCEPTOR(char *, ctime, unsigned long *timep) {
char *res = REAL(ctime)(timep);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1400,7 +1359,7 @@ INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
char *res = REAL(ctime_r)(timep, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1413,7 +1372,7 @@ INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
char *res = REAL(asctime)(tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1426,7 +1385,7 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
char *res = REAL(asctime_r)(tm, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1463,7 +1422,7 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
if (format)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -1532,6 +1491,16 @@ VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+
+INTERCEPTOR(int, __isoc23_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc23_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc23_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, scanf, const char *format, ...)
@@ -1552,6 +1521,15 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+
+INTERCEPTOR(int, __isoc23_scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_scanf, __isoc23_vscanf, format)
+
+INTERCEPTOR(int, __isoc23_fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_fscanf, __isoc23_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc23_sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_sscanf, __isoc23_vsscanf, str, format)
#endif
#endif
@@ -1575,7 +1553,13 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
- COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_scanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_sscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_fscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vsscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vfscanf);
#else
#define INIT_ISOC99_SCANF
#endif
@@ -1843,9 +1827,9 @@ INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
const ioctl_desc *desc = ioctl_lookup(request);
ioctl_desc decoded_desc;
if (!desc) {
- VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ VPrintf(2, "Decoding unknown ioctl 0x%lx\n", request);
if (!ioctl_decode(request, &decoded_desc))
- Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ Printf("WARNING: failed decoding unknown ioctl 0x%lx\n", request);
else
desc = &decoded_desc;
}
@@ -1869,26 +1853,26 @@ UNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
if (pwd->pw_name)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_name,
- REAL(strlen)(pwd->pw_name) + 1);
+ internal_strlen(pwd->pw_name) + 1);
if (pwd->pw_passwd)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_passwd,
- REAL(strlen)(pwd->pw_passwd) + 1);
+ internal_strlen(pwd->pw_passwd) + 1);
#if !SANITIZER_ANDROID
if (pwd->pw_gecos)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,
- REAL(strlen)(pwd->pw_gecos) + 1);
+ internal_strlen(pwd->pw_gecos) + 1);
#endif
-#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_APPLE || SANITIZER_FREEBSD || SANITIZER_NETBSD
if (pwd->pw_class)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,
- REAL(strlen)(pwd->pw_class) + 1);
+ internal_strlen(pwd->pw_class) + 1);
#endif
if (pwd->pw_dir)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_dir,
- REAL(strlen)(pwd->pw_dir) + 1);
+ internal_strlen(pwd->pw_dir) + 1);
if (pwd->pw_shell)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_shell,
- REAL(strlen)(pwd->pw_shell) + 1);
+ internal_strlen(pwd->pw_shell) + 1);
}
}
@@ -1897,13 +1881,13 @@ UNUSED static void unpoison_group(void *ctx, __sanitizer_group *grp) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
if (grp->gr_name)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_name,
- REAL(strlen)(grp->gr_name) + 1);
+ internal_strlen(grp->gr_name) + 1);
if (grp->gr_passwd)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_passwd,
- REAL(strlen)(grp->gr_passwd) + 1);
+ internal_strlen(grp->gr_passwd) + 1);
char **p = grp->gr_mem;
for (; *p; ++p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_mem,
(p - grp->gr_mem + 1) * sizeof(*p));
@@ -1916,7 +1900,7 @@ INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
__sanitizer_passwd *res = REAL(getpwnam)(name);
unpoison_passwd(ctx, res);
return res;
@@ -1931,7 +1915,7 @@ INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
__sanitizer_group *res = REAL(getgrnam)(name);
unpoison_group(ctx, res);
return res;
@@ -1957,7 +1941,7 @@ INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -1984,7 +1968,7 @@ INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
char *buf, SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -2229,8 +2213,20 @@ INTERCEPTOR(int, clock_getcpuclockid, pid_t pid,
return res;
}
-#define INIT_CLOCK_GETCPUCLOCKID \
- COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);
+INTERCEPTOR(int, pthread_getcpuclockid, uptr thread,
+ __sanitizer_clockid_t *clockid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getcpuclockid, thread, clockid);
+ int res = REAL(pthread_getcpuclockid)(thread, clockid);
+ if (!res && clockid) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);
+ }
+ return res;
+}
+
+#define INIT_CLOCK_GETCPUCLOCKID \
+ COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid); \
+ COMMON_INTERCEPT_FUNCTION(pthread_getcpuclockid);
#else
#define INIT_CLOCK_GETCPUCLOCKID
#endif
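A caller-side sketch (not from the patch) of why the new interceptor marks *clockid as written on success: callers typically pass the out-parameter straight to clock_gettime(), so it must be unpoisoned first.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

int main(void) {
  clockid_t cid;  // written by pthread_getcpuclockid on success
  if (pthread_getcpuclockid(pthread_self(), &cid) == 0) {
    struct timespec ts;
    if (clock_gettime(cid, &ts) == 0)  // reads 'cid'
      printf("thread CPU time: %ld.%09ld s\n", (long)ts.tv_sec,
             (long)ts.tv_nsec);
  }
  return 0;
}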
@@ -2289,7 +2285,7 @@ static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));
for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {
char *p = pglob->gl_pathv[i];
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, internal_strlen(p) + 1);
}
}
@@ -2319,19 +2315,19 @@ static void *wrapped_gl_readdir(void *dir) {
static void *wrapped_gl_opendir(const char *s) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
return pglob_copy->gl_opendir(s);
}
static int wrapped_gl_lstat(const char *s, void *st) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
return pglob_copy->gl_lstat(s, st);
}
static int wrapped_gl_stat(const char *s, void *st) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
return pglob_copy->gl_stat(s, st);
}
@@ -2410,6 +2406,136 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
#define INIT_GLOB64
#endif // SANITIZER_INTERCEPT_GLOB64
+#if SANITIZER_INTERCEPT___B64_TO
+INTERCEPTOR(int, __b64_ntop, unsigned char const *src, SIZE_T srclength,
+ char *target, SIZE_T targsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __b64_ntop, src, srclength, target, targsize);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, srclength);
+ int res = REAL(__b64_ntop)(src, srclength, target, targsize);
+ if (res >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res + 1);
+ return res;
+}
+INTERCEPTOR(int, __b64_pton, char const *src, char *target, SIZE_T targsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __b64_pton, src, target, targsize);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
+ int res = REAL(__b64_pton)(src, target, targsize);
+ if (res >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res);
+ return res;
+}
+#define INIT___B64_TO \
+ COMMON_INTERCEPT_FUNCTION(__b64_ntop); \
+ COMMON_INTERCEPT_FUNCTION(__b64_pton);
+#else // SANITIZER_INTERCEPT___B64_TO
+#define INIT___B64_TO
+#endif // SANITIZER_INTERCEPT___B64_TO
+
+#if SANITIZER_INTERCEPT_DN_COMP_EXPAND
+# if __GLIBC_PREREQ(2, 34)
+// Changed with https://sourceware.org/git/?p=glibc.git;h=640bbdf
+# define DN_COMP_INTERCEPTOR_NAME dn_comp
+# define DN_EXPAND_INTERCEPTOR_NAME dn_expand
+# else
+# define DN_COMP_INTERCEPTOR_NAME __dn_comp
+# define DN_EXPAND_INTERCEPTOR_NAME __dn_expand
+# endif
+INTERCEPTOR(int, DN_COMP_INTERCEPTOR_NAME, unsigned char *exp_dn,
+ unsigned char *comp_dn, int length, unsigned char **dnptrs,
+ unsigned char **lastdnptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, DN_COMP_INTERCEPTOR_NAME, exp_dn, comp_dn,
+ length, dnptrs, lastdnptr);
+ int res = REAL(DN_COMP_INTERCEPTOR_NAME)(exp_dn, comp_dn, length, dnptrs,
+ lastdnptr);
+ if (res >= 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, comp_dn, res);
+ if (dnptrs && lastdnptr) {
+ unsigned char **p = dnptrs;
+ for (; p != lastdnptr && *p; ++p)
+ ;
+ if (p != lastdnptr)
+ ++p;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dnptrs, (p - dnptrs) * sizeof(*p));
+ }
+ }
+ return res;
+}
+INTERCEPTOR(int, DN_EXPAND_INTERCEPTOR_NAME, unsigned char const *base,
+ unsigned char const *end, unsigned char const *src, char *dest,
+ int space) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, DN_EXPAND_INTERCEPTOR_NAME, base, end, src,
+ dest, space);
+  // TODO: add a read check if a __dn_comp intercept is added
+ int res = REAL(DN_EXPAND_INTERCEPTOR_NAME)(base, end, src, dest, space);
+ if (res >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, internal_strlen(dest) + 1);
+ return res;
+}
+# define INIT_DN_COMP_EXPAND \
+ COMMON_INTERCEPT_FUNCTION(DN_COMP_INTERCEPTOR_NAME); \
+ COMMON_INTERCEPT_FUNCTION(DN_EXPAND_INTERCEPTOR_NAME);
+#else // SANITIZER_INTERCEPT_DN_COMP_EXPAND
+# define INIT_DN_COMP_EXPAND
+#endif // SANITIZER_INTERCEPT_DN_COMP_EXPAND
+
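For context (illustrative only, not part of the patch), dn_expand() decodes a possibly compressed DNS name from a message buffer into a NUL-terminated dotted string and returns the number of input bytes consumed, which is why the interceptor above marks dest written up to its terminator. Linking may need -lresolv on older glibc.

#include <resolv.h>
#include <stdio.h>

int main(void) {
  // Wire-format encoding of "www.example.com": length-prefixed labels,
  // terminated by the zero label supplied by the string literal's NUL.
  const char wire[] = "\003www\007example\003com";
  const unsigned char *msg = (const unsigned char *)wire;
  char dest[256];
  int used = dn_expand(msg, msg + sizeof(wire), msg, dest, sizeof(dest));
  if (used >= 0)
    printf("consumed %d bytes -> %s\n", used, dest);  // 'dest' was written
  return 0;
}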
+#if SANITIZER_INTERCEPT_POSIX_SPAWN
+
+template <class RealSpawnPtr>
+static int PosixSpawnImpl(void *ctx, RealSpawnPtr *real_posix_spawn, pid_t *pid,
+ const char *file_or_path, const void *file_actions,
+ const void *attrp, char *const argv[],
+ char *const envp[]) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file_or_path,
+ internal_strlen(file_or_path) + 1);
+ if (argv) {
+ for (char *const *s = argv; ; ++s) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(*s));
+ if (!*s) break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *s, internal_strlen(*s) + 1);
+ }
+ }
+ if (envp) {
+ for (char *const *s = envp; ; ++s) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(*s));
+ if (!*s) break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *s, internal_strlen(*s) + 1);
+ }
+ }
+ int res =
+ real_posix_spawn(pid, file_or_path, file_actions, attrp, argv, envp);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pid, sizeof(*pid));
+ return res;
+}
+INTERCEPTOR(int, posix_spawn, pid_t *pid, const char *path,
+ const void *file_actions, const void *attrp, char *const argv[],
+ char *const envp[]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, posix_spawn, pid, path, file_actions, attrp,
+ argv, envp);
+ return PosixSpawnImpl(ctx, REAL(posix_spawn), pid, path, file_actions, attrp,
+ argv, envp);
+}
+INTERCEPTOR(int, posix_spawnp, pid_t *pid, const char *file,
+ const void *file_actions, const void *attrp, char *const argv[],
+ char *const envp[]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, posix_spawnp, pid, file, file_actions, attrp,
+ argv, envp);
+ return PosixSpawnImpl(ctx, REAL(posix_spawnp), pid, file, file_actions, attrp,
+ argv, envp);
+}
+# define INIT_POSIX_SPAWN \
+ COMMON_INTERCEPT_FUNCTION(posix_spawn); \
+ COMMON_INTERCEPT_FUNCTION(posix_spawnp);
+#else // SANITIZER_INTERCEPT_POSIX_SPAWN
+# define INIT_POSIX_SPAWN
+#endif // SANITIZER_INTERCEPT_POSIX_SPAWN
+
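A minimal caller of posix_spawn (not from the patch) showing the data shapes PosixSpawnImpl checks: argv and envp are NULL-terminated pointer arrays, so each slot is read (sizeof(char *)) and then each string up to its NUL, and *pid is written only on success.

#include <spawn.h>
#include <stdio.h>
#include <sys/wait.h>

extern char **environ;

int main(void) {
  pid_t pid;
  char *argv[] = {(char *)"/bin/echo", (char *)"hello", NULL};
  int rc = posix_spawn(&pid, "/bin/echo", NULL, NULL, argv, environ);
  if (rc == 0) {
    int status;
    waitpid(pid, &status, 0);  // 'pid' was written by posix_spawn
    printf("child exited with %d\n", WEXITSTATUS(status));
  }
  return rc;
}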
#if SANITIZER_INTERCEPT_WAIT
// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version
// suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for
@@ -2519,7 +2645,7 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(inet_ntop)(af, src, dst, size);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
@@ -2548,7 +2674,7 @@ INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
- if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, internal_strlen(cp) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -2590,9 +2716,9 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
struct __sanitizer_addrinfo **out) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
- if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
+ if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, internal_strlen(node) + 1);
if (service)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, service, internal_strlen(service) + 1);
if (hints)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
// FIXME: under ASan the call below may write to freed memory and corrupt
@@ -2608,7 +2734,7 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);
if (p->ai_canonname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
- REAL(strlen)(p->ai_canonname) + 1);
+ internal_strlen(p->ai_canonname) + 1);
p = p->ai_next;
}
}
@@ -2634,9 +2760,9 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
if (res == 0) {
if (host && hostlen)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, REAL(strlen)(host) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, internal_strlen(host) + 1);
if (serv && servlen)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, REAL(strlen)(serv) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, internal_strlen(serv) + 1);
}
return res;
}
@@ -2646,17 +2772,20 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
#endif
#if SANITIZER_INTERCEPT_GETSOCKNAME
-INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
+INTERCEPTOR(int, getsockname, int sock_fd, void *addr, unsigned *addrlen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
- int addrlen_in = *addrlen;
+ unsigned addr_sz;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addr_sz = *addrlen;
+ }
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(getsockname)(sock_fd, addr, addrlen);
- if (res == 0) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
+ if (!res && addr && addrlen) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
}
return res;
}
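The Min(addr_sz, *addrlen) above mirrors the value-result convention of getsockname (caller-side sketch, not from the patch): the incoming *addrlen bounds how much the kernel may write into addr, while the outgoing value reports the full address size, which can be larger.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  struct sockaddr_in sa;
  socklen_t len = sizeof(sa);  // in: buffer size; out: address size
  if (fd >= 0 && getsockname(fd, (struct sockaddr *)&sa, &len) == 0)
    printf("bytes actually written: %u\n",
           (unsigned)(len < sizeof(sa) ? len : sizeof(sa)));
  if (fd >= 0) close(fd);
  return 0;
}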
@@ -2669,10 +2798,10 @@ INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
if (h->h_name)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, internal_strlen(h->h_name) + 1);
char **p = h->h_aliases;
while (*p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
++p;
}
COMMON_INTERCEPTOR_WRITE_RANGE(
@@ -3161,13 +3290,17 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
unsigned addr_sz;
- if (addrlen) addr_sz = *addrlen;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addr_sz = *addrlen;
+ }
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(getpeername)(sockfd, addr, addrlen);
- if (!res && addr && addrlen)
+ if (!res && addr && addrlen) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
+ }
return res;
}
#define INIT_GETPEERNAME COMMON_INTERCEPT_FUNCTION(getpeername);
@@ -3196,7 +3329,7 @@ INTERCEPTOR(int, sysinfo, void *info) {
INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
__sanitizer_dirent *res = REAL(opendir)(path);
if (res)
COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
@@ -3210,7 +3343,8 @@ INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
return res;
}
@@ -3225,7 +3359,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
if (*result)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
}
return res;
}
@@ -3246,7 +3380,8 @@ INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
return res;
}
@@ -3261,7 +3396,7 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
if (*result)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
}
return res;
}
@@ -3351,10 +3486,10 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale);
if (locale)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, internal_strlen(locale) + 1);
char *res = REAL(setlocale)(category, locale);
if (res) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
unpoison_ctype_arrays(ctx);
}
return res;
@@ -3373,7 +3508,7 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(getcwd)(buf, size);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
#define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);
@@ -3389,7 +3524,7 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(get_current_dir_name)(fake);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
@@ -3429,30 +3564,26 @@ UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
(real_endptr - nptr) + 1 : 0);
}
-
#if SANITIZER_INTERCEPT_STRTOIMAX
-INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
+template <typename Fn>
+static ALWAYS_INLINE auto StrtoimaxImpl(void *ctx, Fn real, const char *nptr,
+ char **endptr, int base)
+ -> decltype(real(nullptr, nullptr, 0)) {
char *real_endptr;
- INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
+ auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
}
+INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(strtoimax), nptr, endptr, base);
+}
INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- char *real_endptr;
- UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return res;
+ return StrtoimaxImpl(ctx, REAL(strtoumax), nptr, endptr, base);
}
#define INIT_STRTOIMAX \
@@ -3462,6 +3593,25 @@ INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
#define INIT_STRTOIMAX
#endif
+#if SANITIZER_INTERCEPT_STRTOIMAX && SANITIZER_GLIBC
+INTERCEPTOR(INTMAX_T, __isoc23_strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoimax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoimax), nptr, endptr, base);
+}
+INTERCEPTOR(UINTMAX_T, __isoc23_strtoumax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoumax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoumax), nptr, endptr, base);
+}
+
+# define INIT_STRTOIMAX_C23 \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoimax); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoumax);
+#else
+# define INIT_STRTOIMAX_C23
+#endif
+
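A standalone sketch of the deduction trick used by StrtoimaxImpl (hypothetical names, not from the patch): taking the return type from the forwarded function lets one template body serve both the INTMAX_T and UINTMAX_T interceptors, including their __isoc23_* aliases.

#include <inttypes.h>
#include <stdio.h>

template <typename Fn>
static auto ForwardStrto(Fn real, const char *nptr, char **endptr, int base)
    -> decltype(real(nullptr, nullptr, 0)) {  // INTMAX_T or UINTMAX_T
  char *real_endptr;
  auto res = real(nptr, &real_endptr, base);
  if (endptr) *endptr = real_endptr;
  return res;
}

int main() {
  printf("%jd\n", ForwardStrto(strtoimax, "-42", nullptr, 10));
  printf("%ju\n", ForwardStrto(strtoumax, "42", nullptr, 10));
  return 0;
}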
#if SANITIZER_INTERCEPT_MBSTOWCS
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
@@ -3663,7 +3813,7 @@ INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// Workaround a bug in glibc where dlsym(RTLD_NEXT, ...) returns the oldest
// version of a versioned symbol. For realpath(), this gives us something
@@ -3674,11 +3824,12 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
char *res = REAL(realpath)(path, resolved_path);
- if (allocated_path && !res) WRAP(free)(allocated_path);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (allocated_path && !res)
+ WRAP(free)(allocated_path);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
-#define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
+# define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
#else
#define INIT_REALPATH
#endif
@@ -3687,9 +3838,9 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
char *res = REAL(canonicalize_file_name)(path);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
#define INIT_CANONICALIZE_FILE_NAME \
@@ -3750,7 +3901,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
COMMON_INTERCEPTOR_STRERROR();
char *res = REAL(strerror)(errnum);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
@@ -3765,7 +3916,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
// * GNU version returns message pointer, which points to either buf or some
// static storage.
#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
- SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+ SANITIZER_APPLE || SANITIZER_ANDROID || SANITIZER_NETBSD || \
SANITIZER_FREEBSD
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
// At least on OSX, buf contents are valid even when the call fails.
@@ -3792,13 +3943,13 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
if (res == buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||
- //SANITIZER_MAC
+ //SANITIZER_APPLE
#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
#else
#define INIT_STRERROR_R
@@ -3814,7 +3965,7 @@ INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
// This version always returns a null-terminated string.
if (buf && buflen)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
return res;
}
#define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);
@@ -3832,7 +3983,7 @@ static THREADLOCAL scandir_compar_f scandir_compar;
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
return scandir_filter(dir);
}
@@ -3840,9 +3991,9 @@ static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
const struct __sanitizer_dirent **b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
return scandir_compar(a, b);
}
@@ -3850,7 +4001,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
scandir_filter_f filter, scandir_compar_f compar) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
- if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);
scandir_filter = filter;
scandir_compar = compar;
// FIXME: under ASan the call below may write to freed memory and corrupt
@@ -3866,7 +4017,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
for (int i = 0; i < res; ++i)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
- (*namelist)[i]->d_reclen);
+ __sanitizer_dirsiz((*namelist)[i]));
}
return res;
}
@@ -3885,7 +4036,7 @@ static THREADLOCAL scandir64_compar_f scandir64_compar;
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
return scandir64_filter(dir);
}
@@ -3893,9 +4044,9 @@ static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
const struct __sanitizer_dirent64 **b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
return scandir64_compar(a, b);
}
@@ -3903,7 +4054,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
scandir64_filter_f filter, scandir64_compar_f compar) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
- if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);
scandir64_filter = filter;
scandir64_compar = compar;
// FIXME: under ASan the call below may write to freed memory and corrupt
@@ -3920,7 +4071,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
for (int i = 0; i < res; ++i)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
- (*namelist)[i]->d_reclen);
+ __sanitizer_dirsiz((*namelist)[i]));
}
return res;
}
@@ -3999,19 +4150,20 @@ INTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
- if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(wordexp)(s, p, flags);
if (!res && p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
- if (p->we_wordc)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
- sizeof(*p->we_wordv) * p->we_wordc);
- for (uptr i = 0; i < p->we_wordc; ++i) {
+ uptr we_wordc =
+ ((flags & wordexp_wrde_dooffs) ? p->we_offs : 0) + p->we_wordc;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
+ sizeof(*p->we_wordv) * (we_wordc + 1));
+ for (uptr i = 0; i < we_wordc; ++i) {
char *w = p->we_wordv[i];
- if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, REAL(strlen)(w) + 1);
+ if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, internal_strlen(w) + 1);
}
}
return res;
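The we_offs handling above follows wordexp's WRDE_DOOFFS contract (caller sketch, not from the patch): the caller sets we_offs before the call, the library prepends that many NULL slots to we_wordv, and one more NULL follows the words, so the whole (we_offs + we_wordc + 1)-pointer region is written.

#include <stdio.h>
#include <wordexp.h>

int main(void) {
  wordexp_t we;
  we.we_offs = 2;  // reserve two leading NULL slots
  if (wordexp("foo bar", &we, WRDE_DOOFFS) == 0) {
    // we_wordv == { NULL, NULL, "foo", "bar", NULL }
    for (size_t i = 0; i < we.we_offs + we.we_wordc; ++i)
      printf("[%zu] %s\n", i, we.we_wordv[i] ? we.we_wordv[i] : "(null)");
    wordfree(&we);
  }
  return 0;
}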
@@ -4196,12 +4348,16 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
INTERCEPTOR(int, backtrace, void **buffer, int size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- int res = REAL(backtrace)(buffer, size);
- if (res && buffer)
+ // 'buffer' might be freed memory, hence it is unsafe to directly call
+ // REAL(backtrace)(buffer, size). Instead, we use our own known-good
+ // scratch buffer.
+ void **scratch = (void**)InternalAlloc(sizeof(void*) * size);
+ int res = REAL(backtrace)(scratch, size);
+ if (res && buffer) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
+ internal_memcpy(buffer, scratch, res * sizeof(*buffer));
+ }
+ InternalFree(scratch);
return res;
}
@@ -4210,14 +4366,13 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
if (buffer && size)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
+  // The COMMON_INTERCEPTOR_READ_RANGE above already checks that 'buffer'
+  // is valid for reading.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
for (int i = 0; i < size; ++i)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], REAL(strlen(res[i])) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], internal_strlen(res[i]) + 1);
}
return res;
}
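For reference (not from the patch), the plain libc contract the two interceptors above wrap: backtrace() fills the caller's array with up to size return addresses and reports how many it wrote, and backtrace_symbols() returns a malloc'd array of that many NUL-terminated strings.

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  void *frames[16];
  int n = backtrace(frames, 16);             // writes n entries into 'frames'
  char **names = backtrace_symbols(frames, n);
  if (names) {
    for (int i = 0; i < n; ++i)
      printf("%s\n", names[i]);              // each names[i] is NUL-terminated
    free(names);
  }
  return 0;
}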
@@ -4243,90 +4398,13 @@ INTERCEPTOR(void, _exit, int status) {
#define INIT__EXIT
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_MUTEX
-INTERCEPTOR(int, pthread_mutex_lock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
- COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
- int res = REAL(pthread_mutex_lock)(m);
- if (res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
- if (res == 0 || res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- int res = REAL(pthread_mutex_unlock)(m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
-#define INIT_PTHREAD_MUTEX_UNLOCK \
- COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)
-#else
-#define INIT_PTHREAD_MUTEX_LOCK
-#define INIT_PTHREAD_MUTEX_UNLOCK
-#endif
-
-#if SANITIZER_INTERCEPT___PTHREAD_MUTEX
-INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_lock, m);
- COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
- int res = REAL(__pthread_mutex_lock)(m);
- if (res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
- if (res == 0 || res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_unlock, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- int res = REAL(__pthread_mutex_unlock)(m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-#define INIT___PTHREAD_MUTEX_LOCK \
- COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock)
-#define INIT___PTHREAD_MUTEX_UNLOCK \
- COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock)
-#else
-#define INIT___PTHREAD_MUTEX_LOCK
-#define INIT___PTHREAD_MUTEX_UNLOCK
-#endif
-
#if SANITIZER_INTERCEPT___LIBC_MUTEX
-INTERCEPTOR(int, __libc_mutex_lock, void *m)
-ALIAS(WRAPPER_NAME(pthread_mutex_lock));
-
-INTERCEPTOR(int, __libc_mutex_unlock, void *m)
-ALIAS(WRAPPER_NAME(pthread_mutex_unlock));
-
INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
-ALIAS(WRAPPER_NAME(pthread_setcancelstate));
+ALIAS(WRAP(pthread_setcancelstate));
-#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock)
-#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock)
#define INIT___LIBC_THR_SETCANCELSTATE \
COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
#else
-#define INIT___LIBC_MUTEX_LOCK
-#define INIT___LIBC_MUTEX_UNLOCK
#define INIT___LIBC_THR_SETCANCELSTATE
#endif
@@ -4335,16 +4413,16 @@ static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
if (mnt->mnt_fsname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,
- REAL(strlen)(mnt->mnt_fsname) + 1);
+ internal_strlen(mnt->mnt_fsname) + 1);
if (mnt->mnt_dir)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,
- REAL(strlen)(mnt->mnt_dir) + 1);
+ internal_strlen(mnt->mnt_dir) + 1);
if (mnt->mnt_type)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,
- REAL(strlen)(mnt->mnt_type) + 1);
+ internal_strlen(mnt->mnt_type) + 1);
if (mnt->mnt_opts)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,
- REAL(strlen)(mnt->mnt_opts) + 1);
+ internal_strlen(mnt->mnt_opts) + 1);
}
#endif
@@ -4379,7 +4457,7 @@ INTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,
INTERCEPTOR(int, statfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4408,7 +4486,7 @@ INTERCEPTOR(int, fstatfs, int fd, void *buf) {
INTERCEPTOR(int, statfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4437,7 +4515,7 @@ INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4471,7 +4549,7 @@ INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
INTERCEPTOR(int, statvfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4500,7 +4578,7 @@ INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
INTERCEPTOR(int, initgroups, char *user, u32 group) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);
- if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, internal_strlen(user) + 1);
int res = REAL(initgroups)(user, group);
return res;
}
@@ -4515,13 +4593,13 @@ INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
char *res = REAL(ether_ntoa)(addr);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
- if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);
__sanitizer_ether_addr *res = REAL(ether_aton)(buf);
if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
return res;
@@ -4543,14 +4621,14 @@ INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
// https://github.com/google/sanitizers/issues/321.
int res = REAL(ether_ntohost)(hostname, addr);
if (!res && hostname)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
return res;
}
INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
if (hostname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4562,7 +4640,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
char *hostname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
- if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, internal_strlen(line) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4570,7 +4648,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
if (hostname)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
}
return res;
}
@@ -4591,14 +4669,14 @@ INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(ether_ntoa_r)(addr, buf);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
__sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
- if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4766,6 +4844,27 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
#endif
+#if SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP
+INTERCEPTOR(int, pthread_getaffinity_np, void *attr, SIZE_T cpusetsize,
+ void *cpuset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getaffinity_np, attr, cpusetsize,
+ cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_getaffinity_np)(attr, cpusetsize, cpuset);
+ if (!res && cpusetsize && cpuset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
+ return res;
+}
+
+#define INIT_PTHREAD_GETAFFINITY_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_getaffinity_np);
+#else
+#define INIT_PTHREAD_GETAFFINITY_NP
+#endif
+
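A caller-side view (not from the patch) of what the new interceptor unpoisons: pthread_getaffinity_np fills the entire cpusetsize-byte CPU mask on success.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main(void) {
  cpu_set_t set;  // filled in full (sizeof(set) bytes) on success
  if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
    printf("allowed CPUs: %d\n", CPU_COUNT(&set));
  return 0;
}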
#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
@@ -4864,9 +4963,9 @@ INTERCEPTOR(char *, tmpnam, char *s) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
}
return res;
}
@@ -4883,7 +4982,7 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(tmpnam_r)(s);
- if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
return res;
}
#define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r);
@@ -4897,7 +4996,7 @@ INTERCEPTOR(char *, ptsname, int fd) {
COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);
char *res = REAL(ptsname)(fd);
if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);
@@ -4911,7 +5010,7 @@ INTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {
COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);
int res = REAL(ptsname_r)(fd, name, namesize);
if (res == 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
return res;
}
#define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);
@@ -4925,7 +5024,7 @@ INTERCEPTOR(char *, ttyname, int fd) {
COMMON_INTERCEPTOR_ENTER(ctx, ttyname, fd);
char *res = REAL(ttyname)(fd);
if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_TTYNAME COMMON_INTERCEPT_FUNCTION(ttyname);
@@ -4939,7 +5038,7 @@ INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
int res = REAL(ttyname_r)(fd, name, namesize);
if (res == 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
return res;
}
#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
@@ -4951,10 +5050,10 @@ INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx);
- if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
- if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
+ if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, internal_strlen(dir) + 1);
+ if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, internal_strlen(pfx) + 1);
char *res = REAL(tempnam)(dir, pfx);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
@@ -5332,9 +5431,7 @@ INTERCEPTOR(void *, __tls_get_addr, void *arg) {
// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
// mostly the same semantics as __tls_get_addr, but its presence enables
 // some optimizations in the linker (which are safe to ignore here).
-extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
- visibility("default")))
-void *__tls_get_addr_opt(void *arg);
+INTERCEPTOR(void *, __tls_get_addr_opt, void *arg) ALIAS(WRAP(__tls_get_addr));
#endif
#else // SANITIZER_S390
// On s390, we have to intercept two functions here:
@@ -5368,21 +5465,20 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
#if SANITIZER_S390 && \
(SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
-extern "C" uptr __tls_get_offset(void *arg);
-extern "C" uptr __interceptor___tls_get_offset(void *arg);
// We need a hidden symbol aliasing the above, so that we can jump
// directly to it from the assembly below.
-extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
- visibility("hidden")))
-uptr __tls_get_addr_hidden(void *arg);
+extern "C" __attribute__((visibility("hidden"))) uptr __tls_get_addr_hidden(
+ void *arg) ALIAS(WRAP(__tls_get_addr_internal));
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr TRAMPOLINE(__tls_get_offset)(void *arg);
+extern "C" uptr WRAP(__tls_get_offset)(void *arg);
// Now carefully intercept __tls_get_offset.
asm(
".text\n"
// The __intercept_ version has to exist, so that gen_dynamic_list.py
// exports our symbol.
".weak __tls_get_offset\n"
- ".type __tls_get_offset, @function\n"
- "__tls_get_offset:\n"
+ ".set __tls_get_offset, __interceptor___tls_get_offset\n"
".global __interceptor___tls_get_offset\n"
".type __interceptor___tls_get_offset, @function\n"
"__interceptor___tls_get_offset:\n"
@@ -5414,7 +5510,7 @@ asm(
INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5427,7 +5523,7 @@ INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5458,8 +5554,8 @@ INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5471,8 +5567,8 @@ INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5484,7 +5580,7 @@ INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
- if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5554,7 +5650,7 @@ INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
if (p->ifa_name)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
- REAL(strlen)(p->ifa_name) + 1);
+ internal_strlen(p->ifa_name) + 1);
if (p->ifa_addr)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
if (p->ifa_netmask)
@@ -5584,14 +5680,14 @@ INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(if_indextoname)(ifindex, ifname);
if (res && ifname)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, internal_strlen(ifname) + 1);
return res;
}
INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
if (ifname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, internal_strlen(ifname) + 1);
return REAL(if_nametoindex)(ifname);
}
#define INIT_IF_INDEXTONAME \
@@ -5611,8 +5707,10 @@ INTERCEPTOR(int, capget, void *hdrp, void *datap) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(capget)(hdrp, datap);
- if (res == 0 && datap)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ if (res == 0 && datap) {
+ unsigned datasz = __user_cap_data_struct_sz(hdrp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, datasz);
+ }
// We can also return -1 and write to hdrp->version if the version passed in
// hdrp->version is unsupported. But that's not a trivial condition to check,
// and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
@@ -5623,8 +5721,10 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
if (hdrp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
- if (datap)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ if (datap) {
+ unsigned datasz = __user_cap_data_struct_sz(hdrp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, datasz);
+ }
return REAL(capset)(hdrp, datap);
}
#define INIT_CAPGET \
@@ -5634,105 +5734,6 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
#define INIT_CAPGET
#endif
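Why the data size now comes from the header (illustrative only; the exact definition of __user_cap_data_struct_sz lives in the platform limits files and is not shown here): with _LINUX_CAPABILITY_VERSION_1 the kernel reads or writes one __user_cap_data_struct, while versions 2 and 3 use two, so a fixed size either under- or over-reports the range.

#include <linux/capability.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
  struct __user_cap_header_struct hdr = {_LINUX_CAPABILITY_VERSION_3, 0};
  struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];  // 2 entries
  if (syscall(SYS_capget, &hdr, data) == 0)  // raw syscall, no libcap needed
    printf("effective[0] = %#x\n", data[0].effective);
  return 0;
}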
-#if SANITIZER_INTERCEPT_AEABI_MEM
-INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-// Note the argument order.
-INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-#define INIT_AEABI_MEM \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
-#else
-#define INIT_AEABI_MEM
-#endif // SANITIZER_INTERCEPT_AEABI_MEM
-
-#if SANITIZER_INTERCEPT___BZERO
-INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
-#else
-#define INIT___BZERO
-#endif // SANITIZER_INTERCEPT___BZERO
-
-#if SANITIZER_INTERCEPT_BZERO
-INTERCEPTOR(void *, bzero, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
-#else
-#define INIT_BZERO
-#endif // SANITIZER_INTERCEPT_BZERO
-
#if SANITIZER_INTERCEPT_FTIME
INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
void *ctx;
@@ -5849,7 +5850,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, internal_strlen(*p) + 1);
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
@@ -5858,7 +5859,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
if (res && *p)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
}
return res;
}
@@ -6069,8 +6070,8 @@ INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
__sanitizer_FILE *res = REAL(fopen)(path, mode);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
if (res) unpoison_file(res);
@@ -6079,7 +6080,7 @@ INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
__sanitizer_FILE *res = REAL(fdopen)(fd, mode);
if (res) unpoison_file(res);
return res;
@@ -6088,8 +6089,8 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
__sanitizer_FILE *fp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
__sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -6113,7 +6114,7 @@ INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
if (path) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
}
return REAL(flopen)(path, flags, mode);
}
@@ -6126,7 +6127,7 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
if (path) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
}
return REAL(flopenat)(dirfd, path, flags, mode);
}
@@ -6142,8 +6143,8 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
__sanitizer_FILE *res = REAL(fopen64)(path, mode);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
if (res) unpoison_file(res);
@@ -6153,8 +6154,8 @@ INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
__sanitizer_FILE *fp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
__sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -6305,8 +6306,7 @@ INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
- COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
- void *res = REAL(dlopen)(filename, flag);
+ void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);
Symbolizer::GetOrInit()->InvalidateModuleList();
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
return res;
@@ -6332,9 +6332,9 @@ INTERCEPTOR(char *, getpass, const char *prompt) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
if (prompt)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, internal_strlen(prompt)+1);
char *res = REAL(getpass)(prompt);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res)+1);
return res;
}
@@ -6475,7 +6475,7 @@ INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
// Workaround a bug in glibc's "old" semaphore implementation by
// zero-initializing the sem_t contents. This has to be done here because
- // interceptors bind to the lowest symbols version by default, hitting the
+ // interceptors bind to the lowest version before glibc 2.36, hitting the
// buggy code path while the non-sanitized build of the same code works fine.
REAL(memset)(s, 0, sizeof(*s));
int res = REAL(sem_init)(s, pshared, value);
@@ -6538,17 +6538,42 @@ INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {
}
return res;
}
-#define INIT_SEM \
- COMMON_INTERCEPT_FUNCTION(sem_init); \
- COMMON_INTERCEPT_FUNCTION(sem_destroy); \
- COMMON_INTERCEPT_FUNCTION(sem_wait); \
- COMMON_INTERCEPT_FUNCTION(sem_trywait); \
- COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
- COMMON_INTERCEPT_FUNCTION(sem_post); \
- COMMON_INTERCEPT_FUNCTION(sem_getvalue);
+
+INTERCEPTOR(__sanitizer_sem_t *, sem_open, const char *name, int oflag, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, oflag);
+ u32 mode = va_arg(ap, u32);
+ u32 value = va_arg(ap, u32);
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_open, name, oflag, mode, value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+ __sanitizer_sem_t *s = REAL(sem_open)(name, oflag, mode, value);
+ if (s)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, sizeof(*s));
+ va_end(ap);
+ return s;
+}
+
+INTERCEPTOR(int, sem_unlink, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_unlink, name);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+ return REAL(sem_unlink)(name);
+}
+
+# define INIT_SEM \
+ COMMON_INTERCEPT_FUNCTION(sem_init); \
+ COMMON_INTERCEPT_FUNCTION(sem_destroy); \
+ COMMON_INTERCEPT_FUNCTION(sem_wait); \
+ COMMON_INTERCEPT_FUNCTION(sem_trywait); \
+ COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
+ COMMON_INTERCEPT_FUNCTION(sem_post); \
+ COMMON_INTERCEPT_FUNCTION(sem_getvalue); \
+ COMMON_INTERCEPT_FUNCTION(sem_open); \
+ COMMON_INTERCEPT_FUNCTION(sem_unlink);
#else
-#define INIT_SEM
-#endif // SANITIZER_INTERCEPT_SEM
+# define INIT_SEM
+#endif // SANITIZER_INTERCEPT_SEM
#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {
@@ -6631,7 +6656,7 @@ INTERCEPTOR(char *, ctermid, char *s) {
COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);
char *res = REAL(ctermid)(s);
if (res) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
}
return res;
}
@@ -6646,7 +6671,7 @@ INTERCEPTOR(char *, ctermid_r, char *s) {
COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);
char *res = REAL(ctermid_r)(s);
if (res) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
}
return res;
}
@@ -6772,6 +6797,23 @@ INTERCEPTOR(int, stat, const char *path, void *buf) {
#define INIT_STAT
#endif
+#if SANITIZER_INTERCEPT_STAT64
+INTERCEPTOR(int, stat64, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stat64, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(stat64)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT_STAT64 COMMON_INTERCEPT_FUNCTION(stat64)
+#else
+#define INIT_STAT64
+#endif
+
+
#if SANITIZER_INTERCEPT_LSTAT
INTERCEPTOR(int, lstat, const char *path, void *buf) {
void *ctx;
@@ -6788,6 +6830,22 @@ INTERCEPTOR(int, lstat, const char *path, void *buf) {
#define INIT_LSTAT
#endif
+#if SANITIZER_INTERCEPT_STAT64
+INTERCEPTOR(int, lstat64, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lstat64, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(lstat64)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT_LSTAT64 COMMON_INTERCEPT_FUNCTION(lstat64)
+#else
+#define INIT_LSTAT64
+#endif
+
#if SANITIZER_INTERCEPT___XSTAT
INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
void *ctx;
@@ -6960,6 +7018,7 @@ INTERCEPTOR(int, mprobe, void *ptr) {
}
#endif
+#if SANITIZER_INTERCEPT_WCSLEN
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
@@ -6978,13 +7037,16 @@ INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
#define INIT_WCSLEN \
COMMON_INTERCEPT_FUNCTION(wcslen); \
COMMON_INTERCEPT_FUNCTION(wcsnlen);
+#else
+#define INIT_WCSLEN
+#endif
#if SANITIZER_INTERCEPT_WCSCAT
INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
- SIZE_T src_size = REAL(wcslen)(src);
- SIZE_T dst_size = REAL(wcslen)(dst);
+ SIZE_T src_size = internal_wcslen(src);
+ SIZE_T dst_size = internal_wcslen(dst);
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
@@ -6995,8 +7057,8 @@ INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
- SIZE_T src_size = REAL(wcsnlen)(src, n);
- SIZE_T dst_size = REAL(wcslen)(dst);
+ SIZE_T src_size = internal_wcsnlen(src, n);
+ SIZE_T dst_size = internal_wcslen(dst);
COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
Min(src_size + 1, n) * sizeof(wchar_t));
COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
@@ -7015,7 +7077,7 @@ INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcsdup, s);
- SIZE_T len = REAL(wcslen)(s);
+ SIZE_T len = internal_wcslen(s);
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (len + 1));
wchar_t *result = REAL(wcsdup)(s);
if (result)
@@ -7029,9 +7091,9 @@ INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
#endif
#if SANITIZER_INTERCEPT_STRXFRM
-static SIZE_T RealStrLen(const char *str) { return REAL(strlen)(str); }
+static SIZE_T RealStrLen(const char *str) { return internal_strlen(str); }
-static SIZE_T RealStrLen(const wchar_t *str) { return REAL(wcslen)(str); }
+static SIZE_T RealStrLen(const wchar_t *str) { return internal_wcslen(str); }
#define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...) \
{ \
@@ -7105,7 +7167,7 @@ INTERCEPTOR(int, acct, const char *file) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, acct, file);
if (file)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);
return REAL(acct)(file);
}
#define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct)
@@ -7120,7 +7182,7 @@ INTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) {
COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser);
user = REAL(user_from_uid)(uid, nouser);
if (user)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, internal_strlen(user) + 1);
return user;
}
#define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid)
@@ -7134,7 +7196,7 @@ INTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) {
int res;
COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
res = REAL(uid_from_user)(name, uid);
if (uid)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid));
@@ -7152,7 +7214,7 @@ INTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) {
COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup);
group = REAL(group_from_gid)(gid, nogroup);
if (group)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, internal_strlen(group) + 1);
return group;
}
#define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid)
@@ -7166,7 +7228,7 @@ INTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) {
int res;
COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid);
if (group)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, group, internal_strlen(group) + 1);
res = REAL(gid_from_group)(group, gid);
if (gid)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid));
@@ -7182,7 +7244,7 @@ INTERCEPTOR(int, access, const char *path, int mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
return REAL(access)(path, mode);
}
#define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access)
@@ -7195,7 +7257,7 @@ INTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
return REAL(faccessat)(fd, path, mode, flags);
}
#define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat)
@@ -7210,7 +7272,7 @@ INTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups,
int res;
COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, basegid, groups, ngroups);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
if (ngroups)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups));
res = REAL(getgrouplist)(name, basegid, groups, ngroups);
@@ -7234,7 +7296,7 @@ INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups,
maxgrp, ngroups);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups);
if (!res && groups && ngroups) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
@@ -7252,7 +7314,7 @@ INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
SSIZE_T res = REAL(readlink)(path, buf, bufsiz);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
@@ -7269,7 +7331,7 @@ INTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf,
SIZE_T bufsiz) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, dirfd, path, buf, bufsiz);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
@@ -7287,7 +7349,7 @@ INTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname,
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle,
mount_id, flags);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, REAL(strlen)(pathname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, internal_strlen(pathname) + 1);
__sanitizer_file_handle *sanitizer_handle =
reinterpret_cast<__sanitizer_file_handle*>(handle);
@@ -7351,7 +7413,7 @@ INTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) {
ctx, src, Min(internal_strnlen(src, size), size - 1) + 1);
}
res = REAL(strlcpy)(dst, src, size);
- COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, REAL(strlen)(dst) + 1);
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, internal_strlen(dst) + 1);
return res;
}
@@ -7379,17 +7441,25 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,
OFF_T off) {
void *ctx;
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, flags);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);
COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
}
+INTERCEPTOR(int, munmap, void *addr, SIZE_T sz) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (int)internal_munmap(addr, sz);
+ COMMON_INTERCEPTOR_ENTER(ctx, munmap, addr, sz);
+ COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz);
+}
+
INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
void *ctx;
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, 0);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return (int)internal_mprotect(addr, sz, prot);
COMMON_INTERCEPTOR_ENTER(ctx, mprotect, addr, sz, prot);
@@ -7398,6 +7468,7 @@ INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
}
#define INIT_MMAP \
COMMON_INTERCEPT_FUNCTION(mmap); \
+ COMMON_INTERCEPT_FUNCTION(munmap); \
COMMON_INTERCEPT_FUNCTION(mprotect);
#else
#define INIT_MMAP
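With this change mmap (and mmap64 below) forwards the mapping flags to ReportMmapWriteExec along with the protection bits, and a munmap interceptor is added. A hedged sketch of the kind of mapping detect_write_exec is meant to flag; illustrative only, not part of the patch:

#include <sys/mman.h>
#include <cstdio>

int main() {
  // A writable+executable anonymous mapping: the interceptor sees
  // PROT_WRITE|PROT_EXEC and, with detect_write_exec set, reports it.
  void *p = mmap(nullptr, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  munmap(p, 4096);  // munmap now goes through an interceptor as well
  return 0;
}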
@@ -7408,7 +7479,7 @@ INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd,
OFF64_T off) {
void *ctx;
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, flags);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off);
@@ -7426,7 +7497,7 @@ INTERCEPTOR(char *, devname, u64 dev, u32 type) {
COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type);
name = REAL(devname)(dev, type);
if (name)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
return name;
}
#define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname);
@@ -7448,7 +7519,7 @@ INTERCEPTOR(DEVNAME_R_RETTYPE, devname_r, u64 dev, u32 type, char *path,
COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len);
DEVNAME_R_RETTYPE res = REAL(devname_r)(dev, type, path, len);
if (DEVNAME_R_SUCCESS(res))
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, internal_strlen(path) + 1);
return res;
}
#define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r);
@@ -7478,7 +7549,7 @@ INTERCEPTOR(void, strmode, u32 mode, char *bp) {
COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp);
REAL(strmode)(mode, bp);
if (bp)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, REAL(strlen)(bp) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, internal_strlen(bp) + 1);
}
#define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode)
#else
@@ -7498,40 +7569,44 @@ INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name);
if (ttyent)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
return ttyent;
}
+#define INIT_TTYENT \
+ COMMON_INTERCEPT_FUNCTION(getttyent); \
+ COMMON_INTERCEPT_FUNCTION(getttynam);
+#else
+#define INIT_TTYENT
+#endif
+
+#if SANITIZER_INTERCEPT_TTYENTPATH
INTERCEPTOR(int, setttyentpath, char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
return REAL(setttyentpath)(path);
}
-#define INIT_TTYENT \
- COMMON_INTERCEPT_FUNCTION(getttyent); \
- COMMON_INTERCEPT_FUNCTION(getttynam); \
- COMMON_INTERCEPT_FUNCTION(setttyentpath)
+#define INIT_TTYENTPATH COMMON_INTERCEPT_FUNCTION(setttyentpath);
#else
-#define INIT_TTYENT
+#define INIT_TTYENTPATH
#endif
#if SANITIZER_INTERCEPT_PROTOENT
static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, internal_strlen(p->p_name) + 1);
SIZE_T pp_size = 1; // One handles the trailing \0
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, internal_strlen(*pp) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
- pp_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, pp_size * sizeof(char *));
}
INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
@@ -7547,7 +7622,7 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
if (p)
write_protoent(ctx, p);
@@ -7591,7 +7666,7 @@ INTERCEPTOR(int, getprotobyname_r, const char *name,
COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
buflen, result);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
@@ -7630,15 +7705,14 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
SIZE_T nn_size = 1; // One handles the trailing \0
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7647,20 +7721,19 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
struct __sanitizer_netent *n = REAL(getnetbyname)(name);
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
SIZE_T nn_size = 1; // One handles the trailing \0
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7672,15 +7745,14 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
SIZE_T nn_size = 1; // One handles the trailing \0
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7753,12 +7825,12 @@ INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
unpoison_file(stream);
}
-INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) {
+INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode);
- REAL(setbuffer)(stream, buf, mode);
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, size);
+ REAL(setbuffer)(stream, buf, size);
if (buf) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
}
if (stream)
unpoison_file(stream);
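setbuffer's third parameter is a byte count, not a mode, so the interceptor now marks exactly the caller-supplied size as written instead of assuming a default buffer size. A minimal usage sketch, assuming a BSD/glibc setbuffer and a hypothetical output path; illustrative only:

#include <cstdio>

int main() {
  static char buf[8192];                  // caller-owned stdio buffer
  FILE *f = fopen("/tmp/demo.txt", "w");  // hypothetical path
  if (!f) return 1;
  setbuffer(f, buf, sizeof(buf));         // interceptor records exactly sizeof(buf) bytes
  fputs("hello\n", f);
  fclose(f);
  return 0;
}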
@@ -7798,9 +7870,9 @@ INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, regcomp, preg, pattern, cflags);
if (pattern)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, REAL(strlen)(pattern) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, internal_strlen(pattern) + 1);
int res = REAL(regcomp)(preg, pattern, cflags);
- if (!res)
+ if (preg)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
return res;
}
@@ -7811,7 +7883,7 @@ INTERCEPTOR(int, regexec, const void *preg, const char *string, SIZE_T nmatch,
if (preg)
COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
if (string)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, string, REAL(strlen)(string) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, string, internal_strlen(string) + 1);
int res = REAL(regexec)(preg, string, nmatch, pmatch, eflags);
if (!res && pmatch)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pmatch, nmatch * struct_regmatch_sz);
@@ -7825,7 +7897,7 @@ INTERCEPTOR(SIZE_T, regerror, int errcode, const void *preg, char *errbuf,
COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
SIZE_T res = REAL(regerror)(errcode, preg, errbuf, errbuf_size);
if (errbuf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, REAL(strlen)(errbuf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, internal_strlen(errbuf) + 1);
return res;
}
INTERCEPTOR(void, regfree, const void *preg) {
@@ -7850,15 +7922,15 @@ INTERCEPTOR(SSIZE_T, regnsub, char *buf, SIZE_T bufsiz, const char *sub,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, regnsub, buf, bufsiz, sub, rm, str);
if (sub)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
// The implementation demands and hardcodes 10 elements
if (rm)
COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
if (str)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
SSIZE_T res = REAL(regnsub)(buf, bufsiz, sub, rm, str);
if (res > 0 && buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
return res;
}
INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
@@ -7866,16 +7938,16 @@ INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, regasub, buf, sub, rm, sstr);
if (sub)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
// Hardcode 10 elements as this is hardcoded size
if (rm)
COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
if (sstr)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, REAL(strlen)(sstr) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, internal_strlen(sstr) + 1);
SSIZE_T res = REAL(regasub)(buf, sub, rm, sstr);
if (res > 0 && buf) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sizeof(char *));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, REAL(strlen)(*buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, internal_strlen(*buf) + 1);
}
return res;
}
@@ -7897,7 +7969,7 @@ INTERCEPTOR(void *, fts_open, char *const *path_argv, int options,
COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
if (!*pa)
break;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
}
}
// TODO(kamil): handle compar callback
@@ -7989,7 +8061,7 @@ INTERCEPTOR(int, sysctlbyname, char *sname, void *oldp, SIZE_T *oldlenp,
COMMON_INTERCEPTOR_ENTER(ctx, sysctlbyname, sname, oldp, oldlenp, newp,
newlen);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
if (oldlenp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
if (newp && newlen)
@@ -8010,7 +8082,7 @@ INTERCEPTOR(int, sysctlnametomib, const char *sname, int *name,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sysctlnametomib, sname, name, namelenp);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
if (namelenp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
int res = REAL(sysctlnametomib)(sname, name, namelenp);
@@ -8050,7 +8122,7 @@ INTERCEPTOR(void *, asysctlbyname, const char *sname, SIZE_T *len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asysctlbyname, sname, len);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
void *res = REAL(asysctlbyname)(sname, len);
if (res && len) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
@@ -8073,7 +8145,7 @@ INTERCEPTOR(int, sysctlgetmibinfo, char *sname, int *name,
COMMON_INTERCEPTOR_ENTER(ctx, sysctlgetmibinfo, sname, name, namelenp, cname,
csz, rnode, v);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
if (namelenp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
if (csz)
@@ -8107,7 +8179,7 @@ INTERCEPTOR(char *, nl_langinfo, long item) {
COMMON_INTERCEPTOR_ENTER(ctx, nl_langinfo, item);
char *ret = REAL(nl_langinfo)(item);
if (ret)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
return ret;
}
#define INIT_NL_LANGINFO COMMON_INTERCEPT_FUNCTION(nl_langinfo)
@@ -8127,7 +8199,7 @@ INTERCEPTOR(int, modctl, int operation, void *argp) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, ml, sizeof(*ml));
if (ml->ml_filename)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_filename,
- REAL(strlen)(ml->ml_filename) + 1);
+ internal_strlen(ml->ml_filename) + 1);
if (ml->ml_props)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_props, ml->ml_propslen);
}
@@ -8135,7 +8207,7 @@ INTERCEPTOR(int, modctl, int operation, void *argp) {
} else if (operation == modctl_unload) {
if (argp) {
const char *name = (const char *)argp;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
}
ret = REAL(modctl)(operation, argp);
} else if (operation == modctl_stat) {
@@ -8177,7 +8249,7 @@ INTERCEPTOR(long long, strtonum, const char *nptr, long long minval,
if (errstr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errstr, sizeof(const char *));
if (*errstr)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, REAL(strlen)(*errstr) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, internal_strlen(*errstr) + 1);
}
return ret;
}
@@ -8197,7 +8269,7 @@ INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
COMMON_INTERCEPTOR_READ_RANGE(ctx, delim, sizeof(delim[0]) * 3);
char *ret = REAL(fparseln)(stream, len, lineno, delim, flags);
if (ret) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
if (len)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
if (lineno)
@@ -8214,7 +8286,7 @@ INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
int res = REAL(statvfs1)(path, buf, flags);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -8495,7 +8567,7 @@ INTERCEPTOR(char *, SHA1File, char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, SHA1File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(SHA1File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
@@ -8506,7 +8578,7 @@ INTERCEPTOR(char *, SHA1FileChunk, char *filename, char *buf, OFF_T offset,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, SHA1FileChunk, filename, buf, offset, length);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(SHA1FileChunk)(filename, buf, offset, length);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
@@ -8582,7 +8654,7 @@ INTERCEPTOR(char *, MD4File, const char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, MD4File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(MD4File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
@@ -8665,7 +8737,7 @@ INTERCEPTOR(char *, RMD160File, char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, RMD160File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(RMD160File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
@@ -8676,7 +8748,7 @@ INTERCEPTOR(char *, RMD160FileChunk, char *filename, char *buf, OFF_T offset,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, RMD160FileChunk, filename, buf, offset, length);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(RMD160FileChunk)(filename, buf, offset, length);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
@@ -8752,7 +8824,7 @@ INTERCEPTOR(char *, MD5File, const char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, MD5File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(MD5File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
@@ -8882,7 +8954,7 @@ INTERCEPTOR(char *, MD2File, const char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, MD2File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(MD2File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
@@ -8960,7 +9032,7 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
void *ctx; \
COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_File, filename, buf); \
if (filename) \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\
char *ret = REAL(SHA##LEN##_File)(filename, buf); \
if (ret) \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
@@ -8972,7 +9044,7 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_FileChunk, filename, buf, offset, \
length); \
if (filename) \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\
char *ret = REAL(SHA##LEN##_FileChunk)(filename, buf, offset, length); \
if (ret) \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
@@ -8989,10 +9061,10 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
return ret; \
}
-SHA2_INTERCEPTORS(224, u32);
-SHA2_INTERCEPTORS(256, u32);
-SHA2_INTERCEPTORS(384, u64);
-SHA2_INTERCEPTORS(512, u64);
+SHA2_INTERCEPTORS(224, u32)
+SHA2_INTERCEPTORS(256, u32)
+SHA2_INTERCEPTORS(384, u64)
+SHA2_INTERCEPTORS(512, u64)
#define INIT_SHA2_INTECEPTORS(LEN) \
COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Init); \
@@ -9036,7 +9108,7 @@ INTERCEPTOR(int, strvis, char *dst, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strvis, dst, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int len = REAL(strvis)(dst, src, flag);
if (dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
@@ -9046,7 +9118,7 @@ INTERCEPTOR(int, stravis, char **dst, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, stravis, dst, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int len = REAL(stravis)(dst, src, flag);
if (dst) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(char *));
@@ -9059,7 +9131,7 @@ INTERCEPTOR(int, strnvis, char *dst, SIZE_T dlen, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strnvis, dst, dlen, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int len = REAL(strnvis)(dst, dlen, src, flag);
// The interface will be valid even if there is no space for NULL char
if (dst && len > 0)
@@ -9109,7 +9181,7 @@ INTERCEPTOR(char *, svis, char *dst, int c, int flag, int nextc,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, svis, dst, c, flag, nextc, extra);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
char *end = REAL(svis)(dst, c, flag, nextc, extra);
if (dst && end)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
@@ -9120,7 +9192,7 @@ INTERCEPTOR(char *, snvis, char *dst, SIZE_T dlen, int c, int flag, int nextc,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, snvis, dst, dlen, c, flag, nextc, extra);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
char *end = REAL(snvis)(dst, dlen, c, flag, nextc, extra);
if (dst && end)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst,
@@ -9132,9 +9204,9 @@ INTERCEPTOR(int, strsvis, char *dst, const char *src, int flag,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strsvis, dst, src, flag, extra);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int len = REAL(strsvis)(dst, src, flag, extra);
if (dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
@@ -9145,9 +9217,9 @@ INTERCEPTOR(int, strsnvis, char *dst, SIZE_T dlen, const char *src, int flag,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strsnvis, dst, dlen, src, flag, extra);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int len = REAL(strsnvis)(dst, dlen, src, flag, extra);
// The interface will be valid even if there is no space for NULL char
if (dst && len >= 0)
@@ -9161,7 +9233,7 @@ INTERCEPTOR(int, strsvisx, char *dst, const char *src, SIZE_T len, int flag,
if (src)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int ret = REAL(strsvisx)(dst, src, len, flag, extra);
if (dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9174,7 +9246,7 @@ INTERCEPTOR(int, strsnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
if (src)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int ret = REAL(strsnvisx)(dst, dlen, src, len, flag, extra);
if (dst && ret >= 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9188,7 +9260,7 @@ INTERCEPTOR(int, strsenvisx, char *dst, SIZE_T dlen, const char *src,
if (src)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
// FIXME: only need to be checked when "flag | VIS_NOLOCALE" doesn't hold
// according to the implementation
if (cerr_ptr)
@@ -9215,7 +9287,7 @@ INTERCEPTOR(int, strunvis, char *dst, const char *src) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strunvis, dst, src);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strunvis)(dst, src);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9225,7 +9297,7 @@ INTERCEPTOR(int, strnunvis, char *dst, SIZE_T dlen, const char *src) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strnunvis, dst, dlen, src);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strnunvis)(dst, dlen, src);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9235,7 +9307,7 @@ INTERCEPTOR(int, strunvisx, char *dst, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strunvisx, dst, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strunvisx)(dst, src, flag);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9246,7 +9318,7 @@ INTERCEPTOR(int, strnunvisx, char *dst, SIZE_T dlen, const char *src,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strnunvisx, dst, dlen, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strnunvisx)(dst, dlen, src, flag);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9282,7 +9354,7 @@ INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open, const char *path, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open, path, flags);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
struct __sanitizer_cdbr *cdbr = REAL(cdbr_open)(path, flags);
if (cdbr)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
@@ -9474,7 +9546,7 @@ INTERCEPTOR(void *, getfsspec, const char *spec) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getfsspec, spec);
if (spec)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, REAL(strlen)(spec) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, internal_strlen(spec) + 1);
void *ret = REAL(getfsspec)(spec);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
@@ -9485,7 +9557,7 @@ INTERCEPTOR(void *, getfsfile, const char *file) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getfsfile, file);
if (file)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);
void *ret = REAL(getfsfile)(file);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
@@ -9529,9 +9601,9 @@ INTERCEPTOR(__sanitizer_FILE *, popen, const char *command, const char *type) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, popen, command, type);
if (command)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, command, REAL(strlen)(command) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, command, internal_strlen(command) + 1);
if (type)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);
__sanitizer_FILE *res = REAL(popen)(command, type);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
if (res) unpoison_file(res);
@@ -9548,13 +9620,13 @@ INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, popenve, path, argv, envp, type);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
if (argv) {
for (char *const *pa = argv; ; ++pa) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
if (!*pa)
break;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
}
}
if (envp) {
@@ -9562,11 +9634,11 @@ INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
if (!*pa)
break;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
}
}
if (type)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);
__sanitizer_FILE *res = REAL(popenve)(path, argv, envp, type);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
if (res) unpoison_file(res);
@@ -9762,7 +9834,7 @@ INTERCEPTOR(char *, fdevname, int fd) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
char *name = REAL(fdevname)(fd);
if (name) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
if (fd > 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
}
@@ -9775,7 +9847,7 @@ INTERCEPTOR(char *, fdevname_r, int fd, char *buf, SIZE_T len) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
char *name = REAL(fdevname_r)(fd, buf, len);
if (name && buf && len > 0) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
if (fd > 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
}
@@ -9795,7 +9867,7 @@ INTERCEPTOR(char *, getusershell) {
COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
char *res = REAL(getusershell)();
if (res)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
@@ -9820,7 +9892,7 @@ INTERCEPTOR(int, sl_add, void *sl, char *item) {
if (sl)
COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
if (item)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);
int res = REAL(sl_add)(sl, item);
if (!res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
@@ -9833,10 +9905,10 @@ INTERCEPTOR(char *, sl_find, void *sl, const char *item) {
if (sl)
COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
if (item)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);
char *res = REAL(sl_find)(sl, item);
if (res)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
@@ -9872,41 +9944,6 @@ INTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {
#define INIT_GETRANDOM
#endif
-#if SANITIZER_INTERCEPT_CRYPT
-INTERCEPTOR(char *, crypt, char *key, char *salt) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, crypt, key, salt);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
- char *res = REAL(crypt)(key, salt);
- if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
- return res;
-}
-#define INIT_CRYPT COMMON_INTERCEPT_FUNCTION(crypt);
-#else
-#define INIT_CRYPT
-#endif
-
-#if SANITIZER_INTERCEPT_CRYPT_R
-INTERCEPTOR(char *, crypt_r, char *key, char *salt, void *data) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, crypt_r, key, salt, data);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
- char *res = REAL(crypt_r)(key, salt, data);
- if (res != nullptr) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data,
- __sanitizer::struct_crypt_data_sz);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
- }
- return res;
-}
-#define INIT_CRYPT_R COMMON_INTERCEPT_FUNCTION(crypt_r);
-#else
-#define INIT_CRYPT_R
-#endif
-
#if SANITIZER_INTERCEPT_GETENTROPY
INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
void *ctx;
@@ -9922,7 +9959,52 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
#define INIT_GETENTROPY
#endif
-#if SANITIZER_INTERCEPT_QSORT
+#if SANITIZER_INTERCEPT_QSORT_R
+typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
+struct qsort_r_compar_params {
+ SIZE_T size;
+ qsort_r_compar_f compar;
+ void *arg;
+};
+static int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
+ qsort_r_compar_params *params = (qsort_r_compar_params *)arg;
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, params->size);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, params->size);
+ return params->compar(a, b, params->arg);
+}
+
+INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_r_compar_f compar, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
+ // Run the comparator over all array elements to detect any memory issues.
+ if (nmemb > 1) {
+ for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+ void *p = (void *)((char *)base + i * size);
+ void *q = (void *)((char *)base + (i + 1) * size);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ compar(p, q, arg);
+ }
+ }
+ qsort_r_compar_params params = {size, compar, arg};
+ REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, &params);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+# define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+#else
+# define INIT_QSORT_R
+#endif
+
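The rewritten qsort_r interceptor needs no thread-local state: the caller's comparator and its argument travel to the wrapper inside a params struct passed through qsort_r's own void *arg. A stand-alone sketch of the same trick, assuming the glibc qsort_r argument order; illustrative, not the interceptor itself:

#include <cstdio>
#include <cstdlib>

struct wrap_params {
  int (*compar)(const void *, const void *, void *);
  void *arg;
};

// The wrapper recovers the params struct from qsort_r's arg slot, so no
// thread-local globals are required and reentrancy comes for free.
static int wrapped(const void *a, const void *b, void *arg) {
  wrap_params *p = static_cast<wrap_params *>(arg);
  // A real interceptor would unpoison/check a and b here.
  return p->compar(a, b, p->arg);
}

static int cmp_int(const void *a, const void *b, void *) {
  int x = *static_cast<const int *>(a), y = *static_cast<const int *>(b);
  return (x > y) - (x < y);
}

int main() {
  int v[] = {3, 1, 2};
  wrap_params p = {cmp_int, nullptr};
  qsort_r(v, 3, sizeof(int), wrapped, &p);  // glibc argument order assumed
  printf("%d %d %d\n", v[0], v[1], v[2]);   // 1 2 3
  return 0;
}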
+#if SANITIZER_INTERCEPT_QSORT && SANITIZER_INTERCEPT_QSORT_R
+INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_r_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);
+ WRAP(qsort_r)(base, nmemb, size, compar, nullptr);
+}
+# define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+#elif SANITIZER_INTERCEPT_QSORT && !SANITIZER_INTERCEPT_QSORT_R
// Glibc qsort uses a temporary buffer allocated either on stack or on heap.
// Poisoned memory from there may get copied into the comparator arguments,
// where it needs to be dealt with. But even that is not enough - the results of
@@ -9937,7 +10019,7 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
typedef int (*qsort_compar_f)(const void *, const void *);
static THREADLOCAL qsort_compar_f qsort_compar;
static THREADLOCAL SIZE_T qsort_size;
-int wrapped_qsort_compar(const void *a, const void *b) {
+static int wrapped_qsort_compar(const void *a, const void *b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_size);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_size);
@@ -9979,60 +10061,34 @@ INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
-#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+# define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
#else
-#define INIT_QSORT
+# define INIT_QSORT
#endif
-#if SANITIZER_INTERCEPT_QSORT_R
-typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
-static THREADLOCAL qsort_r_compar_f qsort_r_compar;
-static THREADLOCAL SIZE_T qsort_r_size;
-int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_r_size);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_r_size);
- return qsort_r_compar(a, b, arg);
+#if SANITIZER_INTERCEPT_BSEARCH
+typedef int (*bsearch_compar_f)(const void *, const void *);
+struct bsearch_compar_params {
+ const void *key;
+ bsearch_compar_f compar;
+};
+
+static int wrapped_bsearch_compar(const void *key, const void *b) {
+ const bsearch_compar_params *params = (const bsearch_compar_params *)key;
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ return params->compar(params->key, b);
}
-INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
- qsort_r_compar_f compar, void *arg) {
+INTERCEPTOR(void *, bsearch, const void *key, const void *base, SIZE_T nmemb,
+ SIZE_T size, bsearch_compar_f compar) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
- // Run the comparator over all array elements to detect any memory issues.
- if (nmemb > 1) {
- for (SIZE_T i = 0; i < nmemb - 1; ++i) {
- void *p = (void *)((char *)base + i * size);
- void *q = (void *)((char *)base + (i + 1) * size);
- COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
- compar(p, q, arg);
- }
- }
- qsort_r_compar_f old_compar = qsort_r_compar;
- SIZE_T old_size = qsort_r_size;
- // Handle qsort_r() implementations that recurse using an
- // interposable function call:
- bool already_wrapped = compar == wrapped_qsort_r_compar;
- if (already_wrapped) {
- // This case should only happen if the qsort() implementation calls itself
- // using a preemptible function call (e.g. the FreeBSD libc version).
- // Check that the size and comparator arguments are as expected.
- CHECK_NE(compar, qsort_r_compar);
- CHECK_EQ(qsort_r_size, size);
- } else {
- qsort_r_compar = compar;
- qsort_r_size = size;
- }
- REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
- if (!already_wrapped) {
- qsort_r_compar = old_compar;
- qsort_r_size = old_size;
- }
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+ COMMON_INTERCEPTOR_ENTER(ctx, bsearch, key, base, nmemb, size, compar);
+ bsearch_compar_params params = {key, compar};
+ return REAL(bsearch)(&params, base, nmemb, size, wrapped_bsearch_compar);
}
-#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+# define INIT_BSEARCH COMMON_INTERCEPT_FUNCTION(bsearch)
#else
-#define INIT_QSORT_R
+# define INIT_BSEARCH
#endif
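bsearch offers the wrapper no spare argument, but it does hand the caller's key pointer to every comparison, so the interceptor bundles the real key and comparator into a struct and passes that in as the key. The same trick in isolation; illustrative sketch, not the interceptor:

#include <cstdio>
#include <cstdlib>

struct search_params {
  const void *key;
  int (*compar)(const void *, const void *);
};

// bsearch always passes the caller-supplied "key" as the first argument,
// so the wrapper can unpack the real key and comparator from it.
static int wrapped(const void *key, const void *elem) {
  const search_params *p = static_cast<const search_params *>(key);
  return p->compar(p->key, elem);
}

static int cmp_int(const void *a, const void *b) {
  int x = *static_cast<const int *>(a), y = *static_cast<const int *>(b);
  return (x > y) - (x < y);
}

int main() {
  int v[] = {1, 2, 3, 5, 8};
  int key = 5;
  search_params p = {&key, cmp_int};
  void *hit = bsearch(&p, v, 5, sizeof(int), wrapped);
  printf("found: %d\n", hit ? *static_cast<int *>(hit) : -1);
  return 0;
}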
#if SANITIZER_INTERCEPT_SIGALTSTACK
@@ -10050,6 +10106,42 @@ INTERCEPTOR(int, sigaltstack, void *ss, void *oss) {
#define INIT_SIGALTSTACK
#endif
+#if SANITIZER_INTERCEPT_PROCCTL
+INTERCEPTOR(int, procctl, int idtype, u64 id, int cmd, uptr data) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, procctl, idtype, id, cmd, data);
+ static const int PROC_REAP_ACQUIRE = 2;
+ static const int PROC_REAP_RELEASE = 3;
+ static const int PROC_REAP_STATUS = 4;
+ static const int PROC_REAP_GETPIDS = 5;
+ static const int PROC_REAP_KILL = 6;
+ if (cmd < PROC_REAP_ACQUIRE || cmd > PROC_REAP_KILL) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)data, sizeof(int));
+ } else {
+ // reap_acquire/reap_release take no arguments.
+ if (cmd > PROC_REAP_RELEASE) {
+ unsigned int reapsz;
+ switch (cmd) {
+ case PROC_REAP_STATUS:
+ reapsz = struct_procctl_reaper_status_sz;
+ break;
+ case PROC_REAP_GETPIDS:
+ reapsz = struct_procctl_reaper_pids_sz;
+ break;
+ case PROC_REAP_KILL:
+ reapsz = struct_procctl_reaper_kill_sz;
+ break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)data, reapsz);
+ }
+ }
+ return REAL(procctl)(idtype, id, cmd, data);
+}
+#define INIT_PROCCTL COMMON_INTERCEPT_FUNCTION(procctl)
+#else
+#define INIT_PROCCTL
+#endif
+
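procctl's data argument is interpreted per command, so the interceptor can only size the check after classifying cmd: reaper commands use dedicated structs, everything else is treated as an int. A hedged, FreeBSD-only usage sketch; the constants and struct come from <sys/procctl.h> and the program is illustrative only:

#include <sys/procctl.h>
#include <sys/types.h>
#include <sys/wait.h>   // idtype_t, P_PID
#include <unistd.h>
#include <cstdio>

int main() {
  struct procctl_reaper_status st;
  // PROC_REAP_STATUS passes a struct through data; the interceptor sizes
  // its check with struct_procctl_reaper_status_sz for exactly this command.
  if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &st) == -1) {
    perror("procctl");
    return 1;
  }
  printf("reaper flags: %u\n", st.rs_flags);
  return 0;
}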
#if SANITIZER_INTERCEPT_UNAME
INTERCEPTOR(int, uname, struct utsname *utsname) {
#if SANITIZER_LINUX
@@ -10088,14 +10180,66 @@ INTERCEPTOR(int, __xuname, int size, void *utsname) {
#define INIT___XUNAME
#endif
+#if SANITIZER_INTERCEPT_HEXDUMP
+INTERCEPTOR(void, hexdump, const void *ptr, int length, const char *header, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, hexdump, ptr, length, header, flags);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, length);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, header, internal_strlen(header) + 1);
+ REAL(hexdump)(ptr, length, header, flags);
+}
+
+#define INIT_HEXDUMP COMMON_INTERCEPT_FUNCTION(hexdump);
+#else
+#define INIT_HEXDUMP
+#endif
+
+#if SANITIZER_INTERCEPT_ARGP_PARSE
+INTERCEPTOR(int, argp_parse, const struct argp *argp, int argc, char **argv,
+ unsigned flags, int *arg_index, void *input) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, argp_parse, argp, argc, argv, flags, arg_index,
+ input);
+ for (int i = 0; i < argc; i++)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argv[i], internal_strlen(argv[i]) + 1);
+ int res = REAL(argp_parse)(argp, argc, argv, flags, arg_index, input);
+ if (!res && arg_index)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg_index, sizeof(int));
+ return res;
+}
+
+#define INIT_ARGP_PARSE COMMON_INTERCEPT_FUNCTION(argp_parse);
+#else
+#define INIT_ARGP_PARSE
+#endif
+
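argp_parse receives the raw argv, so the interceptor checks every argument string up front and, on success, marks *arg_index as written. A minimal glibc argp user for context; illustrative only, and the program does nothing but parse:

#include <argp.h>
#include <cstdio>

// An argp with no options: argp_parse() still handles --help/--usage and
// leaves the index of the first non-option argument in arg_index.
static const struct argp parser = {nullptr, nullptr, "ARGS...", "demo program"};

int main(int argc, char **argv) {
  int arg_index = 0;
  if (argp_parse(&parser, argc, argv, 0, &arg_index, nullptr) == 0)
    printf("first non-option argument at index %d\n", arg_index);
  return 0;
}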
+#if SANITIZER_INTERCEPT_CPUSET_GETAFFINITY
+INTERCEPTOR(int, cpuset_getaffinity, int level, int which, __int64_t id, SIZE_T cpusetsize, __sanitizer_cpuset_t *mask) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cpuset_getaffinity, level, which, id, cpusetsize, mask);
+ int res = REAL(cpuset_getaffinity)(level, which, id, cpusetsize, mask);
+ if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
+ return res;
+}
+#define INIT_CPUSET_GETAFFINITY COMMON_INTERCEPT_FUNCTION(cpuset_getaffinity);
+#else
+#define INIT_CPUSET_GETAFFINITY
+#endif
+
#include "sanitizer_common_interceptors_netbsd_compat.inc"
+namespace __sanitizer {
+void InitializeMemintrinsicInterceptors();
+} // namespace __sanitizer
+
static void InitializeCommonInterceptors() {
#if SI_POSIX
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new ((void *)&metadata_mem) MetadataHashMap();
#endif
+ __sanitizer::InitializeMemintrinsicInterceptors();
+
INIT_MMAP;
INIT_MMAP64;
INIT_TEXTDOMAIN;
@@ -10117,9 +10261,6 @@ static void InitializeCommonInterceptors() {
INIT_STRPBRK;
INIT_STRXFRM;
INIT___STRXFRM_L;
- INIT_MEMSET;
- INIT_MEMMOVE;
- INIT_MEMCPY;
INIT_MEMCHR;
INIT_MEMCMP;
INIT_BCMP;
@@ -10166,6 +10307,9 @@ static void InitializeCommonInterceptors() {
INIT_TIME;
INIT_GLOB;
INIT_GLOB64;
+ INIT___B64_TO;
+ INIT_DN_COMP_EXPAND;
+ INIT_POSIX_SPAWN;
INIT_WAIT;
INIT_WAIT4;
INIT_INET;
@@ -10200,6 +10344,7 @@ static void InitializeCommonInterceptors() {
INIT_GETCWD;
INIT_GET_CURRENT_DIR_NAME;
INIT_STRTOIMAX;
+ INIT_STRTOIMAX_C23;
INIT_MBSTOWCS;
INIT_MBSNRTOWCS;
INIT_WCSTOMBS;
@@ -10231,12 +10376,6 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_SIGMASK;
INIT_BACKTRACE;
INIT__EXIT;
- INIT_PTHREAD_MUTEX_LOCK;
- INIT_PTHREAD_MUTEX_UNLOCK;
- INIT___PTHREAD_MUTEX_LOCK;
- INIT___PTHREAD_MUTEX_UNLOCK;
- INIT___LIBC_MUTEX_LOCK;
- INIT___LIBC_MUTEX_UNLOCK;
INIT___LIBC_THR_SETCANCELSTATE;
INIT_GETMNTENT;
INIT_GETMNTENT_R;
@@ -10254,6 +10393,7 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_ATTR_GET_SCHED;
INIT_PTHREAD_ATTR_GETINHERITSCHED;
INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_GETAFFINITY_NP;
INIT_PTHREAD_MUTEXATTR_GETPSHARED;
INIT_PTHREAD_MUTEXATTR_GETTYPE;
INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
@@ -10293,9 +10433,6 @@ static void InitializeCommonInterceptors() {
INIT_GETIFADDRS;
INIT_IF_INDEXTONAME;
INIT_CAPGET;
- INIT_AEABI_MEM;
- INIT___BZERO;
- INIT_BZERO;
INIT_FTIME;
INIT_XDR;
INIT_XDRREC_LINUX;
@@ -10322,8 +10459,10 @@ static void InitializeCommonInterceptors() {
INIT_RECV_RECVFROM;
INIT_SEND_SENDTO;
INIT_STAT;
+ INIT_STAT64;
INIT_EVENTFD_READ_WRITE;
INIT_LSTAT;
+ INIT_LSTAT64;
INIT___XSTAT;
INIT___XSTAT64;
INIT___LXSTAT;
@@ -10396,14 +10535,17 @@ static void InitializeCommonInterceptors() {
INIT_GETUSERSHELL;
INIT_SL_INIT;
INIT_GETRANDOM;
- INIT_CRYPT;
- INIT_CRYPT_R;
INIT_GETENTROPY;
INIT_QSORT;
INIT_QSORT_R;
+ INIT_BSEARCH;
INIT_SIGALTSTACK;
+ INIT_PROCCTL;
INIT_UNAME;
INIT___XUNAME;
+ INIT_HEXDUMP;
+ INIT_ARGP_PARSE;
+ INIT_CPUSET_GETAFFINITY;
INIT___PRINTF_CHK;
}
lib/tsan/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -324,8 +324,8 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
continue;
int size = scanf_get_value_size(&dir);
if (size == FSS_INVALID) {
- Report("%s: WARNING: unexpected format specifier in scanf interceptor: ",
- SanitizerToolName, "%.*s\n", dir.end - dir.begin, dir.begin);
+ Report("%s: WARNING: unexpected format specifier in scanf interceptor: %.*s\n",
+ SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
break;
}
void *argp = va_arg(aq, void *);
@@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- // For %ms/%mc, write the allocated output buffer as well.
+ // For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
if (dir.allocate) {
- char *buf = *(char **)argp;
- if (buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
+ if (char *buf = *(char **)argp) {
+ if (dir.convSpecifier == 'c')
+ size = 1;
+ else if (dir.convSpecifier == 'C')
+ size = sizeof(wchar_t);
+ else if (dir.convSpecifier == 'S')
+ size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
+ else // 's' or '['
+ size = internal_strlen(buf) + 1;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
+ }
}
}
}
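For the %m conversions, libc itself allocates the output buffer, so marking only the char * argument as written would miss the new allocation; the number of bytes written into that buffer depends on the conversion, as computed above. An illustrative caller, assuming a glibc-style scanf that supports %ms (not taken from this commit):

#include <cstdio>
#include <cstdlib>

int main() {
  char *word = nullptr;
  // libc allocates 'word'; the interceptor marks both &word (the pointer
  // slot) and the 6 bytes "hello\0" it points to as written.
  if (std::sscanf("hello world", "%ms", &word) == 1) {
    std::printf("%s\n", word);
    std::free(word);
  }
  return 0;
}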
@@ -469,7 +477,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
break; \
default: \
Report("WARNING: unexpected floating-point arg size" \
- " in printf interceptor: %d\n", size); \
+ " in printf interceptor: %zu\n", static_cast<uptr>(size)); \
return; \
} \
} else { \
@@ -484,7 +492,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
break; \
default: \
Report("WARNING: unexpected arg size" \
- " in printf interceptor: %d\n", size); \
+ " in printf interceptor: %zu\n", static_cast<uptr>(size)); \
return; \
} \
} \
@@ -530,7 +538,7 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
Report(
"%s: WARNING: unexpected format specifier in printf "
"interceptor: %.*s (reported once per process)\n",
- SanitizerToolName, dir.end - dir.begin, dir.begin);
+ SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
break;
}
if (dir.convSpecifier == 'n') {
lib/tsan/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -115,11 +115,19 @@ static void ioctl_table_fill() {
// _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
_(BLKFLSBUF, NONE, 0);
_(BLKGETSIZE, WRITE, sizeof(uptr));
- _(BLKRAGET, WRITE, sizeof(int));
+ _(BLKRAGET, WRITE, sizeof(uptr));
_(BLKRASET, NONE, 0);
_(BLKROGET, WRITE, sizeof(int));
_(BLKROSET, READ, sizeof(int));
_(BLKRRPART, NONE, 0);
+ _(BLKFRASET, NONE, 0);
+ _(BLKFRAGET, WRITE, sizeof(uptr));
+ _(BLKSECTSET, READ, sizeof(short));
+ _(BLKSECTGET, WRITE, sizeof(short));
+ _(BLKSSZGET, WRITE, sizeof(int));
+ _(BLKBSZGET, WRITE, sizeof(int));
+ _(BLKBSZSET, READ, sizeof(uptr));
+ _(BLKGETSIZE64, WRITE, sizeof(u64));
_(CDROMEJECT, NONE, 0);
_(CDROMEJECT_SW, NONE, 0);
_(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);
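Each _() entry records whether the kernel reads or writes through the ioctl argument and how many bytes, so the sizes have to match the underlying ABI. A hedged caller sketch for two of the new block-device entries (Linux only; "/dev/sda" is just a placeholder path):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main() {
  int fd = open("/dev/sda", O_RDONLY);
  if (fd < 0) return 1;
  uint64_t bytes = 0;  // BLKGETSIZE64 writes a u64, hence WRITE, sizeof(u64)
  int sector = 0;      // BLKSSZGET writes an int, hence WRITE, sizeof(int)
  if (ioctl(fd, BLKGETSIZE64, &bytes) == 0 && ioctl(fd, BLKSSZGET, &sector) == 0)
    printf("%llu bytes, %d-byte logical sectors\n",
           (unsigned long long)bytes, sector);
  close(fd);
  return 0;
}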
lib/tsan/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc
@@ -0,0 +1,244 @@
+//===-- sanitizer_common_interceptors_memintrinsics.inc ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Memintrinsic function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// These interceptors are part of the common interceptors, but separated out so
+// that implementations may add them, if necessary, to a separate source file
+// that should define SANITIZER_COMMON_NO_REDEFINE_BUILTINS at the top.
+//
+// This file should be included into the tool's memintrinsic interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_REDEFINE_BUILTINS_H
+#error "Define SANITIZER_COMMON_NO_REDEFINE_BUILTINS in .cpp file"
+#endif
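A minimal sketch of the tool-side translation unit the comment above describes (the tool name and check functions are hypothetical, not from this commit): it defines SANITIZER_COMMON_NO_REDEFINE_BUILTINS, supplies the required macros, and includes this file. The *_IMPL macros can be left to the defaults defined below.

// mytool_memintrinsics.cpp (hypothetical)
#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS

#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"

extern bool mytool_inited;                                    // hypothetical
void mytool_check_read(const void *p, __sanitizer::uptr n);   // hypothetical
void mytool_check_write(const void *p, __sanitizer::uptr n);  // hypothetical

#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!mytool_inited)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  do {                                           \
    ctx = nullptr;                               \
    (void)ctx;                                   \
  } while (false)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  mytool_check_read((const void *)(ptr), (__sanitizer::uptr)(size))
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  mytool_check_write((const void *)(ptr), (__sanitizer::uptr)(size))

// COMMON_INTERCEPTOR_MEM{SET,MOVE,CPY}_IMPL fall back to the defaults below.
#include "sanitizer_common_interceptors_memintrinsics.inc"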
+
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+// Platform-specific options.
+#if SANITIZER_APPLE
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
+#endif // SANITIZER_APPLE
+
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
+#if SANITIZER_INTERCEPT_MEMSET
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
+}
+
+#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
+#else
+#define INIT_MEMSET
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMOVE
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
+#else
+#define INIT_MEMMOVE
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCPY
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, calling internal_memcpy here would cause memory corruption,
+ // because memcpy and memmove are actually aliases of the same
+ // implementation; we need to use internal_memmove here. The same holds if
+ // this is ever switched to the internal_ variants: use internal_memmove,
+ // since memcpy is an alias of memmove on OS X.
+ void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
+#else
+#define INIT_MEMCPY
+#endif
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
+#if SANITIZER_INTERCEPT___BZERO
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_BZERO
+INTERCEPTOR(void *, bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
+#else
+#define INIT_BZERO
+#endif // SANITIZER_INTERCEPT_BZERO
+
+namespace __sanitizer {
+// This does not need to be called if InitializeCommonInterceptors() is called.
+void InitializeMemintrinsicInterceptors() {
+ INIT_MEMSET;
+ INIT_MEMMOVE;
+ INIT_MEMCPY;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_BZERO;
+}
+} // namespace __sanitizer
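As the comment above notes, tools that call InitializeCommonInterceptors() get this registration for free; a tool that does not reach it, for example because its memintrinsic interceptors live only in a SANITIZER_COMMON_NO_REDEFINE_BUILTINS translation unit, can call it directly. A short hypothetical sketch:

namespace __mytool {  // hypothetical tool namespace
void InitializeInterceptors() {
  // ... tool-specific interceptors ...
  __sanitizer::InitializeMemintrinsicInterceptors();
}
}  // namespace __mytool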
lib/tsan/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
@@ -33,7 +33,7 @@
INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -99,7 +99,7 @@ INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
int res = REAL(statvfs1)(path, buf, flags);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
return res;
lib/tsan/sanitizer_common/sanitizer_common_interface.inc
@@ -9,12 +9,16 @@
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_annotate_double_ended_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(
+ __sanitizer_double_ended_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_verify_double_ended_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
@@ -28,7 +32,9 @@ INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size_fast)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@@ -40,3 +46,7 @@ INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
+// Memintrinsic functions.
+INTERFACE_FUNCTION(__sanitizer_internal_memcpy)
+INTERFACE_FUNCTION(__sanitizer_internal_memmove)
+INTERFACE_FUNCTION(__sanitizer_internal_memset)
lib/tsan/sanitizer_common/sanitizer_common_interface_posix.inc
@@ -11,3 +11,5 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)
lib/tsan/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -10,27 +10,22 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_procmaps.h"
-
+#include "sanitizer_stackdepot.h"
namespace __sanitizer {
-static void (*SoftRssLimitExceededCallback)(bool exceeded);
-void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
- CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
- SoftRssLimitExceededCallback = Callback;
-}
-
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
-SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
- return nullptr;
-}
+SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }
void *BackgroundThread(void *arg) {
+ VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName);
const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
const bool heap_profile = common_flags()->heap_profile;
@@ -48,16 +43,12 @@ void *BackgroundThread(void *arg) {
prev_reported_rss = current_rss_mb;
}
// If stack depot has grown 10% since last time, print it too.
- StackDepotStats *stack_depot_stats = StackDepotGetStats();
- if (stack_depot_stats) {
- if (prev_reported_stack_depot_size * 11 / 10 <
- stack_depot_stats->allocated) {
- Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
- SanitizerToolName,
- stack_depot_stats->n_uniq_ids,
- stack_depot_stats->allocated >> 20);
- prev_reported_stack_depot_size = stack_depot_stats->allocated;
- }
+ StackDepotStats stack_depot_stats = StackDepotGetStats();
+ if (prev_reported_stack_depot_size * 11 / 10 <
+ stack_depot_stats.allocated) {
+ Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName,
+ stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
+ prev_reported_stack_depot_size = stack_depot_stats.allocated;
}
}
// Check RSS against the limit.
@@ -72,13 +63,13 @@ void *BackgroundThread(void *arg) {
reached_soft_rss_limit = true;
Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
- if (SoftRssLimitExceededCallback)
- SoftRssLimitExceededCallback(true);
+ SetRssLimitExceeded(true);
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
- if (SoftRssLimitExceededCallback)
- SoftRssLimitExceededCallback(false);
+ Report("%s: soft rss limit unexhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
+ SetRssLimitExceeded(false);
}
}
if (heap_profile &&
@@ -89,6 +80,42 @@ void *BackgroundThread(void *arg) {
}
}
}
+
+void MaybeStartBackgroudThread() {
+ // Need to implement/test on other platforms.
+ // Start the background thread if one of the rss limits is given.
+ if (!common_flags()->hard_rss_limit_mb &&
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
+ if (!&real_pthread_create) {
+ VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName);
+ return; // Can't spawn the thread anyway.
+ }
+
+ static bool started = false;
+ if (!started) {
+ started = true;
+ internal_start_thread(BackgroundThread, nullptr);
+ }
+}
+
+# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
+# ifdef __clang__
+# pragma clang diagnostic push
+// We avoid global constructors to be sure that globals are ready when
+// sanitizers need them. This can happen before global constructors have
+// executed. Here we don't mind if the thread is started at a later stage.
+# pragma clang diagnostic ignored "-Wglobal-constructors"
+# endif
+static struct BackgroudThreadStarted {
+ BackgroudThreadStarted() { MaybeStartBackgroudThread(); }
+} background_thread_strarter UNUSED;
+# ifdef __clang__
+# pragma clang diagnostic pop
+# endif
+# endif
+#else
+void MaybeStartBackgroudThread() {}
#endif
void WriteToSyslog(const char *msg) {
@@ -111,18 +138,6 @@ void WriteToSyslog(const char *msg) {
WriteOneLineToSyslog(p);
}
-void MaybeStartBackgroudThread() {
-#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
- !SANITIZER_GO // Need to implement/test on other platforms.
- // Start the background thread if one of the rss limits is given.
- if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb &&
- !common_flags()->heap_profile) return;
- if (!&real_pthread_create) return; // Can't spawn the thread anyway.
- internal_start_thread(BackgroundThread, nullptr);
-#endif
-}
-
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
@@ -191,10 +206,22 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
#endif // !SANITIZER_FUCHSIA
+#if !SANITIZER_WINDOWS && !SANITIZER_GO
+// Weak default implementation for when sanitizer_stackdepot is not linked in.
+SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}
+static void StopStackDepotBackgroundThread() {
+ StackDepotStopBackgroundThread();
+}
+#else
+// SANITIZER_WEAK_ATTRIBUTE is unsupported.
+static void StopStackDepotBackgroundThread() {}
+#endif
+
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
__sanitizer_sandbox_arguments *args) {
+ __sanitizer::StopStackDepotBackgroundThread();
__sanitizer::PlatformPrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();
lib/tsan/sanitizer_common/sanitizer_common_nolibc.cpp
@@ -25,9 +25,10 @@ void LogMessageOnPrintf(const char *str) {}
#endif
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
+bool CreateDir(const char *pathname) { return false; }
#endif // !SANITIZER_WINDOWS
-#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+#if !SANITIZER_WINDOWS && !SANITIZER_APPLE
void ListOfModules::init() {}
void InitializePlatformCommonFlags(CommonFlags *cf) {}
#endif
lib/tsan/sanitizer_common/sanitizer_common_syscalls.inc
@@ -43,45 +43,47 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
-#include "sanitizer_libc.h"
+# include "sanitizer_libc.h"
-#define PRE_SYSCALL(name) \
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
-#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
-#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+# define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+# define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+# define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
-#define POST_SYSCALL(name) \
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
-#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
-#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+# define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+# define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+# define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
-#ifndef COMMON_SYSCALL_ACQUIRE
-# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
-#endif
+# ifndef COMMON_SYSCALL_ACQUIRE
+# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+# endif
-#ifndef COMMON_SYSCALL_RELEASE
-# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
-#endif
+# ifndef COMMON_SYSCALL_RELEASE
+# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+# endif
-#ifndef COMMON_SYSCALL_FD_CLOSE
-# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
-#endif
+# ifndef COMMON_SYSCALL_FD_CLOSE
+# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+# endif
-#ifndef COMMON_SYSCALL_FD_ACQUIRE
-# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
-#endif
+# ifndef COMMON_SYSCALL_FD_ACQUIRE
+# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+# endif
-#ifndef COMMON_SYSCALL_FD_RELEASE
-# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
-#endif
+# ifndef COMMON_SYSCALL_FD_RELEASE
+# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+# endif
-#ifndef COMMON_SYSCALL_PRE_FORK
-# define COMMON_SYSCALL_PRE_FORK() {}
-#endif
+# ifndef COMMON_SYSCALL_PRE_FORK
+# define COMMON_SYSCALL_PRE_FORK() \
+ {}
+# endif
-#ifndef COMMON_SYSCALL_POST_FORK
-# define COMMON_SYSCALL_POST_FORK(res) {}
-#endif
+# ifndef COMMON_SYSCALL_POST_FORK
+# define COMMON_SYSCALL_POST_FORK(res) \
+ {}
+# endif
// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
@@ -130,8 +132,8 @@ struct sanitizer_kernel_sockaddr {
// Declare it "void" to catch sizeof(kernel_sigset_t).
typedef void kernel_sigset_t;
-static void kernel_write_iovec(const __sanitizer_iovec *iovec,
- SIZE_T iovlen, SIZE_T maxlen) {
+static void kernel_write_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,
+ SIZE_T maxlen) {
for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
POST_WRITE(iovec[i].iov_base, sz);
@@ -141,8 +143,8 @@ static void kernel_write_iovec(const __sanitizer_iovec *iovec,
// This functions uses POST_READ, because it needs to run after syscall to know
// the real read range.
-static void kernel_read_iovec(const __sanitizer_iovec *iovec,
- SIZE_T iovlen, SIZE_T maxlen) {
+static void kernel_read_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,
+ SIZE_T maxlen) {
POST_READ(iovec, sizeof(*iovec) * iovlen);
for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
@@ -155,8 +157,8 @@ PRE_SYSCALL(recvmsg)(long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
PRE_READ(msg, sizeof(*msg));
}
-POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
- long flags) {
+POST_SYSCALL(recvmsg)
+(long res, long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
if (res >= 0) {
if (msg) {
for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {
@@ -167,13 +169,14 @@ POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
}
}
-PRE_SYSCALL(recvmmsg)(long fd, sanitizer_kernel_mmsghdr *msg, long vlen,
- long flags, void *timeout) {
+PRE_SYSCALL(recvmmsg)
+(long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags, void *timeout) {
PRE_READ(msg, vlen * sizeof(*msg));
}
-POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
- long vlen, long flags, void *timeout) {
+POST_SYSCALL(recvmmsg)
+(long res, long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags,
+ void *timeout) {
if (res >= 0) {
if (msg) {
for (unsigned long i = 0; i < msg->msg_hdr.msg_iovlen; ++i) {
@@ -183,7 +186,8 @@ POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
POST_WRITE(msg->msg_hdr.msg_control, msg->msg_hdr.msg_controllen);
POST_WRITE(&msg->msg_len, sizeof(msg->msg_len));
}
- if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ if (timeout)
+ POST_WRITE(timeout, struct_timespec_sz);
}
}
@@ -203,7 +207,8 @@ PRE_SYSCALL(time)(void *tloc) {}
POST_SYSCALL(time)(long res, void *tloc) {
if (res >= 0) {
- if (tloc) POST_WRITE(tloc, sizeof(long));
+ if (tloc)
+ POST_WRITE(tloc, sizeof(long));
}
}
@@ -211,7 +216,8 @@ PRE_SYSCALL(stime)(void *tptr) {}
POST_SYSCALL(stime)(long res, void *tptr) {
if (res >= 0) {
- if (tptr) POST_WRITE(tptr, sizeof(long));
+ if (tptr)
+ POST_WRITE(tptr, sizeof(long));
}
}
@@ -219,8 +225,10 @@ PRE_SYSCALL(gettimeofday)(void *tv, void *tz) {}
POST_SYSCALL(gettimeofday)(long res, void *tv, void *tz) {
if (res >= 0) {
- if (tv) POST_WRITE(tv, timeval_sz);
- if (tz) POST_WRITE(tz, struct_timezone_sz);
+ if (tv)
+ POST_WRITE(tv, timeval_sz);
+ if (tz)
+ POST_WRITE(tz, struct_timezone_sz);
}
}
@@ -228,26 +236,30 @@ PRE_SYSCALL(settimeofday)(void *tv, void *tz) {}
POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
if (res >= 0) {
- if (tv) POST_WRITE(tv, timeval_sz);
- if (tz) POST_WRITE(tz, struct_timezone_sz);
+ if (tv)
+ POST_WRITE(tv, timeval_sz);
+ if (tz)
+ POST_WRITE(tz, struct_timezone_sz);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(adjtimex)(void *txc_p) {}
POST_SYSCALL(adjtimex)(long res, void *txc_p) {
if (res >= 0) {
- if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
+ if (txc_p)
+ POST_WRITE(txc_p, struct_timex_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(times)(void *tbuf) {}
POST_SYSCALL(times)(long res, void *tbuf) {
if (res >= 0) {
- if (tbuf) POST_WRITE(tbuf, struct_tms_sz);
+ if (tbuf)
+ POST_WRITE(tbuf, struct_tms_sz);
}
}
@@ -259,8 +271,10 @@ PRE_SYSCALL(nanosleep)(void *rqtp, void *rmtp) {}
POST_SYSCALL(nanosleep)(long res, void *rqtp, void *rmtp) {
if (res >= 0) {
- if (rqtp) POST_WRITE(rqtp, struct_timespec_sz);
- if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ if (rqtp)
+ POST_WRITE(rqtp, struct_timespec_sz);
+ if (rmtp)
+ POST_WRITE(rmtp, struct_timespec_sz);
}
}
@@ -296,9 +310,12 @@ PRE_SYSCALL(getresuid)(void *ruid, void *euid, void *suid) {}
POST_SYSCALL(getresuid)(long res, void *ruid, void *euid, void *suid) {
if (res >= 0) {
- if (ruid) POST_WRITE(ruid, sizeof(unsigned));
- if (euid) POST_WRITE(euid, sizeof(unsigned));
- if (suid) POST_WRITE(suid, sizeof(unsigned));
+ if (ruid)
+ POST_WRITE(ruid, sizeof(unsigned));
+ if (euid)
+ POST_WRITE(euid, sizeof(unsigned));
+ if (suid)
+ POST_WRITE(suid, sizeof(unsigned));
}
}
@@ -306,9 +323,12 @@ PRE_SYSCALL(getresgid)(void *rgid, void *egid, void *sgid) {}
POST_SYSCALL(getresgid)(long res, void *rgid, void *egid, void *sgid) {
if (res >= 0) {
- if (rgid) POST_WRITE(rgid, sizeof(unsigned));
- if (egid) POST_WRITE(egid, sizeof(unsigned));
- if (sgid) POST_WRITE(sgid, sizeof(unsigned));
+ if (rgid)
+ POST_WRITE(rgid, sizeof(unsigned));
+ if (egid)
+ POST_WRITE(egid, sizeof(unsigned));
+ if (sgid)
+ POST_WRITE(sgid, sizeof(unsigned));
}
}
@@ -326,10 +346,11 @@ POST_SYSCALL(getsid)(long res, long pid) {}
PRE_SYSCALL(getgroups)(long gidsetsize, void *grouplist) {}
-POST_SYSCALL(getgroups)(long res, long gidsetsize,
- __sanitizer___kernel_gid_t *grouplist) {
+POST_SYSCALL(getgroups)
+(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
if (res >= 0) {
- if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ if (grouplist)
+ POST_WRITE(grouplist, res * sizeof(*grouplist));
}
}
@@ -374,11 +395,12 @@ PRE_SYSCALL(setsid)() {}
POST_SYSCALL(setsid)(long res) {}
PRE_SYSCALL(setgroups)(long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
- if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+ if (grouplist)
+ POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
}
-POST_SYSCALL(setgroups)(long res, long gidsetsize,
- __sanitizer___kernel_gid_t *grouplist) {}
+POST_SYSCALL(setgroups)
+(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {}
PRE_SYSCALL(acct)(const void *name) {
if (name)
@@ -388,17 +410,21 @@ PRE_SYSCALL(acct)(const void *name) {
POST_SYSCALL(acct)(long res, const void *name) {}
PRE_SYSCALL(capget)(void *header, void *dataptr) {
- if (header) PRE_READ(header, __user_cap_header_struct_sz);
+ if (header)
+ PRE_READ(header, __user_cap_header_struct_sz);
}
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
if (res >= 0)
- if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
+ if (dataptr)
+ POST_WRITE(dataptr, __user_cap_data_struct_sz(header));
}
PRE_SYSCALL(capset)(void *header, const void *data) {
- if (header) PRE_READ(header, __user_cap_header_struct_sz);
- if (data) PRE_READ(data, __user_cap_data_struct_sz);
+ if (header)
+ PRE_READ(header, __user_cap_header_struct_sz);
+ if (data)
+ PRE_READ(data, __user_cap_data_struct_sz(header));
}
POST_SYSCALL(capset)(long res, void *header, const void *data) {}
@@ -411,7 +437,8 @@ PRE_SYSCALL(sigpending)(void *set) {}
POST_SYSCALL(sigpending)(long res, void *set) {
if (res >= 0) {
- if (set) POST_WRITE(set, old_sigset_t_sz);
+ if (set)
+ POST_WRITE(set, old_sigset_t_sz);
}
}
@@ -419,8 +446,10 @@ PRE_SYSCALL(sigprocmask)(long how, void *set, void *oset) {}
POST_SYSCALL(sigprocmask)(long res, long how, void *set, void *oset) {
if (res >= 0) {
- if (set) POST_WRITE(set, old_sigset_t_sz);
- if (oset) POST_WRITE(oset, old_sigset_t_sz);
+ if (set)
+ POST_WRITE(set, old_sigset_t_sz);
+ if (oset)
+ POST_WRITE(oset, old_sigset_t_sz);
}
}
@@ -428,7 +457,8 @@ PRE_SYSCALL(getitimer)(long which, void *value) {}
POST_SYSCALL(getitimer)(long res, long which, void *value) {
if (res >= 0) {
- if (value) POST_WRITE(value, struct_itimerval_sz);
+ if (value)
+ POST_WRITE(value, struct_itimerval_sz);
}
}
@@ -436,19 +466,23 @@ PRE_SYSCALL(setitimer)(long which, void *value, void *ovalue) {}
POST_SYSCALL(setitimer)(long res, long which, void *value, void *ovalue) {
if (res >= 0) {
- if (value) POST_WRITE(value, struct_itimerval_sz);
- if (ovalue) POST_WRITE(ovalue, struct_itimerval_sz);
+ if (value)
+ POST_WRITE(value, struct_itimerval_sz);
+ if (ovalue)
+ POST_WRITE(ovalue, struct_itimerval_sz);
}
}
-PRE_SYSCALL(timer_create)(long which_clock, void *timer_event_spec,
- void *created_timer_id) {}
+PRE_SYSCALL(timer_create)
+(long which_clock, void *timer_event_spec, void *created_timer_id) {}
-POST_SYSCALL(timer_create)(long res, long which_clock, void *timer_event_spec,
- void *created_timer_id) {
+POST_SYSCALL(timer_create)
+(long res, long which_clock, void *timer_event_spec, void *created_timer_id) {
if (res >= 0) {
- if (timer_event_spec) POST_WRITE(timer_event_spec, struct_sigevent_sz);
- if (created_timer_id) POST_WRITE(created_timer_id, sizeof(long));
+ if (timer_event_spec)
+ POST_WRITE(timer_event_spec, struct_sigevent_sz);
+ if (created_timer_id)
+ POST_WRITE(created_timer_id, sizeof(long));
}
}
@@ -456,7 +490,8 @@ PRE_SYSCALL(timer_gettime)(long timer_id, void *setting) {}
POST_SYSCALL(timer_gettime)(long res, long timer_id, void *setting) {
if (res >= 0) {
- if (setting) POST_WRITE(setting, struct_itimerspec_sz);
+ if (setting)
+ POST_WRITE(setting, struct_itimerspec_sz);
}
}
@@ -464,15 +499,18 @@ PRE_SYSCALL(timer_getoverrun)(long timer_id) {}
POST_SYSCALL(timer_getoverrun)(long res, long timer_id) {}
-PRE_SYSCALL(timer_settime)(long timer_id, long flags, const void *new_setting,
- void *old_setting) {
- if (new_setting) PRE_READ(new_setting, struct_itimerspec_sz);
+PRE_SYSCALL(timer_settime)
+(long timer_id, long flags, const void *new_setting, void *old_setting) {
+ if (new_setting)
+ PRE_READ(new_setting, struct_itimerspec_sz);
}
-POST_SYSCALL(timer_settime)(long res, long timer_id, long flags,
- const void *new_setting, void *old_setting) {
+POST_SYSCALL(timer_settime)
+(long res, long timer_id, long flags, const void *new_setting,
+ void *old_setting) {
if (res >= 0) {
- if (old_setting) POST_WRITE(old_setting, struct_itimerspec_sz);
+ if (old_setting)
+ POST_WRITE(old_setting, struct_itimerspec_sz);
}
}
@@ -481,7 +519,8 @@ PRE_SYSCALL(timer_delete)(long timer_id) {}
POST_SYSCALL(timer_delete)(long res, long timer_id) {}
PRE_SYSCALL(clock_settime)(long which_clock, const void *tp) {
- if (tp) PRE_READ(tp, struct_timespec_sz);
+ if (tp)
+ PRE_READ(tp, struct_timespec_sz);
}
POST_SYSCALL(clock_settime)(long res, long which_clock, const void *tp) {}
@@ -490,37 +529,42 @@ PRE_SYSCALL(clock_gettime)(long which_clock, void *tp) {}
POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
if (res >= 0) {
- if (tp) POST_WRITE(tp, struct_timespec_sz);
+ if (tp)
+ POST_WRITE(tp, struct_timespec_sz);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
if (res >= 0) {
- if (tx) POST_WRITE(tx, struct_timex_sz);
+ if (tx)
+ POST_WRITE(tx, struct_timex_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
POST_SYSCALL(clock_getres)(long res, long which_clock, void *tp) {
if (res >= 0) {
- if (tp) POST_WRITE(tp, struct_timespec_sz);
+ if (tp)
+ POST_WRITE(tp, struct_timespec_sz);
}
}
-PRE_SYSCALL(clock_nanosleep)(long which_clock, long flags, const void *rqtp,
- void *rmtp) {
- if (rqtp) PRE_READ(rqtp, struct_timespec_sz);
+PRE_SYSCALL(clock_nanosleep)
+(long which_clock, long flags, const void *rqtp, void *rmtp) {
+ if (rqtp)
+ PRE_READ(rqtp, struct_timespec_sz);
}
-POST_SYSCALL(clock_nanosleep)(long res, long which_clock, long flags,
- const void *rqtp, void *rmtp) {
+POST_SYSCALL(clock_nanosleep)
+(long res, long which_clock, long flags, const void *rqtp, void *rmtp) {
if (res >= 0) {
- if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ if (rmtp)
+ POST_WRITE(rmtp, struct_timespec_sz);
}
}
@@ -532,12 +576,14 @@ PRE_SYSCALL(sched_setscheduler)(long pid, long policy, void *param) {}
POST_SYSCALL(sched_setscheduler)(long res, long pid, long policy, void *param) {
if (res >= 0) {
- if (param) POST_WRITE(param, struct_sched_param_sz);
+ if (param)
+ POST_WRITE(param, struct_sched_param_sz);
}
}
PRE_SYSCALL(sched_setparam)(long pid, void *param) {
- if (param) PRE_READ(param, struct_sched_param_sz);
+ if (param)
+ PRE_READ(param, struct_sched_param_sz);
}
POST_SYSCALL(sched_setparam)(long res, long pid, void *param) {}
@@ -550,23 +596,26 @@ PRE_SYSCALL(sched_getparam)(long pid, void *param) {}
POST_SYSCALL(sched_getparam)(long res, long pid, void *param) {
if (res >= 0) {
- if (param) POST_WRITE(param, struct_sched_param_sz);
+ if (param)
+ POST_WRITE(param, struct_sched_param_sz);
}
}
PRE_SYSCALL(sched_setaffinity)(long pid, long len, void *user_mask_ptr) {
- if (user_mask_ptr) PRE_READ(user_mask_ptr, len);
+ if (user_mask_ptr)
+ PRE_READ(user_mask_ptr, len);
}
-POST_SYSCALL(sched_setaffinity)(long res, long pid, long len,
- void *user_mask_ptr) {}
+POST_SYSCALL(sched_setaffinity)
+(long res, long pid, long len, void *user_mask_ptr) {}
PRE_SYSCALL(sched_getaffinity)(long pid, long len, void *user_mask_ptr) {}
-POST_SYSCALL(sched_getaffinity)(long res, long pid, long len,
- void *user_mask_ptr) {
+POST_SYSCALL(sched_getaffinity)
+(long res, long pid, long len, void *user_mask_ptr) {
if (res >= 0) {
- if (user_mask_ptr) POST_WRITE(user_mask_ptr, len);
+ if (user_mask_ptr)
+ POST_WRITE(user_mask_ptr, len);
}
}
@@ -586,7 +635,8 @@ PRE_SYSCALL(sched_rr_get_interval)(long pid, void *interval) {}
POST_SYSCALL(sched_rr_get_interval)(long res, long pid, void *interval) {
if (res >= 0) {
- if (interval) POST_WRITE(interval, struct_timespec_sz);
+ if (interval)
+ POST_WRITE(interval, struct_timespec_sz);
}
}
@@ -610,13 +660,14 @@ PRE_SYSCALL(restart_syscall)() {}
POST_SYSCALL(restart_syscall)(long res) {}
-PRE_SYSCALL(kexec_load)(long entry, long nr_segments, void *segments,
- long flags) {}
+PRE_SYSCALL(kexec_load)
+(long entry, long nr_segments, void *segments, long flags) {}
-POST_SYSCALL(kexec_load)(long res, long entry, long nr_segments, void *segments,
- long flags) {
+POST_SYSCALL(kexec_load)
+(long res, long entry, long nr_segments, void *segments, long flags) {
if (res >= 0) {
- if (segments) POST_WRITE(segments, struct_kexec_segment_sz);
+ if (segments)
+ POST_WRITE(segments, struct_kexec_segment_sz);
}
}
@@ -630,22 +681,26 @@ POST_SYSCALL(exit_group)(long res, long error_code) {}
PRE_SYSCALL(wait4)(long pid, void *stat_addr, long options, void *ru) {}
-POST_SYSCALL(wait4)(long res, long pid, void *stat_addr, long options,
- void *ru) {
+POST_SYSCALL(wait4)
+(long res, long pid, void *stat_addr, long options, void *ru) {
if (res >= 0) {
- if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
- if (ru) POST_WRITE(ru, struct_rusage_sz);
+ if (stat_addr)
+ POST_WRITE(stat_addr, sizeof(int));
+ if (ru)
+ POST_WRITE(ru, struct_rusage_sz);
}
}
-PRE_SYSCALL(waitid)(long which, long pid, void *infop, long options, void *ru) {
-}
+PRE_SYSCALL(waitid)
+(long which, long pid, void *infop, long options, void *ru) {}
-POST_SYSCALL(waitid)(long res, long which, long pid, void *infop, long options,
- void *ru) {
+POST_SYSCALL(waitid)
+(long res, long which, long pid, void *infop, long options, void *ru) {
if (res >= 0) {
- if (infop) POST_WRITE(infop, siginfo_t_sz);
- if (ru) POST_WRITE(ru, struct_rusage_sz);
+ if (infop)
+ POST_WRITE(infop, siginfo_t_sz);
+ if (ru)
+ POST_WRITE(ru, struct_rusage_sz);
}
}
@@ -653,7 +708,8 @@ PRE_SYSCALL(waitpid)(long pid, void *stat_addr, long options) {}
POST_SYSCALL(waitpid)(long res, long pid, void *stat_addr, long options) {
if (res >= 0) {
- if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+ if (stat_addr)
+ POST_WRITE(stat_addr, sizeof(int));
}
}
@@ -661,7 +717,8 @@ PRE_SYSCALL(set_tid_address)(void *tidptr) {}
POST_SYSCALL(set_tid_address)(long res, void *tidptr) {
if (res >= 0) {
- if (tidptr) POST_WRITE(tidptr, sizeof(int));
+ if (tidptr)
+ POST_WRITE(tidptr, sizeof(int));
}
}
@@ -682,11 +739,14 @@ POST_SYSCALL(delete_module)(long res, const void *name_user, long flags) {}
PRE_SYSCALL(rt_sigprocmask)(long how, void *set, void *oset, long sigsetsize) {}
-POST_SYSCALL(rt_sigprocmask)(long res, long how, kernel_sigset_t *set,
- kernel_sigset_t *oset, long sigsetsize) {
+POST_SYSCALL(rt_sigprocmask)
+(long res, long how, kernel_sigset_t *set, kernel_sigset_t *oset,
+ long sigsetsize) {
if (res >= 0) {
- if (set) POST_WRITE(set, sigsetsize);
- if (oset) POST_WRITE(oset, sigsetsize);
+ if (set)
+ POST_WRITE(set, sigsetsize);
+ if (oset)
+ POST_WRITE(oset, sigsetsize);
}
}
@@ -694,29 +754,34 @@ PRE_SYSCALL(rt_sigpending)(void *set, long sigsetsize) {}
POST_SYSCALL(rt_sigpending)(long res, kernel_sigset_t *set, long sigsetsize) {
if (res >= 0) {
- if (set) POST_WRITE(set, sigsetsize);
+ if (set)
+ POST_WRITE(set, sigsetsize);
}
}
-PRE_SYSCALL(rt_sigtimedwait)(const kernel_sigset_t *uthese, void *uinfo,
- const void *uts, long sigsetsize) {
- if (uthese) PRE_READ(uthese, sigsetsize);
- if (uts) PRE_READ(uts, struct_timespec_sz);
+PRE_SYSCALL(rt_sigtimedwait)
+(const kernel_sigset_t *uthese, void *uinfo, const void *uts, long sigsetsize) {
+ if (uthese)
+ PRE_READ(uthese, sigsetsize);
+ if (uts)
+ PRE_READ(uts, struct_timespec_sz);
}
-POST_SYSCALL(rt_sigtimedwait)(long res, const void *uthese, void *uinfo,
- const void *uts, long sigsetsize) {
+POST_SYSCALL(rt_sigtimedwait)
+(long res, const void *uthese, void *uinfo, const void *uts, long sigsetsize) {
if (res >= 0) {
- if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ if (uinfo)
+ POST_WRITE(uinfo, siginfo_t_sz);
}
}
PRE_SYSCALL(rt_tgsigqueueinfo)(long tgid, long pid, long sig, void *uinfo) {}
-POST_SYSCALL(rt_tgsigqueueinfo)(long res, long tgid, long pid, long sig,
- void *uinfo) {
+POST_SYSCALL(rt_tgsigqueueinfo)
+(long res, long tgid, long pid, long sig, void *uinfo) {
if (res >= 0) {
- if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ if (uinfo)
+ POST_WRITE(uinfo, siginfo_t_sz);
}
}
@@ -736,7 +801,8 @@ PRE_SYSCALL(rt_sigqueueinfo)(long pid, long sig, void *uinfo) {}
POST_SYSCALL(rt_sigqueueinfo)(long res, long pid, long sig, void *uinfo) {
if (res >= 0) {
- if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ if (uinfo)
+ POST_WRITE(uinfo, siginfo_t_sz);
}
}
@@ -772,11 +838,11 @@ PRE_SYSCALL(bdflush)(long func, long data) {}
POST_SYSCALL(bdflush)(long res, long func, long data) {}
-PRE_SYSCALL(mount)(void *dev_name, void *dir_name, void *type, long flags,
- void *data) {}
+PRE_SYSCALL(mount)
+(void *dev_name, void *dir_name, void *type, long flags, void *data) {}
-POST_SYSCALL(mount)(long res, void *dev_name, void *dir_name, void *type,
- long flags, void *data) {
+POST_SYSCALL(mount)
+(long res, void *dev_name, void *dir_name, void *type, long flags, void *data) {
if (res >= 0) {
if (dev_name)
POST_WRITE(dev_name,
@@ -826,11 +892,12 @@ PRE_SYSCALL(stat)(const void *filename, void *statbuf) {
POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct___old_kernel_stat_sz);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(statfs)(const void *path, void *buf) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
@@ -838,26 +905,31 @@ PRE_SYSCALL(statfs)(const void *path, void *buf) {
POST_SYSCALL(statfs)(long res, const void *path, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs_sz);
}
}
-PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
- if (path)
- PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
-}
+PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
-POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
+POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs_sz);
}
}
+# endif // !SANITIZER_ANDROID
-PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
+# if SANITIZER_GLIBC
+PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
-POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
+POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs64_sz);
}
}
@@ -865,10 +937,11 @@ PRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}
POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs64_sz);
}
}
-#endif // !SANITIZER_ANDROID
+# endif // SANITIZER_GLIBC
PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
if (filename)
@@ -878,7 +951,8 @@ PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
POST_SYSCALL(lstat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct___old_kernel_stat_sz);
}
}
@@ -886,7 +960,8 @@ PRE_SYSCALL(fstat)(long fd, void *statbuf) {}
POST_SYSCALL(fstat)(long res, long fd, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct___old_kernel_stat_sz);
}
}
@@ -898,7 +973,8 @@ PRE_SYSCALL(newstat)(const void *filename, void *statbuf) {
POST_SYSCALL(newstat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
@@ -910,7 +986,8 @@ PRE_SYSCALL(newlstat)(const void *filename, void *statbuf) {
POST_SYSCALL(newlstat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
@@ -918,19 +995,21 @@ PRE_SYSCALL(newfstat)(long fd, void *statbuf) {}
POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
-#if !SANITIZER_ANDROID
+# if SANITIZER_GLIBC
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
if (res >= 0) {
- if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
+ if (ubuf)
+ POST_WRITE(ubuf, struct_ustat_sz);
}
}
-#endif // !SANITIZER_ANDROID
+# endif // SANITIZER_GLIBC
PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
if (filename)
@@ -940,7 +1019,8 @@ PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
POST_SYSCALL(stat64)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
@@ -948,7 +1028,8 @@ PRE_SYSCALL(fstat64)(long fd, void *statbuf) {}
POST_SYSCALL(fstat64)(long res, long fd, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
@@ -960,71 +1041,80 @@ PRE_SYSCALL(lstat64)(const void *filename, void *statbuf) {
POST_SYSCALL(lstat64)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
-PRE_SYSCALL(setxattr)(const void *path, const void *name, const void *value,
- long size, long flags) {
+PRE_SYSCALL(setxattr)
+(const void *path, const void *name, const void *value, long size, long flags) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
- if (value) PRE_READ(value, size);
+ if (value)
+ PRE_READ(value, size);
}
-POST_SYSCALL(setxattr)(long res, const void *path, const void *name,
- const void *value, long size, long flags) {}
+POST_SYSCALL(setxattr)
+(long res, const void *path, const void *name, const void *value, long size,
+ long flags) {}
-PRE_SYSCALL(lsetxattr)(const void *path, const void *name, const void *value,
- long size, long flags) {
+PRE_SYSCALL(lsetxattr)
+(const void *path, const void *name, const void *value, long size, long flags) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
- if (value) PRE_READ(value, size);
+ if (value)
+ PRE_READ(value, size);
}
-POST_SYSCALL(lsetxattr)(long res, const void *path, const void *name,
- const void *value, long size, long flags) {}
+POST_SYSCALL(lsetxattr)
+(long res, const void *path, const void *name, const void *value, long size,
+ long flags) {}
-PRE_SYSCALL(fsetxattr)(long fd, const void *name, const void *value, long size,
- long flags) {
+PRE_SYSCALL(fsetxattr)
+(long fd, const void *name, const void *value, long size, long flags) {
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
- if (value) PRE_READ(value, size);
+ if (value)
+ PRE_READ(value, size);
}
-POST_SYSCALL(fsetxattr)(long res, long fd, const void *name, const void *value,
- long size, long flags) {}
+POST_SYSCALL(fsetxattr)
+(long res, long fd, const void *name, const void *value, long size,
+ long flags) {}
-PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
- long size) {
+PRE_SYSCALL(getxattr)
+(const void *path, const void *name, void *value, long size) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
- void *value, long size) {
+POST_SYSCALL(getxattr)
+(long res, const void *path, const void *name, void *value, long size) {
if (size && res > 0) {
- if (value) POST_WRITE(value, res);
+ if (value)
+ POST_WRITE(value, res);
}
}
-PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
- long size) {
+PRE_SYSCALL(lgetxattr)
+(const void *path, const void *name, void *value, long size) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
- void *value, long size) {
+POST_SYSCALL(lgetxattr)
+(long res, const void *path, const void *name, void *value, long size) {
if (size && res > 0) {
- if (value) POST_WRITE(value, res);
+ if (value)
+ POST_WRITE(value, res);
}
}
@@ -1033,10 +1123,11 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
- long size) {
+POST_SYSCALL(fgetxattr)
+(long res, long fd, const void *name, void *value, long size) {
if (size && res > 0) {
- if (value) POST_WRITE(value, res);
+ if (value)
+ POST_WRITE(value, res);
}
}
@@ -1047,7 +1138,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
if (size && res > 0) {
- if (list) POST_WRITE(list, res);
+ if (list)
+ POST_WRITE(list, res);
}
}
@@ -1058,7 +1150,8 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
if (size && res > 0) {
- if (list) POST_WRITE(list, res);
+ if (list)
+ POST_WRITE(list, res);
}
}
@@ -1066,7 +1159,8 @@ PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
if (size && res > 0) {
- if (list) POST_WRITE(list, res);
+ if (list)
+ POST_WRITE(list, res);
}
}
@@ -1103,17 +1197,17 @@ PRE_SYSCALL(mprotect)(long start, long len, long prot) {}
POST_SYSCALL(mprotect)(long res, long start, long len, long prot) {}
-PRE_SYSCALL(mremap)(long addr, long old_len, long new_len, long flags,
- long new_addr) {}
+PRE_SYSCALL(mremap)
+(long addr, long old_len, long new_len, long flags, long new_addr) {}
-POST_SYSCALL(mremap)(long res, long addr, long old_len, long new_len,
- long flags, long new_addr) {}
+POST_SYSCALL(mremap)
+(long res, long addr, long old_len, long new_len, long flags, long new_addr) {}
-PRE_SYSCALL(remap_file_pages)(long start, long size, long prot, long pgoff,
- long flags) {}
+PRE_SYSCALL(remap_file_pages)
+(long start, long size, long prot, long pgoff, long flags) {}
-POST_SYSCALL(remap_file_pages)(long res, long start, long size, long prot,
- long pgoff, long flags) {}
+POST_SYSCALL(remap_file_pages)
+(long res, long start, long size, long prot, long pgoff, long flags) {}
PRE_SYSCALL(msync)(long start, long len, long flags) {}
@@ -1189,7 +1283,8 @@ PRE_SYSCALL(link)(const void *oldname, const void *newname) {
POST_SYSCALL(link)(long res, const void *oldname, const void *newname) {}
PRE_SYSCALL(symlink)(const void *old, const void *new_) {
- if (old) PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
+ if (old)
+ PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
if (new_)
PRE_READ(new_, __sanitizer::internal_strlen((const char *)new_) + 1);
}
@@ -1237,14 +1332,16 @@ PRE_SYSCALL(pipe)(void *fildes) {}
POST_SYSCALL(pipe)(long res, void *fildes) {
if (res >= 0)
- if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+ if (fildes)
+ POST_WRITE(fildes, sizeof(int) * 2);
}
PRE_SYSCALL(pipe2)(void *fildes, long flags) {}
POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
if (res >= 0)
- if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+ if (fildes)
+ POST_WRITE(fildes, sizeof(int) * 2);
}
PRE_SYSCALL(dup)(long fildes) {}
@@ -1272,16 +1369,18 @@ PRE_SYSCALL(flock)(long fd, long cmd) {}
POST_SYSCALL(flock)(long res, long fd, long cmd) {}
PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
- if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
+ if (ctx)
+ PRE_WRITE(ctx, sizeof(*ctx));
}
POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
- if (res >= 0) {
- if (ctx) POST_WRITE(ctx, sizeof(*ctx));
+ if (res >= 0 && ctx) {
+ POST_WRITE(ctx, sizeof(*ctx));
// (*ctx) is actually a pointer to a kernel mapped page, and there are
// people out there who are crazy enough to peek into that page's 32-byte
// header.
- if (*ctx) POST_WRITE(*ctx, 32);
+ if (*ctx)
+ POST_WRITE(*ctx, 32);
}
}
@@ -1289,16 +1388,21 @@ PRE_SYSCALL(io_destroy)(long ctx) {}
POST_SYSCALL(io_destroy)(long res, long ctx) {}
-PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
- __sanitizer_io_event *ioevpp, void *timeout) {
- if (timeout) PRE_READ(timeout, struct_timespec_sz);
+PRE_SYSCALL(io_getevents)
+(long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,
+ void *timeout) {
+ if (timeout)
+ PRE_READ(timeout, struct_timespec_sz);
}
-POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
- __sanitizer_io_event *ioevpp, void *timeout) {
+POST_SYSCALL(io_getevents)
+(long res, long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,
+ void *timeout) {
if (res >= 0) {
- if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
- if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ if (ioevpp)
+ POST_WRITE(ioevpp, res * sizeof(*ioevpp));
+ if (timeout)
+ POST_WRITE(timeout, struct_timespec_sz);
}
for (long i = 0; i < res; i++) {
// We synchronize io_submit -> io_getevents/io_cancel using the
@@ -1308,26 +1412,26 @@ POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
// synchronize on 0. But there does not seem to be a better solution
// (except wrapping all operations in own context, which is unreliable).
// We can not reliably extract fildes in io_getevents.
- COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
+ COMMON_SYSCALL_ACQUIRE((void *)ioevpp[i].data);
}
}
PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
for (long i = 0; i < nr; ++i) {
uptr op = iocbpp[i]->aio_lio_opcode;
- void *data = (void*)iocbpp[i]->aio_data;
- void *buf = (void*)iocbpp[i]->aio_buf;
+ void *data = (void *)iocbpp[i]->aio_data;
+ void *buf = (void *)iocbpp[i]->aio_buf;
uptr len = (uptr)iocbpp[i]->aio_nbytes;
if (op == iocb_cmd_pwrite && buf && len) {
PRE_READ(buf, len);
} else if (op == iocb_cmd_pread && buf && len) {
POST_WRITE(buf, len);
} else if (op == iocb_cmd_pwritev) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;
for (uptr v = 0; v < len; v++)
PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
} else if (op == iocb_cmd_preadv) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;
for (uptr v = 0; v < len; v++)
POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
}
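The acquire above on each completed event's data cookie pairs with a release that io_submit's pre-hook performs on iocb->aio_data (in a portion of that hook outside this hunk), giving the tool a happens-before edge from submission to completion. This file only calls the COMMON_SYSCALL_* hooks; the tool that includes it (for example TSan's interceptors) is expected to define them first. A minimal sketch of such an embedder, with hypothetical ToolAcquire/ToolRelease stand-ins for the tool's real primitives:

// Illustrative embedder definitions only -- TSan's real hooks differ.
#include <cstdio>

static void ToolAcquire(void *addr) {  // hypothetical tool primitive
  std::fprintf(stderr, "acquire on %p\n", addr);
}
static void ToolRelease(void *addr) {  // hypothetical tool primitive
  std::fprintf(stderr, "release on %p\n", addr);
}

#define COMMON_SYSCALL_ACQUIRE(addr) ToolAcquire((void *)(addr))
#define COMMON_SYSCALL_RELEASE(addr) ToolRelease((void *)(addr))
// With these in place, io_submit's pre-hook releases on aio_data and the
// io_getevents/io_cancel post-hooks above acquire on event->data.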
@@ -1336,19 +1440,18 @@ PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
}
}
-POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
- __sanitizer_iocb **iocbpp) {}
+POST_SYSCALL(io_submit)
+(long res, long ctx_id, long nr, __sanitizer_iocb **iocbpp) {}
-PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
- __sanitizer_io_event *result) {
-}
+PRE_SYSCALL(io_cancel)
+(long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {}
-POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
- __sanitizer_io_event *result) {
+POST_SYSCALL(io_cancel)
+(long res, long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {
if (res == 0) {
if (result) {
// See comment in io_getevents.
- COMMON_SYSCALL_ACQUIRE((void*)result->data);
+ COMMON_SYSCALL_ACQUIRE((void *)result->data);
POST_WRITE(result, sizeof(*result));
}
if (iocb)
@@ -1358,19 +1461,23 @@ POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
PRE_SYSCALL(sendfile)(long out_fd, long in_fd, void *offset, long count) {}
-POST_SYSCALL(sendfile)(long res, long out_fd, long in_fd,
- __sanitizer___kernel_off_t *offset, long count) {
+POST_SYSCALL(sendfile)
+(long res, long out_fd, long in_fd, __sanitizer___kernel_off_t *offset,
+ long count) {
if (res >= 0) {
- if (offset) POST_WRITE(offset, sizeof(*offset));
+ if (offset)
+ POST_WRITE(offset, sizeof(*offset));
}
}
PRE_SYSCALL(sendfile64)(long out_fd, long in_fd, void *offset, long count) {}
-POST_SYSCALL(sendfile64)(long res, long out_fd, long in_fd,
- __sanitizer___kernel_loff_t *offset, long count) {
+POST_SYSCALL(sendfile64)
+(long res, long out_fd, long in_fd, __sanitizer___kernel_loff_t *offset,
+ long count) {
if (res >= 0) {
- if (offset) POST_WRITE(offset, sizeof(*offset));
+ if (offset)
+ POST_WRITE(offset, sizeof(*offset));
}
}
@@ -1402,9 +1509,7 @@ PRE_SYSCALL(open)(const void *filename, long flags, long mode) {
POST_SYSCALL(open)(long res, const void *filename, long flags, long mode) {}
-PRE_SYSCALL(close)(long fd) {
- COMMON_SYSCALL_FD_CLOSE((int)fd);
-}
+PRE_SYSCALL(close)(long fd) { COMMON_SYSCALL_FD_CLOSE((int)fd); }
POST_SYSCALL(close)(long res, long fd) {}
@@ -1440,7 +1545,7 @@ PRE_SYSCALL(fchown)(long fd, long user, long group) {}
POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
-#if SANITIZER_USES_UID16_SYSCALLS
+# if SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
if (filename)
PRE_READ(filename,
@@ -1483,13 +1588,16 @@ POST_SYSCALL(setresuid16)(long res, long ruid, long euid, long suid) {}
PRE_SYSCALL(getresuid16)(void *ruid, void *euid, void *suid) {}
-POST_SYSCALL(getresuid16)(long res, __sanitizer___kernel_old_uid_t *ruid,
- __sanitizer___kernel_old_uid_t *euid,
- __sanitizer___kernel_old_uid_t *suid) {
+POST_SYSCALL(getresuid16)
+(long res, __sanitizer___kernel_old_uid_t *ruid,
+ __sanitizer___kernel_old_uid_t *euid, __sanitizer___kernel_old_uid_t *suid) {
if (res >= 0) {
- if (ruid) POST_WRITE(ruid, sizeof(*ruid));
- if (euid) POST_WRITE(euid, sizeof(*euid));
- if (suid) POST_WRITE(suid, sizeof(*suid));
+ if (ruid)
+ POST_WRITE(ruid, sizeof(*ruid));
+ if (euid)
+ POST_WRITE(euid, sizeof(*euid));
+ if (suid)
+ POST_WRITE(suid, sizeof(*suid));
}
}
@@ -1499,13 +1607,16 @@ POST_SYSCALL(setresgid16)(long res, long rgid, long egid, long sgid) {}
PRE_SYSCALL(getresgid16)(void *rgid, void *egid, void *sgid) {}
-POST_SYSCALL(getresgid16)(long res, __sanitizer___kernel_old_gid_t *rgid,
- __sanitizer___kernel_old_gid_t *egid,
- __sanitizer___kernel_old_gid_t *sgid) {
+POST_SYSCALL(getresgid16)
+(long res, __sanitizer___kernel_old_gid_t *rgid,
+ __sanitizer___kernel_old_gid_t *egid, __sanitizer___kernel_old_gid_t *sgid) {
if (res >= 0) {
- if (rgid) POST_WRITE(rgid, sizeof(*rgid));
- if (egid) POST_WRITE(egid, sizeof(*egid));
- if (sgid) POST_WRITE(sgid, sizeof(*sgid));
+ if (rgid)
+ POST_WRITE(rgid, sizeof(*rgid));
+ if (egid)
+ POST_WRITE(egid, sizeof(*egid));
+ if (sgid)
+ POST_WRITE(sgid, sizeof(*sgid));
}
}
@@ -1517,23 +1628,25 @@ PRE_SYSCALL(setfsgid16)(long gid) {}
POST_SYSCALL(setfsgid16)(long res, long gid) {}
-PRE_SYSCALL(getgroups16)(long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {}
+PRE_SYSCALL(getgroups16)
+(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}
-POST_SYSCALL(getgroups16)(long res, long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {
+POST_SYSCALL(getgroups16)
+(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {
if (res >= 0) {
- if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ if (grouplist)
+ POST_WRITE(grouplist, res * sizeof(*grouplist));
}
}
-PRE_SYSCALL(setgroups16)(long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {
- if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+PRE_SYSCALL(setgroups16)
+(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {
+ if (grouplist)
+ POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
}
-POST_SYSCALL(setgroups16)(long res, long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {}
+POST_SYSCALL(setgroups16)
+(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}
PRE_SYSCALL(getuid16)() {}
@@ -1550,7 +1663,7 @@ POST_SYSCALL(getgid16)(long res) {}
PRE_SYSCALL(getegid16)() {}
POST_SYSCALL(getegid16)(long res) {}
-#endif // SANITIZER_USES_UID16_SYSCALLS
+# endif // SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(utime)(void *filename, void *times) {}
@@ -1559,7 +1672,8 @@ POST_SYSCALL(utime)(long res, void *filename, void *times) {
if (filename)
POST_WRITE(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
- if (times) POST_WRITE(times, struct_utimbuf_sz);
+ if (times)
+ POST_WRITE(times, struct_utimbuf_sz);
}
}
@@ -1570,7 +1684,8 @@ POST_SYSCALL(utimes)(long res, void *filename, void *utimes) {
if (filename)
POST_WRITE(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
- if (utimes) POST_WRITE(utimes, timeval_sz);
+ if (utimes)
+ POST_WRITE(utimes, timeval_sz);
}
}
@@ -1578,91 +1693,104 @@ PRE_SYSCALL(lseek)(long fd, long offset, long origin) {}
POST_SYSCALL(lseek)(long res, long fd, long offset, long origin) {}
-PRE_SYSCALL(llseek)(long fd, long offset_high, long offset_low, void *result,
- long origin) {}
+PRE_SYSCALL(llseek)
+(long fd, long offset_high, long offset_low, void *result, long origin) {}
-POST_SYSCALL(llseek)(long res, long fd, long offset_high, long offset_low,
- void *result, long origin) {
+POST_SYSCALL(llseek)
+(long res, long fd, long offset_high, long offset_low, void *result,
+ long origin) {
if (res >= 0) {
- if (result) POST_WRITE(result, sizeof(long long));
+ if (result)
+ POST_WRITE(result, sizeof(long long));
}
}
PRE_SYSCALL(readv)(long fd, const __sanitizer_iovec *vec, long vlen) {}
-POST_SYSCALL(readv)(long res, long fd, const __sanitizer_iovec *vec,
- long vlen) {
+POST_SYSCALL(readv)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen) {
if (res >= 0) {
- if (vec) kernel_write_iovec(vec, vlen, res);
+ if (vec)
+ kernel_write_iovec(vec, vlen, res);
}
}
PRE_SYSCALL(write)(long fd, const void *buf, long count) {
- if (buf) PRE_READ(buf, count);
+ if (buf)
+ PRE_READ(buf, count);
}
POST_SYSCALL(write)(long res, long fd, const void *buf, long count) {}
PRE_SYSCALL(writev)(long fd, const __sanitizer_iovec *vec, long vlen) {}
-POST_SYSCALL(writev)(long res, long fd, const __sanitizer_iovec *vec,
- long vlen) {
+POST_SYSCALL(writev)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen) {
if (res >= 0) {
- if (vec) kernel_read_iovec(vec, vlen, res);
+ if (vec)
+ kernel_read_iovec(vec, vlen, res);
}
}
-#ifdef _LP64
+# ifdef _LP64
PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos) {}
POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
+ if (buf)
+ POST_WRITE(buf, res);
}
}
PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos) {
- if (buf) PRE_READ(buf, count);
+ if (buf)
+ PRE_READ(buf, count);
}
-POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
- long pos) {}
-#else
+POST_SYSCALL(pwrite64)
+(long res, long fd, const void *buf, long count, long pos) {}
+# else
PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos0, long pos1) {}
-POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos0,
- long pos1) {
+POST_SYSCALL(pread64)
+(long res, long fd, void *buf, long count, long pos0, long pos1) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
+ if (buf)
+ POST_WRITE(buf, res);
}
}
-PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos0,
- long pos1) {
- if (buf) PRE_READ(buf, count);
+PRE_SYSCALL(pwrite64)
+(long fd, const void *buf, long count, long pos0, long pos1) {
+ if (buf)
+ PRE_READ(buf, count);
}
-POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
- long pos0, long pos1) {}
-#endif
+POST_SYSCALL(pwrite64)
+(long res, long fd, const void *buf, long count, long pos0, long pos1) {}
+# endif
-PRE_SYSCALL(preadv)(long fd, const __sanitizer_iovec *vec, long vlen,
- long pos_l, long pos_h) {}
+PRE_SYSCALL(preadv)
+(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}
-POST_SYSCALL(preadv)(long res, long fd, const __sanitizer_iovec *vec, long vlen,
- long pos_l, long pos_h) {
+POST_SYSCALL(preadv)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,
+ long pos_h) {
if (res >= 0) {
- if (vec) kernel_write_iovec(vec, vlen, res);
+ if (vec)
+ kernel_write_iovec(vec, vlen, res);
}
}
-PRE_SYSCALL(pwritev)(long fd, const __sanitizer_iovec *vec, long vlen,
- long pos_l, long pos_h) {}
+PRE_SYSCALL(pwritev)
+(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}
-POST_SYSCALL(pwritev)(long res, long fd, const __sanitizer_iovec *vec,
- long vlen, long pos_l, long pos_h) {
+POST_SYSCALL(pwritev)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,
+ long pos_h) {
if (res >= 0) {
- if (vec) kernel_read_iovec(vec, vlen, res);
+ if (vec)
+ kernel_read_iovec(vec, vlen, res);
}
}
@@ -1717,14 +1845,15 @@ PRE_SYSCALL(quotactl)(long cmd, const void *special, long id, void *addr) {
PRE_READ(special, __sanitizer::internal_strlen((const char *)special) + 1);
}
-POST_SYSCALL(quotactl)(long res, long cmd, const void *special, long id,
- void *addr) {}
+POST_SYSCALL(quotactl)
+(long res, long cmd, const void *special, long id, void *addr) {}
PRE_SYSCALL(getdents)(long fd, void *dirent, long count) {}
POST_SYSCALL(getdents)(long res, long fd, void *dirent, long count) {
if (res >= 0) {
- if (dirent) POST_WRITE(dirent, res);
+ if (dirent)
+ POST_WRITE(dirent, res);
}
}
@@ -1732,15 +1861,16 @@ PRE_SYSCALL(getdents64)(long fd, void *dirent, long count) {}
POST_SYSCALL(getdents64)(long res, long fd, void *dirent, long count) {
if (res >= 0) {
- if (dirent) POST_WRITE(dirent, res);
+ if (dirent)
+ POST_WRITE(dirent, res);
}
}
-PRE_SYSCALL(setsockopt)(long fd, long level, long optname, void *optval,
- long optlen) {}
+PRE_SYSCALL(setsockopt)
+(long fd, long level, long optname, void *optval, long optlen) {}
-POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
- void *optval, long optlen) {
+POST_SYSCALL(setsockopt)
+(long res, long fd, long level, long optname, void *optval, long optlen) {
if (res >= 0) {
if (optval)
POST_WRITE(optval,
@@ -1748,77 +1878,88 @@ POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
}
}
-PRE_SYSCALL(getsockopt)(long fd, long level, long optname, void *optval,
- void *optlen) {}
+PRE_SYSCALL(getsockopt)
+(long fd, long level, long optname, void *optval, void *optlen) {}
-POST_SYSCALL(getsockopt)(long res, long fd, long level, long optname,
- void *optval, void *optlen) {
+POST_SYSCALL(getsockopt)
+(long res, long fd, long level, long optname, void *optval, void *optlen) {
if (res >= 0) {
if (optval)
POST_WRITE(optval,
__sanitizer::internal_strlen((const char *)optval) + 1);
- if (optlen) POST_WRITE(optlen, sizeof(int));
+ if (optlen)
+ POST_WRITE(optlen, sizeof(int));
}
}
PRE_SYSCALL(bind)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
-POST_SYSCALL(bind)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- long arg2) {
+POST_SYSCALL(bind)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
}
}
PRE_SYSCALL(connect)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
-POST_SYSCALL(connect)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- long arg2) {
+POST_SYSCALL(connect)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
}
}
PRE_SYSCALL(accept)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
-POST_SYSCALL(accept)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {
+POST_SYSCALL(accept)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
-PRE_SYSCALL(accept4)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2,
- long arg3) {}
+PRE_SYSCALL(accept4)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {}
-POST_SYSCALL(accept4)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2, long arg3) {
+POST_SYSCALL(accept4)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
-PRE_SYSCALL(getsockname)(long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {}
+PRE_SYSCALL(getsockname)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
-POST_SYSCALL(getsockname)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {
+POST_SYSCALL(getsockname)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
-PRE_SYSCALL(getpeername)(long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {}
+PRE_SYSCALL(getpeername)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
-POST_SYSCALL(getpeername)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {
+POST_SYSCALL(getpeername)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
@@ -1826,18 +1967,23 @@ PRE_SYSCALL(send)(long arg0, void *arg1, long arg2, long arg3) {}
POST_SYSCALL(send)(long res, long arg0, void *arg1, long arg2, long arg3) {
if (res) {
- if (arg1) POST_READ(arg1, res);
+ if (arg1)
+ POST_READ(arg1, res);
}
}
-PRE_SYSCALL(sendto)(long arg0, void *arg1, long arg2, long arg3,
- sanitizer_kernel_sockaddr *arg4, long arg5) {}
+PRE_SYSCALL(sendto)
+(long arg0, void *arg1, long arg2, long arg3, sanitizer_kernel_sockaddr *arg4,
+ long arg5) {}
-POST_SYSCALL(sendto)(long res, long arg0, void *arg1, long arg2, long arg3,
- sanitizer_kernel_sockaddr *arg4, long arg5) {
+POST_SYSCALL(sendto)
+(long res, long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {
if (res >= 0) {
- if (arg1) POST_READ(arg1, res);
- if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+ if (arg1)
+ POST_READ(arg1, res);
+ if (arg4)
+ POST_WRITE(arg4, sizeof(*arg4));
}
}
@@ -1857,19 +2003,25 @@ PRE_SYSCALL(recv)(long arg0, void *buf, long len, long flags) {}
POST_SYSCALL(recv)(long res, void *buf, long len, long flags) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
+ if (buf)
+ POST_WRITE(buf, res);
}
}
-PRE_SYSCALL(recvfrom)(long arg0, void *buf, long len, long flags,
- sanitizer_kernel_sockaddr *arg4, void *arg5) {}
+PRE_SYSCALL(recvfrom)
+(long arg0, void *buf, long len, long flags, sanitizer_kernel_sockaddr *arg4,
+ void *arg5) {}
-POST_SYSCALL(recvfrom)(long res, long arg0, void *buf, long len, long flags,
- sanitizer_kernel_sockaddr *arg4, void *arg5) {
+POST_SYSCALL(recvfrom)
+(long res, long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
- if (arg4) POST_WRITE(arg4, sizeof(*arg4));
- if (arg5) POST_WRITE(arg5, sizeof(int));
+ if (buf)
+ POST_WRITE(buf, res);
+ if (arg4)
+ POST_WRITE(arg4, sizeof(*arg4));
+ if (arg5)
+ POST_WRITE(arg5, sizeof(int));
}
}
@@ -1881,14 +2033,16 @@ PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}
POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
if (res >= 0)
- if (sv) POST_WRITE(sv, sizeof(int) * 2);
+ if (sv)
+ POST_WRITE(sv, sizeof(int) * 2);
}
PRE_SYSCALL(socketcall)(long call, void *args) {}
POST_SYSCALL(socketcall)(long res, long call, void *args) {
if (res >= 0) {
- if (args) POST_WRITE(args, sizeof(long));
+ if (args)
+ POST_WRITE(args, sizeof(long));
}
}
@@ -1898,25 +2052,31 @@ POST_SYSCALL(listen)(long res, long arg0, long arg1) {}
PRE_SYSCALL(poll)(void *ufds, long nfds, long timeout) {}
-POST_SYSCALL(poll)(long res, __sanitizer_pollfd *ufds, long nfds,
- long timeout) {
+POST_SYSCALL(poll)
+(long res, __sanitizer_pollfd *ufds, long nfds, long timeout) {
if (res >= 0) {
- if (ufds) POST_WRITE(ufds, nfds * sizeof(*ufds));
+ if (ufds)
+ POST_WRITE(ufds, nfds * sizeof(*ufds));
}
}
-PRE_SYSCALL(select)(long n, __sanitizer___kernel_fd_set *inp,
- __sanitizer___kernel_fd_set *outp,
- __sanitizer___kernel_fd_set *exp, void *tvp) {}
+PRE_SYSCALL(select)
+(long n, __sanitizer___kernel_fd_set *inp, __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {}
-POST_SYSCALL(select)(long res, long n, __sanitizer___kernel_fd_set *inp,
- __sanitizer___kernel_fd_set *outp,
- __sanitizer___kernel_fd_set *exp, void *tvp) {
+POST_SYSCALL(select)
+(long res, long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp, __sanitizer___kernel_fd_set *exp,
+ void *tvp) {
if (res >= 0) {
- if (inp) POST_WRITE(inp, sizeof(*inp));
- if (outp) POST_WRITE(outp, sizeof(*outp));
- if (exp) POST_WRITE(exp, sizeof(*exp));
- if (tvp) POST_WRITE(tvp, timeval_sz);
+ if (inp)
+ POST_WRITE(inp, sizeof(*inp));
+ if (outp)
+ POST_WRITE(outp, sizeof(*outp));
+ if (exp)
+ POST_WRITE(exp, sizeof(*exp));
+ if (tvp)
+ POST_WRITE(tvp, timeval_sz);
}
}
@@ -1936,29 +2096,58 @@ PRE_SYSCALL(epoll_ctl)(long epfd, long op, long fd, void *event) {}
POST_SYSCALL(epoll_ctl)(long res, long epfd, long op, long fd, void *event) {
if (res >= 0) {
- if (event) POST_WRITE(event, struct_epoll_event_sz);
+ if (event)
+ POST_WRITE(event, struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(epoll_wait)
+(long epfd, void *events, long maxevents, long timeout) {}
+
+POST_SYSCALL(epoll_wait)
+(long res, long epfd, void *events, long maxevents, long timeout) {
+ if (res >= 0) {
+ COMMON_SYSCALL_FD_ACQUIRE(epfd);
+ if (events)
+ POST_WRITE(events, res * struct_epoll_event_sz);
}
}
-PRE_SYSCALL(epoll_wait)(long epfd, void *events, long maxevents, long timeout) {
+PRE_SYSCALL(epoll_pwait)
+(long epfd, void *events, long maxevents, long timeout,
+ const kernel_sigset_t *sigmask, long sigsetsize) {
+ if (sigmask)
+ PRE_READ(sigmask, sigsetsize);
}
-POST_SYSCALL(epoll_wait)(long res, long epfd, void *events, long maxevents,
- long timeout) {
+POST_SYSCALL(epoll_pwait)
+(long res, long epfd, void *events, long maxevents, long timeout,
+ const void *sigmask, long sigsetsize) {
if (res >= 0) {
- if (events) POST_WRITE(events, struct_epoll_event_sz);
+ COMMON_SYSCALL_FD_ACQUIRE(epfd);
+ if (events)
+ POST_WRITE(events, res * struct_epoll_event_sz);
}
}
-PRE_SYSCALL(epoll_pwait)(long epfd, void *events, long maxevents, long timeout,
- const kernel_sigset_t *sigmask, long sigsetsize) {
- if (sigmask) PRE_READ(sigmask, sigsetsize);
+PRE_SYSCALL(epoll_pwait2)
+(long epfd, void *events, long maxevents,
+ const sanitizer_kernel_timespec *timeout, const kernel_sigset_t *sigmask,
+ long sigsetsize) {
+ if (timeout)
+ PRE_READ(timeout, sizeof(*timeout));
+ if (sigmask)
+ PRE_READ(sigmask, sigsetsize);
}
-POST_SYSCALL(epoll_pwait)(long res, long epfd, void *events, long maxevents,
- long timeout, const void *sigmask, long sigsetsize) {
+POST_SYSCALL(epoll_pwait2)
+(long res, long epfd, void *events, long maxevents,
+ const sanitizer_kernel_timespec *timeout, const void *sigmask,
+ long sigsetsize) {
if (res >= 0) {
- if (events) POST_WRITE(events, struct_epoll_event_sz);
+ COMMON_SYSCALL_FD_ACQUIRE(epfd);
+ if (events)
+ POST_WRITE(events, res * struct_epoll_event_sz);
}
}
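The rewritten epoll handlers now acquire on epfd and mark only res * struct_epoll_event_sz bytes of the events array as written, since on success the kernel fills in at most res entries rather than maxevents. A usage sketch for code that issues the raw syscall itself, assuming the wrapper macros from <sanitizer/linux_syscall_hooks.h> (which forward to the __sanitizer_syscall_{pre,post}_impl_epoll_wait handlers above); SYS_epoll_wait is the x86_64 spelling:

#include <sanitizer/linux_syscall_hooks.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <unistd.h>

long my_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
                   int timeout) {
  __sanitizer_syscall_pre_epoll_wait(epfd, events, maxevents, timeout);
  long res = syscall(SYS_epoll_wait, epfd, events, maxevents, timeout);
  // Post hook: on success, marks res * sizeof(epoll_event) bytes of `events`
  // as initialized and records the acquire on epfd.
  __sanitizer_syscall_post_epoll_wait(res, epfd, events, maxevents, timeout);
  return res;
}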
@@ -1993,7 +2182,8 @@ PRE_SYSCALL(newuname)(void *name) {}
POST_SYSCALL(newuname)(long res, void *name) {
if (res >= 0) {
- if (name) POST_WRITE(name, struct_new_utsname_sz);
+ if (name)
+ POST_WRITE(name, struct_new_utsname_sz);
}
}
@@ -2001,7 +2191,8 @@ PRE_SYSCALL(uname)(void *arg0) {}
POST_SYSCALL(uname)(long res, void *arg0) {
if (res >= 0) {
- if (arg0) POST_WRITE(arg0, struct_old_utsname_sz);
+ if (arg0)
+ POST_WRITE(arg0, struct_old_utsname_sz);
}
}
@@ -2009,7 +2200,8 @@ PRE_SYSCALL(olduname)(void *arg0) {}
POST_SYSCALL(olduname)(long res, void *arg0) {
if (res >= 0) {
- if (arg0) POST_WRITE(arg0, struct_oldold_utsname_sz);
+ if (arg0)
+ POST_WRITE(arg0, struct_oldold_utsname_sz);
}
}
@@ -2017,7 +2209,8 @@ PRE_SYSCALL(getrlimit)(long resource, void *rlim) {}
POST_SYSCALL(getrlimit)(long res, long resource, void *rlim) {
if (res >= 0) {
- if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ if (rlim)
+ POST_WRITE(rlim, struct_rlimit_sz);
}
}
@@ -2025,7 +2218,8 @@ PRE_SYSCALL(old_getrlimit)(long resource, void *rlim) {}
POST_SYSCALL(old_getrlimit)(long res, long resource, void *rlim) {
if (res >= 0) {
- if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ if (rlim)
+ POST_WRITE(rlim, struct_rlimit_sz);
}
}
@@ -2033,29 +2227,33 @@ PRE_SYSCALL(setrlimit)(long resource, void *rlim) {}
POST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {
if (res >= 0) {
- if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ if (rlim)
+ POST_WRITE(rlim, struct_rlimit_sz);
}
}
-#if !SANITIZER_ANDROID
-PRE_SYSCALL(prlimit64)(long pid, long resource, const void *new_rlim,
- void *old_rlim) {
- if (new_rlim) PRE_READ(new_rlim, struct_rlimit64_sz);
+# if SANITIZER_GLIBC
+PRE_SYSCALL(prlimit64)
+(long pid, long resource, const void *new_rlim, void *old_rlim) {
+ if (new_rlim)
+ PRE_READ(new_rlim, struct_rlimit64_sz);
}
-POST_SYSCALL(prlimit64)(long res, long pid, long resource, const void *new_rlim,
- void *old_rlim) {
+POST_SYSCALL(prlimit64)
+(long res, long pid, long resource, const void *new_rlim, void *old_rlim) {
if (res >= 0) {
- if (old_rlim) POST_WRITE(old_rlim, struct_rlimit64_sz);
+ if (old_rlim)
+ POST_WRITE(old_rlim, struct_rlimit64_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(getrusage)(long who, void *ru) {}
POST_SYSCALL(getrusage)(long res, long who, void *ru) {
if (res >= 0) {
- if (ru) POST_WRITE(ru, struct_rusage_sz);
+ if (ru)
+ POST_WRITE(ru, struct_rusage_sz);
}
}
@@ -2068,31 +2266,34 @@ PRE_SYSCALL(msgget)(long key, long msgflg) {}
POST_SYSCALL(msgget)(long res, long key, long msgflg) {}
PRE_SYSCALL(msgsnd)(long msqid, void *msgp, long msgsz, long msgflg) {
- if (msgp) PRE_READ(msgp, msgsz);
+ if (msgp)
+ PRE_READ(msgp, msgsz);
}
-POST_SYSCALL(msgsnd)(long res, long msqid, void *msgp, long msgsz,
- long msgflg) {}
+POST_SYSCALL(msgsnd)
+(long res, long msqid, void *msgp, long msgsz, long msgflg) {}
-PRE_SYSCALL(msgrcv)(long msqid, void *msgp, long msgsz, long msgtyp,
- long msgflg) {}
+PRE_SYSCALL(msgrcv)
+(long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {}
-POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
- long msgflg) {
+POST_SYSCALL(msgrcv)
+(long res, long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {
if (res >= 0) {
- if (msgp) POST_WRITE(msgp, res);
+ if (msgp)
+ POST_WRITE(msgp, res);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
+ if (buf)
+ POST_WRITE(buf, struct_msqid_ds_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
@@ -2106,13 +2307,14 @@ PRE_SYSCALL(semctl)(long semid, long semnum, long cmd, void *arg) {}
POST_SYSCALL(semctl)(long res, long semid, long semnum, long cmd, void *arg) {}
-PRE_SYSCALL(semtimedop)(long semid, void *sops, long nsops,
- const void *timeout) {
- if (timeout) PRE_READ(timeout, struct_timespec_sz);
+PRE_SYSCALL(semtimedop)
+(long semid, void *sops, long nsops, const void *timeout) {
+ if (timeout)
+ PRE_READ(timeout, struct_timespec_sz);
}
-POST_SYSCALL(semtimedop)(long res, long semid, void *sops, long nsops,
- const void *timeout) {}
+POST_SYSCALL(semtimedop)
+(long res, long semid, void *sops, long nsops, const void *timeout) {}
PRE_SYSCALL(shmat)(long shmid, void *shmaddr, long shmflg) {}
@@ -2138,18 +2340,20 @@ POST_SYSCALL(shmdt)(long res, void *shmaddr) {
}
}
-PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
- long fifth) {}
+PRE_SYSCALL(ipc)
+(long call, long first, long second, long third, void *ptr, long fifth) {}
-POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
- void *ptr, long fifth) {}
+POST_SYSCALL(ipc)
+(long res, long call, long first, long second, long third, void *ptr,
+ long fifth) {}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
+ if (buf)
+ POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
}
}
@@ -2158,10 +2362,11 @@ PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(mq_open)(long res, const void *name, long oflag, long mode,
- void *attr) {
+POST_SYSCALL(mq_open)
+(long res, const void *name, long oflag, long mode, void *attr) {
if (res >= 0) {
- if (attr) POST_WRITE(attr, struct_mq_attr_sz);
+ if (attr)
+ POST_WRITE(attr, struct_mq_attr_sz);
}
}
@@ -2172,62 +2377,73 @@ PRE_SYSCALL(mq_unlink)(const void *name) {
POST_SYSCALL(mq_unlink)(long res, const void *name) {}
-PRE_SYSCALL(mq_timedsend)(long mqdes, const void *msg_ptr, long msg_len,
- long msg_prio, const void *abs_timeout) {
- if (msg_ptr) PRE_READ(msg_ptr, msg_len);
- if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+PRE_SYSCALL(mq_timedsend)
+(long mqdes, const void *msg_ptr, long msg_len, long msg_prio,
+ const void *abs_timeout) {
+ if (msg_ptr)
+ PRE_READ(msg_ptr, msg_len);
+ if (abs_timeout)
+ PRE_READ(abs_timeout, struct_timespec_sz);
}
-POST_SYSCALL(mq_timedsend)(long res, long mqdes, const void *msg_ptr,
- long msg_len, long msg_prio,
- const void *abs_timeout) {}
+POST_SYSCALL(mq_timedsend)
+(long res, long mqdes, const void *msg_ptr, long msg_len, long msg_prio,
+ const void *abs_timeout) {}
-PRE_SYSCALL(mq_timedreceive)(long mqdes, void *msg_ptr, long msg_len,
- void *msg_prio, const void *abs_timeout) {
- if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+PRE_SYSCALL(mq_timedreceive)
+(long mqdes, void *msg_ptr, long msg_len, void *msg_prio,
+ const void *abs_timeout) {
+ if (abs_timeout)
+ PRE_READ(abs_timeout, struct_timespec_sz);
}
-POST_SYSCALL(mq_timedreceive)(long res, long mqdes, void *msg_ptr, long msg_len,
- int *msg_prio, const void *abs_timeout) {
+POST_SYSCALL(mq_timedreceive)
+(long res, long mqdes, void *msg_ptr, long msg_len, int *msg_prio,
+ const void *abs_timeout) {
if (res >= 0) {
- if (msg_ptr) POST_WRITE(msg_ptr, res);
- if (msg_prio) POST_WRITE(msg_prio, sizeof(*msg_prio));
+ if (msg_ptr)
+ POST_WRITE(msg_ptr, res);
+ if (msg_prio)
+ POST_WRITE(msg_prio, sizeof(*msg_prio));
}
}
PRE_SYSCALL(mq_notify)(long mqdes, const void *notification) {
- if (notification) PRE_READ(notification, struct_sigevent_sz);
+ if (notification)
+ PRE_READ(notification, struct_sigevent_sz);
}
POST_SYSCALL(mq_notify)(long res, long mqdes, const void *notification) {}
PRE_SYSCALL(mq_getsetattr)(long mqdes, const void *mqstat, void *omqstat) {
- if (mqstat) PRE_READ(mqstat, struct_mq_attr_sz);
+ if (mqstat)
+ PRE_READ(mqstat, struct_mq_attr_sz);
}
-POST_SYSCALL(mq_getsetattr)(long res, long mqdes, const void *mqstat,
- void *omqstat) {
+POST_SYSCALL(mq_getsetattr)
+(long res, long mqdes, const void *mqstat, void *omqstat) {
if (res >= 0) {
- if (omqstat) POST_WRITE(omqstat, struct_mq_attr_sz);
+ if (omqstat)
+ POST_WRITE(omqstat, struct_mq_attr_sz);
}
}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
PRE_SYSCALL(pciconfig_iobase)(long which, long bus, long devfn) {}
POST_SYSCALL(pciconfig_iobase)(long res, long which, long bus, long devfn) {}
-PRE_SYSCALL(pciconfig_read)(long bus, long dfn, long off, long len, void *buf) {
-}
+PRE_SYSCALL(pciconfig_read)
+(long bus, long dfn, long off, long len, void *buf) {}
-POST_SYSCALL(pciconfig_read)(long res, long bus, long dfn, long off, long len,
- void *buf) {}
+POST_SYSCALL(pciconfig_read)
+(long res, long bus, long dfn, long off, long len, void *buf) {}
-PRE_SYSCALL(pciconfig_write)(long bus, long dfn, long off, long len,
- void *buf) {}
+PRE_SYSCALL(pciconfig_write)
+(long bus, long dfn, long off, long len, void *buf) {}
-POST_SYSCALL(pciconfig_write)(long res, long bus, long dfn, long off, long len,
- void *buf) {}
+POST_SYSCALL(pciconfig_write)
+(long res, long bus, long dfn, long off, long len, void *buf) {}
PRE_SYSCALL(swapon)(const void *specialfile, long swap_flags) {
if (specialfile)
@@ -2247,8 +2463,10 @@ POST_SYSCALL(swapoff)(long res, const void *specialfile) {}
PRE_SYSCALL(sysctl)(__sanitizer___sysctl_args *args) {
if (args) {
- if (args->name) PRE_READ(args->name, args->nlen * sizeof(*args->name));
- if (args->newval) PRE_READ(args->name, args->newlen);
+ if (args->name)
+ PRE_READ(args->name, args->nlen * sizeof(*args->name));
+ if (args->newval)
+ PRE_READ(args->name, args->newlen);
}
}
@@ -2265,7 +2483,8 @@ PRE_SYSCALL(sysinfo)(void *info) {}
POST_SYSCALL(sysinfo)(long res, void *info) {
if (res >= 0) {
- if (info) POST_WRITE(info, struct_sysinfo_sz);
+ if (info)
+ POST_WRITE(info, struct_sysinfo_sz);
}
}
@@ -2294,10 +2513,10 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
- SANITIZER_RISCV64)
+# if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ defined(__loongarch__) || SANITIZER_RISCV64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2312,14 +2531,14 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
PRE_READ(iov->iov_base, iov->iov_len);
}
}
-#endif
+# endif
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
- SANITIZER_RISCV64)
+# if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ defined(__loongarch__) || SANITIZER_RISCV64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
@@ -2340,11 +2559,12 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
POST_WRITE((void *)data, sizeof(void *));
}
}
-#endif
+# endif
}
-PRE_SYSCALL(add_key)(const void *_type, const void *_description,
- const void *_payload, long plen, long destringid) {
+PRE_SYSCALL(add_key)
+(const void *_type, const void *_description, const void *_payload, long plen,
+ long destringid) {
if (_type)
PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
if (_description)
@@ -2352,11 +2572,13 @@ PRE_SYSCALL(add_key)(const void *_type, const void *_description,
__sanitizer::internal_strlen((const char *)_description) + 1);
}
-POST_SYSCALL(add_key)(long res, const void *_type, const void *_description,
- const void *_payload, long plen, long destringid) {}
+POST_SYSCALL(add_key)
+(long res, const void *_type, const void *_description, const void *_payload,
+ long plen, long destringid) {}
-PRE_SYSCALL(request_key)(const void *_type, const void *_description,
- const void *_callout_info, long destringid) {
+PRE_SYSCALL(request_key)
+(const void *_type, const void *_description, const void *_callout_info,
+ long destringid) {
if (_type)
PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
if (_description)
@@ -2367,13 +2589,14 @@ PRE_SYSCALL(request_key)(const void *_type, const void *_description,
__sanitizer::internal_strlen((const char *)_callout_info) + 1);
}
-POST_SYSCALL(request_key)(long res, const void *_type, const void *_description,
- const void *_callout_info, long destringid) {}
+POST_SYSCALL(request_key)
+(long res, const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {}
PRE_SYSCALL(keyctl)(long cmd, long arg2, long arg3, long arg4, long arg5) {}
-POST_SYSCALL(keyctl)(long res, long cmd, long arg2, long arg3, long arg4,
- long arg5) {}
+POST_SYSCALL(keyctl)
+(long res, long cmd, long arg2, long arg3, long arg4, long arg5) {}
PRE_SYSCALL(ioprio_set)(long which, long who, long ioprio) {}
@@ -2387,50 +2610,62 @@ PRE_SYSCALL(set_mempolicy)(long mode, void *nmask, long maxnode) {}
POST_SYSCALL(set_mempolicy)(long res, long mode, void *nmask, long maxnode) {
if (res >= 0) {
- if (nmask) POST_WRITE(nmask, sizeof(long));
+ if (nmask)
+ POST_WRITE(nmask, sizeof(long));
}
}
-PRE_SYSCALL(migrate_pages)(long pid, long maxnode, const void *from,
- const void *to) {
- if (from) PRE_READ(from, sizeof(long));
- if (to) PRE_READ(to, sizeof(long));
+PRE_SYSCALL(migrate_pages)
+(long pid, long maxnode, const void *from, const void *to) {
+ if (from)
+ PRE_READ(from, sizeof(long));
+ if (to)
+ PRE_READ(to, sizeof(long));
}
-POST_SYSCALL(migrate_pages)(long res, long pid, long maxnode, const void *from,
- const void *to) {}
+POST_SYSCALL(migrate_pages)
+(long res, long pid, long maxnode, const void *from, const void *to) {}
-PRE_SYSCALL(move_pages)(long pid, long nr_pages, const void **pages,
- const int *nodes, int *status, long flags) {
- if (pages) PRE_READ(pages, nr_pages * sizeof(*pages));
- if (nodes) PRE_READ(nodes, nr_pages * sizeof(*nodes));
+PRE_SYSCALL(move_pages)
+(long pid, long nr_pages, const void **pages, const int *nodes, int *status,
+ long flags) {
+ if (pages)
+ PRE_READ(pages, nr_pages * sizeof(*pages));
+ if (nodes)
+ PRE_READ(nodes, nr_pages * sizeof(*nodes));
}
-POST_SYSCALL(move_pages)(long res, long pid, long nr_pages, const void **pages,
- const int *nodes, int *status, long flags) {
+POST_SYSCALL(move_pages)
+(long res, long pid, long nr_pages, const void **pages, const int *nodes,
+ int *status, long flags) {
if (res >= 0) {
- if (status) POST_WRITE(status, nr_pages * sizeof(*status));
+ if (status)
+ POST_WRITE(status, nr_pages * sizeof(*status));
}
}
-PRE_SYSCALL(mbind)(long start, long len, long mode, void *nmask, long maxnode,
- long flags) {}
+PRE_SYSCALL(mbind)
+(long start, long len, long mode, void *nmask, long maxnode, long flags) {}
-POST_SYSCALL(mbind)(long res, long start, long len, long mode, void *nmask,
- long maxnode, long flags) {
+POST_SYSCALL(mbind)
+(long res, long start, long len, long mode, void *nmask, long maxnode,
+ long flags) {
if (res >= 0) {
- if (nmask) POST_WRITE(nmask, sizeof(long));
+ if (nmask)
+ POST_WRITE(nmask, sizeof(long));
}
}
-PRE_SYSCALL(get_mempolicy)(void *policy, void *nmask, long maxnode, long addr,
- long flags) {}
+PRE_SYSCALL(get_mempolicy)
+(void *policy, void *nmask, long maxnode, long addr, long flags) {}
-POST_SYSCALL(get_mempolicy)(long res, void *policy, void *nmask, long maxnode,
- long addr, long flags) {
+POST_SYSCALL(get_mempolicy)
+(long res, void *policy, void *nmask, long maxnode, long addr, long flags) {
if (res >= 0) {
- if (policy) POST_WRITE(policy, sizeof(int));
- if (nmask) POST_WRITE(nmask, sizeof(long));
+ if (policy)
+ POST_WRITE(policy, sizeof(int));
+ if (nmask)
+ POST_WRITE(nmask, sizeof(long));
}
}
@@ -2447,8 +2682,8 @@ PRE_SYSCALL(inotify_add_watch)(long fd, const void *path, long mask) {
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
}
-POST_SYSCALL(inotify_add_watch)(long res, long fd, const void *path,
- long mask) {}
+POST_SYSCALL(inotify_add_watch)
+(long res, long fd, const void *path, long mask) {}
PRE_SYSCALL(inotify_rm_watch)(long fd, long wd) {}
@@ -2458,8 +2693,10 @@ PRE_SYSCALL(spu_run)(long fd, void *unpc, void *ustatus) {}
POST_SYSCALL(spu_run)(long res, long fd, unsigned *unpc, unsigned *ustatus) {
if (res >= 0) {
- if (unpc) POST_WRITE(unpc, sizeof(*unpc));
- if (ustatus) POST_WRITE(ustatus, sizeof(*ustatus));
+ if (unpc)
+ POST_WRITE(unpc, sizeof(*unpc));
+ if (ustatus)
+ POST_WRITE(ustatus, sizeof(*ustatus));
}
}
@@ -2468,8 +2705,8 @@ PRE_SYSCALL(spu_create)(const void *name, long flags, long mode, long fd) {
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(spu_create)(long res, const void *name, long flags, long mode,
- long fd) {}
+POST_SYSCALL(spu_create)
+(long res, const void *name, long flags, long mode, long fd) {}
PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
if (filename)
@@ -2477,8 +2714,8 @@ PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(mknodat)(long res, long dfd, const void *filename, long mode,
- long dev) {}
+POST_SYSCALL(mknodat)
+(long res, long dfd, const void *filename, long mode, long dev) {}
PRE_SYSCALL(mkdirat)(long dfd, const void *pathname, long mode) {
if (pathname)
@@ -2503,30 +2740,33 @@ PRE_SYSCALL(symlinkat)(const void *oldname, long newdfd, const void *newname) {
PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
}
-POST_SYSCALL(symlinkat)(long res, const void *oldname, long newdfd,
- const void *newname) {}
+POST_SYSCALL(symlinkat)
+(long res, const void *oldname, long newdfd, const void *newname) {}
-PRE_SYSCALL(linkat)(long olddfd, const void *oldname, long newdfd,
- const void *newname, long flags) {
+PRE_SYSCALL(linkat)
+(long olddfd, const void *oldname, long newdfd, const void *newname,
+ long flags) {
if (oldname)
PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
if (newname)
PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
}
-POST_SYSCALL(linkat)(long res, long olddfd, const void *oldname, long newdfd,
- const void *newname, long flags) {}
+POST_SYSCALL(linkat)
+(long res, long olddfd, const void *oldname, long newdfd, const void *newname,
+ long flags) {}
-PRE_SYSCALL(renameat)(long olddfd, const void *oldname, long newdfd,
- const void *newname) {
+PRE_SYSCALL(renameat)
+(long olddfd, const void *oldname, long newdfd, const void *newname) {
if (oldname)
PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
if (newname)
PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
}
-POST_SYSCALL(renameat)(long res, long olddfd, const void *oldname, long newdfd,
- const void *newname) {}
+POST_SYSCALL(renameat)
+(long res, long olddfd, const void *oldname, long newdfd, const void *newname) {
+}
PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
if (filename)
@@ -2534,10 +2774,11 @@ PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(futimesat)(long res, long dfd, const void *filename,
- void *utimes) {
+POST_SYSCALL(futimesat)
+(long res, long dfd, const void *filename, void *utimes) {
if (res >= 0) {
- if (utimes) POST_WRITE(utimes, timeval_sz);
+ if (utimes)
+ POST_WRITE(utimes, timeval_sz);
}
}
@@ -2557,15 +2798,15 @@ PRE_SYSCALL(fchmodat)(long dfd, const void *filename, long mode) {
POST_SYSCALL(fchmodat)(long res, long dfd, const void *filename, long mode) {}
-PRE_SYSCALL(fchownat)(long dfd, const void *filename, long user, long group,
- long flag) {
+PRE_SYSCALL(fchownat)
+(long dfd, const void *filename, long user, long group, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(fchownat)(long res, long dfd, const void *filename, long user,
- long group, long flag) {}
+POST_SYSCALL(fchownat)
+(long res, long dfd, const void *filename, long user, long group, long flag) {}
PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
if (filename)
@@ -2573,34 +2814,36 @@ PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(openat)(long res, long dfd, const void *filename, long flags,
- long mode) {}
+POST_SYSCALL(openat)
+(long res, long dfd, const void *filename, long flags, long mode) {}
-PRE_SYSCALL(newfstatat)(long dfd, const void *filename, void *statbuf,
- long flag) {
+PRE_SYSCALL(newfstatat)
+(long dfd, const void *filename, void *statbuf, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(newfstatat)(long res, long dfd, const void *filename,
- void *statbuf, long flag) {
+POST_SYSCALL(newfstatat)
+(long res, long dfd, const void *filename, void *statbuf, long flag) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
-PRE_SYSCALL(fstatat64)(long dfd, const void *filename, void *statbuf,
- long flag) {
+PRE_SYSCALL(fstatat64)
+(long dfd, const void *filename, void *statbuf, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(fstatat64)(long res, long dfd, const void *filename, void *statbuf,
- long flag) {
+POST_SYSCALL(fstatat64)
+(long res, long dfd, const void *filename, void *statbuf, long flag) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
@@ -2609,25 +2852,26 @@ PRE_SYSCALL(readlinkat)(long dfd, const void *path, void *buf, long bufsiz) {
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
}
-POST_SYSCALL(readlinkat)(long res, long dfd, const void *path, void *buf,
- long bufsiz) {
+POST_SYSCALL(readlinkat)
+(long res, long dfd, const void *path, void *buf, long bufsiz) {
if (res >= 0) {
if (buf)
POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
}
}
-PRE_SYSCALL(utimensat)(long dfd, const void *filename, void *utimes,
- long flags) {
+PRE_SYSCALL(utimensat)
+(long dfd, const void *filename, void *utimes, long flags) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(utimensat)(long res, long dfd, const void *filename, void *utimes,
- long flags) {
+POST_SYSCALL(utimensat)
+(long res, long dfd, const void *filename, void *utimes, long flags) {
if (res >= 0) {
- if (utimes) POST_WRITE(utimes, struct_timespec_sz);
+ if (utimes)
+ POST_WRITE(utimes, struct_timespec_sz);
}
}
@@ -2635,24 +2879,28 @@ PRE_SYSCALL(unshare)(long unshare_flags) {}
POST_SYSCALL(unshare)(long res, long unshare_flags) {}
-PRE_SYSCALL(splice)(long fd_in, void *off_in, long fd_out, void *off_out,
- long len, long flags) {}
+PRE_SYSCALL(splice)
+(long fd_in, void *off_in, long fd_out, void *off_out, long len, long flags) {}
-POST_SYSCALL(splice)(long res, long fd_in, void *off_in, long fd_out,
- void *off_out, long len, long flags) {
+POST_SYSCALL(splice)
+(long res, long fd_in, void *off_in, long fd_out, void *off_out, long len,
+ long flags) {
if (res >= 0) {
- if (off_in) POST_WRITE(off_in, sizeof(long long));
- if (off_out) POST_WRITE(off_out, sizeof(long long));
+ if (off_in)
+ POST_WRITE(off_in, sizeof(long long));
+ if (off_out)
+ POST_WRITE(off_out, sizeof(long long));
}
}
-PRE_SYSCALL(vmsplice)(long fd, const __sanitizer_iovec *iov, long nr_segs,
- long flags) {}
+PRE_SYSCALL(vmsplice)
+(long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {}
-POST_SYSCALL(vmsplice)(long res, long fd, const __sanitizer_iovec *iov,
- long nr_segs, long flags) {
+POST_SYSCALL(vmsplice)
+(long res, long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {
if (res >= 0) {
- if (iov) kernel_read_iovec(iov, nr_segs, res);
+ if (iov)
+ kernel_read_iovec(iov, nr_segs, res);
}
}
@@ -2662,8 +2910,8 @@ POST_SYSCALL(tee)(long res, long fdin, long fdout, long len, long flags) {}
PRE_SYSCALL(get_robust_list)(long pid, void *head_ptr, void *len_ptr) {}
-POST_SYSCALL(get_robust_list)(long res, long pid, void *head_ptr,
- void *len_ptr) {}
+POST_SYSCALL(get_robust_list)
+(long res, long pid, void *head_ptr, void *len_ptr) {}
PRE_SYSCALL(set_robust_list)(void *head, long len) {}
@@ -2673,27 +2921,31 @@ PRE_SYSCALL(getcpu)(void *cpu, void *node, void *cache) {}
POST_SYSCALL(getcpu)(long res, void *cpu, void *node, void *cache) {
if (res >= 0) {
- if (cpu) POST_WRITE(cpu, sizeof(unsigned));
- if (node) POST_WRITE(node, sizeof(unsigned));
+ if (cpu)
+ POST_WRITE(cpu, sizeof(unsigned));
+ if (node)
+ POST_WRITE(node, sizeof(unsigned));
// The third argument to this system call is nowadays unused.
}
}
PRE_SYSCALL(signalfd)(long ufd, void *user_mask, long sizemask) {}
-POST_SYSCALL(signalfd)(long res, long ufd, kernel_sigset_t *user_mask,
- long sizemask) {
+POST_SYSCALL(signalfd)
+(long res, long ufd, kernel_sigset_t *user_mask, long sizemask) {
if (res >= 0) {
- if (user_mask) POST_WRITE(user_mask, sizemask);
+ if (user_mask)
+ POST_WRITE(user_mask, sizemask);
}
}
PRE_SYSCALL(signalfd4)(long ufd, void *user_mask, long sizemask, long flags) {}
-POST_SYSCALL(signalfd4)(long res, long ufd, kernel_sigset_t *user_mask,
- long sizemask, long flags) {
+POST_SYSCALL(signalfd4)
+(long res, long ufd, kernel_sigset_t *user_mask, long sizemask, long flags) {
if (res >= 0) {
- if (user_mask) POST_WRITE(user_mask, sizemask);
+ if (user_mask)
+ POST_WRITE(user_mask, sizemask);
}
}
@@ -2701,15 +2953,17 @@ PRE_SYSCALL(timerfd_create)(long clockid, long flags) {}
POST_SYSCALL(timerfd_create)(long res, long clockid, long flags) {}
-PRE_SYSCALL(timerfd_settime)(long ufd, long flags, const void *utmr,
- void *otmr) {
- if (utmr) PRE_READ(utmr, struct_itimerspec_sz);
+PRE_SYSCALL(timerfd_settime)
+(long ufd, long flags, const void *utmr, void *otmr) {
+ if (utmr)
+ PRE_READ(utmr, struct_itimerspec_sz);
}
-POST_SYSCALL(timerfd_settime)(long res, long ufd, long flags, const void *utmr,
- void *otmr) {
+POST_SYSCALL(timerfd_settime)
+(long res, long ufd, long flags, const void *utmr, void *otmr) {
if (res >= 0) {
- if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ if (otmr)
+ POST_WRITE(otmr, struct_itimerspec_sz);
}
}
@@ -2717,7 +2971,8 @@ PRE_SYSCALL(timerfd_gettime)(long ufd, void *otmr) {}
POST_SYSCALL(timerfd_gettime)(long res, long ufd, void *otmr) {
if (res >= 0) {
- if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ if (otmr)
+ POST_WRITE(otmr, struct_itimerspec_sz);
}
}
@@ -2735,33 +2990,42 @@ POST_SYSCALL(old_readdir)(long res, long arg0, void *arg1, long arg2) {
// Missing definition of 'struct old_linux_dirent'.
}
-PRE_SYSCALL(pselect6)(long arg0, __sanitizer___kernel_fd_set *arg1,
- __sanitizer___kernel_fd_set *arg2,
- __sanitizer___kernel_fd_set *arg3, void *arg4,
- void *arg5) {}
+PRE_SYSCALL(pselect6)
+(long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,
+ void *arg4, void *arg5) {}
-POST_SYSCALL(pselect6)(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
- __sanitizer___kernel_fd_set *arg2,
- __sanitizer___kernel_fd_set *arg3, void *arg4,
- void *arg5) {
+POST_SYSCALL(pselect6)
+(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,
+ void *arg4, void *arg5) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(*arg2));
- if (arg3) POST_WRITE(arg3, sizeof(*arg3));
- if (arg4) POST_WRITE(arg4, struct_timespec_sz);
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(*arg2));
+ if (arg3)
+ POST_WRITE(arg3, sizeof(*arg3));
+ if (arg4)
+ POST_WRITE(arg4, struct_timespec_sz);
}
}
-PRE_SYSCALL(ppoll)(__sanitizer_pollfd *arg0, long arg1, void *arg2,
- const kernel_sigset_t *arg3, long arg4) {
- if (arg3) PRE_READ(arg3, arg4);
+PRE_SYSCALL(ppoll)
+(__sanitizer_pollfd *arg0, long arg1, void *arg2, const kernel_sigset_t *arg3,
+ long arg4) {
+ if (arg3)
+ PRE_READ(arg3, arg4);
}
-POST_SYSCALL(ppoll)(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2,
- const void *arg3, long arg4) {
+POST_SYSCALL(ppoll)
+(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2, const void *arg3,
+ long arg4) {
if (res >= 0) {
- if (arg0) POST_WRITE(arg0, sizeof(*arg0));
- if (arg2) POST_WRITE(arg2, struct_timespec_sz);
+ if (arg0)
+ POST_WRITE(arg0, sizeof(*arg0));
+ if (arg2)
+ POST_WRITE(arg2, struct_timespec_sz);
}
}
@@ -2769,81 +3033,79 @@ PRE_SYSCALL(syncfs)(long fd) {}
POST_SYSCALL(syncfs)(long res, long fd) {}
-PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
- long cpu, long group_fd, long flags) {
- if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
+PRE_SYSCALL(perf_event_open)
+(__sanitizer_perf_event_attr *attr_uptr, long pid, long cpu, long group_fd,
+ long flags) {
+ if (attr_uptr)
+ PRE_READ(attr_uptr, attr_uptr->size);
}
-POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
- long pid, long cpu, long group_fd, long flags) {}
+POST_SYSCALL(perf_event_open)
+(long res, __sanitizer_perf_event_attr *attr_uptr, long pid, long cpu,
+ long group_fd, long flags) {}
-PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
- long pgoff) {}
+PRE_SYSCALL(mmap_pgoff)
+(long addr, long len, long prot, long flags, long fd, long pgoff) {}
-POST_SYSCALL(mmap_pgoff)(long res, long addr, long len, long prot, long flags,
- long fd, long pgoff) {}
+POST_SYSCALL(mmap_pgoff)
+(long res, long addr, long len, long prot, long flags, long fd, long pgoff) {}
PRE_SYSCALL(old_mmap)(void *arg) {}
POST_SYSCALL(old_mmap)(long res, void *arg) {}
-PRE_SYSCALL(name_to_handle_at)(long dfd, const void *name, void *handle,
- void *mnt_id, long flag) {}
+PRE_SYSCALL(name_to_handle_at)
+(long dfd, const void *name, void *handle, void *mnt_id, long flag) {}
-POST_SYSCALL(name_to_handle_at)(long res, long dfd, const void *name,
- void *handle, void *mnt_id, long flag) {}
+POST_SYSCALL(name_to_handle_at)
+(long res, long dfd, const void *name, void *handle, void *mnt_id, long flag) {}
PRE_SYSCALL(open_by_handle_at)(long mountdirfd, void *handle, long flags) {}
-POST_SYSCALL(open_by_handle_at)(long res, long mountdirfd, void *handle,
- long flags) {}
+POST_SYSCALL(open_by_handle_at)
+(long res, long mountdirfd, void *handle, long flags) {}
PRE_SYSCALL(setns)(long fd, long nstype) {}
POST_SYSCALL(setns)(long res, long fd, long nstype) {}
-PRE_SYSCALL(process_vm_readv)(long pid, const __sanitizer_iovec *lvec,
- long liovcnt, const void *rvec, long riovcnt,
- long flags) {}
+PRE_SYSCALL(process_vm_readv)
+(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,
+ long riovcnt, long flags) {}
-POST_SYSCALL(process_vm_readv)(long res, long pid,
- const __sanitizer_iovec *lvec, long liovcnt,
- const void *rvec, long riovcnt, long flags) {
+POST_SYSCALL(process_vm_readv)
+(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
if (res >= 0) {
- if (lvec) kernel_write_iovec(lvec, liovcnt, res);
+ if (lvec)
+ kernel_write_iovec(lvec, liovcnt, res);
}
}
-PRE_SYSCALL(process_vm_writev)(long pid, const __sanitizer_iovec *lvec,
- long liovcnt, const void *rvec, long riovcnt,
- long flags) {}
+PRE_SYSCALL(process_vm_writev)
+(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,
+ long riovcnt, long flags) {}
-POST_SYSCALL(process_vm_writev)(long res, long pid,
- const __sanitizer_iovec *lvec, long liovcnt,
- const void *rvec, long riovcnt, long flags) {
+POST_SYSCALL(process_vm_writev)
+(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
if (res >= 0) {
- if (lvec) kernel_read_iovec(lvec, liovcnt, res);
+ if (lvec)
+ kernel_read_iovec(lvec, liovcnt, res);
}
}
-PRE_SYSCALL(fork)() {
- COMMON_SYSCALL_PRE_FORK();
-}
+PRE_SYSCALL(fork)() { COMMON_SYSCALL_PRE_FORK(); }
-POST_SYSCALL(fork)(long res) {
- COMMON_SYSCALL_POST_FORK(res);
-}
+POST_SYSCALL(fork)(long res) { COMMON_SYSCALL_POST_FORK(res); }
-PRE_SYSCALL(vfork)() {
- COMMON_SYSCALL_PRE_FORK();
-}
+PRE_SYSCALL(vfork)() { COMMON_SYSCALL_PRE_FORK(); }
-POST_SYSCALL(vfork)(long res) {
- COMMON_SYSCALL_POST_FORK(res);
-}
+POST_SYSCALL(vfork)(long res) { COMMON_SYSCALL_POST_FORK(res); }
-PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
+PRE_SYSCALL(sigaction)
+(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
if (act) {
PRE_READ(&act->sigaction, sizeof(act->sigaction));
PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
@@ -2851,15 +3113,16 @@ PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
}
}
-POST_SYSCALL(sigaction)(long res, long signum,
- const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
- if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
+POST_SYSCALL(sigaction)
+(long res, long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (res >= 0 && oldact)
+ POST_WRITE(oldact, sizeof(*oldact));
}
-PRE_SYSCALL(rt_sigaction)(long signum,
- const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+PRE_SYSCALL(rt_sigaction)
+(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
if (act) {
PRE_READ(&act->sigaction, sizeof(act->sigaction));
PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
@@ -2867,9 +3130,9 @@ PRE_SYSCALL(rt_sigaction)(long signum,
}
}
-POST_SYSCALL(rt_sigaction)(long res, long signum,
- const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+POST_SYSCALL(rt_sigaction)
+(long res, long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
if (res >= 0 && oldact) {
SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
POST_WRITE(oldact, oldact_sz);
@@ -2906,11 +3169,11 @@ POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {
}
} // extern "C"
-#undef PRE_SYSCALL
-#undef PRE_READ
-#undef PRE_WRITE
-#undef POST_SYSCALL
-#undef POST_READ
-#undef POST_WRITE
+# undef PRE_SYSCALL
+# undef PRE_READ
+# undef PRE_WRITE
+# undef POST_SYSCALL
+# undef POST_READ
+# undef POST_WRITE
#endif // SANITIZER_LINUX
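
The PRE_SYSCALL/POST_SYSCALL bodies above are the implementations behind the public syscall hooks: a program that issues raw syscalls can invoke the matching pre/post hooks so the runtime still observes their effects. A minimal sketch, assuming the fork hooks are exposed by <sanitizer/linux_syscall_hooks.h> in this build (x86_64 shown; names outside this diff are assumptions):

#include <sanitizer/linux_syscall_hooks.h>
#include <sys/syscall.h>
#include <unistd.h>

static long raw_fork() {
  // Inform the runtime before and after the raw syscall, mirroring the
  // PRE_SYSCALL(fork)/POST_SYSCALL(fork) hooks defined above.
  __sanitizer_syscall_pre_fork();
  long res = syscall(SYS_fork);
  __sanitizer_syscall_post_fork(res);
  return res;
}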
lib/tsan/sanitizer_common/sanitizer_coverage_interface.inc
@@ -27,6 +27,16 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load16)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store16)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init)
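
The new __sanitizer_cov_loadN/__sanitizer_cov_storeN entries export the weak callbacks used by load/store tracing instrumentation (-fsanitize-coverage=trace-loads,trace-stores). A hedged sketch of overriding two of them from user code; the pointer-to-accessed-location signatures are an assumption based on the other trace hooks:

#include <cstdint>

// Strong definitions override the weak ones exported above (assumed
// signatures: the argument is the address about to be loaded from / stored to).
extern "C" void __sanitizer_cov_load4(uint32_t *addr) {
  // Called before every instrumented 4-byte load.
}
extern "C" void __sanitizer_cov_store8(uint64_t *addr) {
  // Called before every instrumented 8-byte store.
}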
lib/tsan/sanitizer_common/sanitizer_coverage_win_dll_thunk.cpp
@@ -0,0 +1,20 @@
+//===-- sanitizer_coverage_win_dll_thunk.cpp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_dll_thunk.h"
+// Sanitizer Coverage interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DLL_THUNK
lib/tsan/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cpp
@@ -0,0 +1,26 @@
+//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cpp ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Coverage, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from sanitizer coverage.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
+
+namespace __sanitizer {
+// Add one, otherwise unused, external symbol to this object file so that the
+// Visual C++ linker includes it and reads the .drectve section.
+void ForceWholeArchiveIncludeForSanCov() {}
+}
lib/tsan/sanitizer_common/sanitizer_coverage_win_weak_interception.cpp
@@ -0,0 +1,23 @@
+//===-- sanitizer_coverage_win_weak_interception.cpp ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Sanitizer Coverage when it is implemented
+// as a shared library on Windows (dll), in order to delegate the calls of weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_interface_internal.h"
+#include "sancov_flags.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC
lib/tsan/sanitizer_common/sanitizer_deadlock_detector.h
@@ -293,7 +293,7 @@ class DeadlockDetector {
}
// Returns true iff dtls is empty (no locks are currently held) and we can
- // add the node to the currently held locks w/o chanding the global state.
+ // add the node to the currently held locks w/o changing the global state.
// This operation is thread-safe as it only touches the dtls.
bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (!dtls->empty()) return false;
lib/tsan/sanitizer_common/sanitizer_dense_map.h
@@ -0,0 +1,705 @@
+//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is fork of llvm/ADT/DenseMap.h class with the following changes:
+// * Use mmap to allocate.
+// * No iterators.
+// * Does not shrink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DENSE_MAP_H
+#define SANITIZER_DENSE_MAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_dense_map_info.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+class DenseMapBase {
+ public:
+ using size_type = unsigned;
+ using key_type = KeyT;
+ using mapped_type = ValueT;
+ using value_type = BucketT;
+
+ WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
+ unsigned size() const { return getNumEntries(); }
+
+ /// Grow the densemap so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_type NumEntries) {
+ auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+ if (NumBuckets > getNumBuckets())
+ grow(NumBuckets);
+ }
+
+ void clear() {
+ if (getNumEntries() == 0 && getNumTombstones() == 0)
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ if (__sanitizer::is_trivially_destructible<ValueT>::value) {
+ // Use a simpler loop when values don't need destruction.
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+ P->getFirst() = EmptyKey;
+ } else {
+ unsigned NumEntries = getNumEntries();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ P->getSecond().~ValueT();
+ --NumEntries;
+ }
+ P->getFirst() = EmptyKey;
+ }
+ }
+ CHECK_EQ(NumEntries, 0);
+ }
+ setNumEntries(0);
+ setNumTombstones(0);
+ }
+
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ return LookupBucketFor(Key, TheBucket) ? 1 : 0;
+ }
+
+ value_type *find(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ const value_type *find(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <class LookupKeyT>
+ value_type *find_as(const LookupKeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ template <class LookupKeyT>
+ const value_type *find_as(const LookupKeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket->getSecond();
+ return ValueT();
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
+ return try_emplace(KV.first, KV.second);
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
+ return try_emplace(__sanitizer::move(KV.first),
+ __sanitizer::move(KV.second));
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
+ Ts &&...Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
+ __sanitizer::forward<Ts>(Args)...);
+ return {TheBucket, true};
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
+ Ts &&...Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
+ return {TheBucket, true};
+ }
+
+ /// Alternate version of insert() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <typename LookupKeyT>
+ detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
+ const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
+ __sanitizer::move(KV.second), Val);
+ return {TheBucket, true};
+ }
+
+ bool erase(const KeyT &Val) {
+ BucketT *TheBucket;
+ if (!LookupBucketFor(Val, TheBucket))
+ return false; // not in map.
+
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ return true;
+ }
+
+ void erase(value_type *I) {
+ CHECK_NE(I, nullptr);
+ BucketT *TheBucket = &*I;
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ }
+
+ value_type &FindAndConstruct(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, Key);
+ }
+
+ ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }
+
+ value_type &FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
+ }
+
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(__sanitizer::move(Key)).second;
+ }
+
+ /// Iterate over active entries of the container.
+ ///
+  /// The callback can return false to stop the iteration early.
+ template <class Fn>
+ void forEach(Fn fn) {
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ const KeyT K = P->getFirst();
+ if (!KeyInfoT::isEqual(K, EmptyKey) &&
+ !KeyInfoT::isEqual(K, TombstoneKey)) {
+ if (!fn(*P))
+ return;
+ }
+ }
+ }
+
+ template <class Fn>
+ void forEach(Fn fn) const {
+ const_cast<DenseMapBase *>(this)->forEach(
+ [&](const value_type &KV) { return fn(KV); });
+ }
+
+ protected:
+ DenseMapBase() = default;
+
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+ P->getSecond().~ValueT();
+ P->getFirst().~KeyT();
+ }
+ }
+
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
+
+ CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ ::new (&B->getFirst()) KeyT(EmptyKey);
+ }
+
+ /// Returns the number of buckets to allocate to ensure that the DenseMap can
+ /// accommodate \p NumEntries without need to grow().
+ unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+ // Ensure that "NumEntries * 4 < NumBuckets * 3"
+ if (NumEntries == 0)
+ return 0;
+ // +1 is required because of the strict equality.
+    // For example if NumEntries is 48, 48*4/3 == 64 buckets do not satisfy
+    // the strict inequality, so at least 65 are needed before rounding up.
+ return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+ (void)FoundVal; // silence warning.
+ CHECK(!FoundVal);
+ DestBucket->getFirst() = __sanitizer::move(B->getFirst());
+ ::new (&DestBucket->getSecond())
+ ValueT(__sanitizer::move(B->getSecond()));
+ incrementNumEntries();
+
+ // Free the value.
+ B->getSecond().~ValueT();
+ }
+ B->getFirst().~KeyT();
+ }
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(
+ const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+ CHECK_NE(&other, this);
+ CHECK_EQ(getNumBuckets(), other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
+
+ if (__sanitizer::is_trivially_copyable<KeyT>::value &&
+ __sanitizer::is_trivially_copyable<ValueT>::value)
+ internal_memcpy(reinterpret_cast<void *>(getBuckets()),
+ other.getBuckets(), getNumBuckets() * sizeof(BucketT));
+ else
+ for (uptr i = 0; i < getNumBuckets(); ++i) {
+ ::new (&getBuckets()[i].getFirst())
+ KeyT(other.getBuckets()[i].getFirst());
+ if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+ ::new (&getBuckets()[i].getSecond())
+ ValueT(other.getBuckets()[i].getSecond());
+ }
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ template <typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }
+
+ static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
+
+ private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+
+ void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
+
+ void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
+
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+
+ void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
+
+ void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
+
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+
+ BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
+
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+
+ BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
+
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
+
+ template <typename KeyArg, typename... ValueArgs>
+ BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+ ValueArgs &&...Values) {
+ TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
+
+ TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
+ ::new (&TheBucket->getSecond())
+ ValueT(__sanitizer::forward<ValueArgs>(Values)...);
+ return TheBucket;
+ }
+
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+ ValueT &&Value, LookupKeyT &Lookup) {
+ TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
+
+ TheBucket->getFirst() = __sanitizer::move(Key);
+ ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
+ return TheBucket;
+ }
+
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+ BucketT *TheBucket) {
+ // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+ // the buckets are empty (meaning that many are filled with tombstones),
+ // grow the table.
+ //
+    // The latter case is tricky. For example, if we had one empty bucket with
+    // tons of tombstones, failing lookups (e.g. for insertion) would have to
+    // probe almost the entire table until they found the empty bucket. If the
+    // table were completely filled with tombstones, no lookup would ever
+    // succeed, causing infinite loops in lookup.
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+ this->grow(NumBuckets * 2);
+ LookupBucketFor(Lookup, TheBucket);
+ NumBuckets = getNumBuckets();
+ } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
+ NumBuckets / 8)) {
+ this->grow(NumBuckets);
+ LookupBucketFor(Lookup, TheBucket);
+ }
+ CHECK(TheBucket);
+
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have self-consistent entry count.
+ incrementNumEntries();
+
+ // If we are writing over a tombstone, remember this.
+ const KeyT EmptyKey = getEmptyKey();
+ if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+ decrementNumTombstones();
+
+ return TheBucket;
+ }
+
+ /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+ /// FoundBucket. If the bucket contains the key and a value, this returns
+ /// true, otherwise it returns a bucket with an empty marker or tombstone and
+ /// returns false.
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
+
+ if (NumBuckets == 0) {
+ FoundBucket = nullptr;
+ return false;
+ }
+
+ // FoundTombstone - Keep track of whether we find a tombstone while probing.
+ const BucketT *FoundTombstone = nullptr;
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
+ CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));
+
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
+ unsigned ProbeAmt = 1;
+ while (true) {
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
+ // Found Val's bucket? If so, return it.
+ if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+ FoundBucket = ThisBucket;
+ return true;
+ }
+
+ // If we found an empty bucket, the key doesn't exist in the set.
+ // Insert it and return the default value.
+ if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+ // If we've already seen a tombstone while probing, fill it in instead
+ // of the empty bucket we eventually probed to.
+ FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+ return false;
+ }
+
+      // If this is a tombstone, remember it. If Val ends up not in the map,
+      // we prefer to return it rather than something that would require more
+      // probing.
+ if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+ !FoundTombstone)
+ FoundTombstone = ThisBucket; // Remember the first tombstone found.
+
+ // Otherwise, it's a hash collision or a tombstone, continue quadratic
+ // probing.
+ BucketNo += ProbeAmt++;
+ BucketNo &= (NumBuckets - 1);
+ }
+ }
+
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
+ Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
+
+ public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+ /// If entries are pointers to objects, the size of the referenced objects
+  /// is not included.
+ uptr getMemorySize() const {
+ return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
+ }
+};
+
+/// Equality comparison for DenseMap.
+///
+/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
+/// is also in RHS, and that no additional pairs are in RHS.
+/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
+/// complexity is linear, worst case is O(N^2) (if every hash collides).
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator==(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ bool R = true;
+ LHS.forEach(
+ [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
+ BucketT>::value_type &KV) -> bool {
+ const auto *I = RHS.find(KV.first);
+ if (!I || I->second != KV.second) {
+ R = false;
+ return false;
+ }
+ return true;
+ });
+
+ return R;
+}
+
+/// Inequality comparison for DenseMap.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator!=(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ return !(LHS == RHS);
+}
+
+template <typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+ KeyT, ValueT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ BucketT *Buckets = nullptr;
+ unsigned NumEntries = 0;
+ unsigned NumTombstones = 0;
+ unsigned NumBuckets = 0;
+
+ public:
+  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
+  /// this number of elements can be inserted in the map without grow().
+ explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
+ constexpr DenseMap() = default;
+
+ DenseMap(const DenseMap &other) : BaseT() {
+ init(0);
+ copyFrom(other);
+ }
+
+ DenseMap(DenseMap &&other) : BaseT() {
+ init(0);
+ swap(other);
+ }
+
+ ~DenseMap() {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+ }
+
+ void swap(DenseMap &RHS) {
+ Swap(Buckets, RHS.Buckets);
+ Swap(NumEntries, RHS.NumEntries);
+ Swap(NumTombstones, RHS.NumTombstones);
+ Swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap &operator=(const DenseMap &other) {
+ if (&other != this)
+ copyFrom(other);
+ return *this;
+ }
+
+ DenseMap &operator=(DenseMap &&other) {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
+ init(0);
+ swap(other);
+ return *this;
+ }
+
+ void copyFrom(const DenseMap &other) {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void init(unsigned InitNumEntries) {
+ auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void grow(unsigned AtLeast) {
+ unsigned OldNumBuckets = NumBuckets;
+ BucketT *OldBuckets = Buckets;
+
+ allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
+ CHECK(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);
+
+ // Free the old table.
+ deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
+ }
+
+ private:
+ unsigned getNumEntries() const { return NumEntries; }
+
+ void setNumEntries(unsigned Num) { NumEntries = Num; }
+
+ unsigned getNumTombstones() const { return NumTombstones; }
+
+ void setNumTombstones(unsigned Num) { NumTombstones = Num; }
+
+ BucketT *getBuckets() const { return Buckets; }
+
+ unsigned getNumBuckets() const { return NumBuckets; }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = nullptr;
+ return false;
+ }
+
+ uptr Size = sizeof(BucketT) * NumBuckets;
+ if (Size * 2 <= GetPageSizeCached()) {
+      // We always allocate at least a page, so use the entire space.
+ unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
+ Size <<= Log2;
+ NumBuckets <<= Log2;
+ CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
+ CHECK_GT(Size * 2, GetPageSizeCached());
+ }
+ Buckets = static_cast<BucketT *>(allocate_buffer(Size));
+ return true;
+ }
+
+ static void *allocate_buffer(uptr Size) {
+ return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
+ }
+
+ static void deallocate_buffer(void *Ptr, uptr Size) {
+ UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
+ }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DENSE_MAP_H
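
A minimal usage sketch built only from the interface above (DenseMap is internal to the runtime, so this is illustrative rather than a supported public API):

using __sanitizer::uptr;
using __sanitizer::u32;

// Count events keyed by address.
__sanitizer::DenseMap<uptr, u32> counts;
uptr key = 0x1000;
counts.try_emplace(key, 1u);     // inserts {key, 1}, returns {bucket, true}
++counts[key];                   // operator[] default-constructs missing values
if (auto *kv = counts.find(key)) {
  u32 seen = kv->second;         // == 2; find() returns a bucket pointer or null
  (void)seen;
}
counts.forEach([](__sanitizer::detail::DenseMapPair<uptr, u32> &kv) {
  return kv.second < 100;        // returning false stops the iteration early
});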
lib/tsan/sanitizer_common/sanitizer_dense_map_info.h
@@ -0,0 +1,282 @@
+//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DENSE_MAP_INFO_H
+#define SANITIZER_DENSE_MAP_INFO_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+namespace detail {
+
+/// Simplistic combination of 32-bit hash values into 32-bit hash values.
+static constexpr unsigned combineHashValue(unsigned a, unsigned b) {
+ u64 key = (u64)a << 32 | (u64)b;
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return (unsigned)key;
+}
+
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair {
+ KeyT first = {};
+ ValueT second = {};
+ constexpr DenseMapPair() = default;
+ constexpr DenseMapPair(const KeyT &f, const ValueT &s)
+ : first(f), second(s) {}
+
+ template <typename KeyT2, typename ValueT2>
+ constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s)
+ : first(__sanitizer::forward<KeyT2>(f)),
+ second(__sanitizer::forward<ValueT2>(s)) {}
+
+ constexpr DenseMapPair(const DenseMapPair &other) = default;
+ constexpr DenseMapPair &operator=(const DenseMapPair &other) = default;
+ constexpr DenseMapPair(DenseMapPair &&other) = default;
+ constexpr DenseMapPair &operator=(DenseMapPair &&other) = default;
+
+ KeyT &getFirst() { return first; }
+ const KeyT &getFirst() const { return first; }
+ ValueT &getSecond() { return second; }
+ const ValueT &getSecond() const { return second; }
+};
+
+} // end namespace detail
+
+template <typename T>
+struct DenseMapInfo {
+ // static T getEmptyKey();
+ // static T getTombstoneKey();
+ // static unsigned getHashValue(const T &Val);
+ // static bool isEqual(const T &LHS, const T &RHS);
+};
+
+// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
+// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
+// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
+// declared key types. Assume that no pointer key type requires more than 4096
+// bytes of alignment.
+template <typename T>
+struct DenseMapInfo<T *> {
+ // The following should hold, but it would require T to be complete:
+ // static_assert(alignof(T) <= (1 << Log2MaxAlign),
+ // "DenseMap does not support pointer keys requiring more than "
+ // "Log2MaxAlign bits of alignment");
+ static constexpr uptr Log2MaxAlign = 12;
+
+ static constexpr T *getEmptyKey() {
+ uptr Val = static_cast<uptr>(-1);
+ Val <<= Log2MaxAlign;
+ return reinterpret_cast<T *>(Val);
+ }
+
+ static constexpr T *getTombstoneKey() {
+ uptr Val = static_cast<uptr>(-2);
+ Val <<= Log2MaxAlign;
+ return reinterpret_cast<T *>(Val);
+ }
+
+ static constexpr unsigned getHashValue(const T *PtrVal) {
+ return (unsigned((uptr)PtrVal) >> 4) ^ (unsigned((uptr)PtrVal) >> 9);
+ }
+
+ static constexpr bool isEqual(const T *LHS, const T *RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for chars.
+template <>
+struct DenseMapInfo<char> {
+ static constexpr char getEmptyKey() { return ~0; }
+ static constexpr char getTombstoneKey() { return ~0 - 1; }
+ static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; }
+
+ static constexpr bool isEqual(const char &LHS, const char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned chars.
+template <>
+struct DenseMapInfo<unsigned char> {
+ static constexpr unsigned char getEmptyKey() { return ~0; }
+ static constexpr unsigned char getTombstoneKey() { return ~0 - 1; }
+ static constexpr unsigned getHashValue(const unsigned char &Val) {
+ return Val * 37U;
+ }
+
+ static constexpr bool isEqual(const unsigned char &LHS,
+ const unsigned char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned shorts.
+template <>
+struct DenseMapInfo<unsigned short> {
+ static constexpr unsigned short getEmptyKey() { return 0xFFFF; }
+ static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; }
+ static constexpr unsigned getHashValue(const unsigned short &Val) {
+ return Val * 37U;
+ }
+
+ static constexpr bool isEqual(const unsigned short &LHS,
+ const unsigned short &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template <>
+struct DenseMapInfo<unsigned> {
+ static constexpr unsigned getEmptyKey() { return ~0U; }
+ static constexpr unsigned getTombstoneKey() { return ~0U - 1; }
+ static constexpr unsigned getHashValue(const unsigned &Val) {
+ return Val * 37U;
+ }
+
+ static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template <>
+struct DenseMapInfo<unsigned long> {
+ static constexpr unsigned long getEmptyKey() { return ~0UL; }
+ static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
+ static constexpr unsigned getHashValue(const unsigned long &Val) {
+ return (unsigned)(Val * 37UL);
+ }
+
+ static constexpr bool isEqual(const unsigned long &LHS,
+ const unsigned long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template <>
+struct DenseMapInfo<unsigned long long> {
+ static constexpr unsigned long long getEmptyKey() { return ~0ULL; }
+ static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
+ static constexpr unsigned getHashValue(const unsigned long long &Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+
+ static constexpr bool isEqual(const unsigned long long &LHS,
+ const unsigned long long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for shorts.
+template <>
+struct DenseMapInfo<short> {
+ static constexpr short getEmptyKey() { return 0x7FFF; }
+ static constexpr short getTombstoneKey() { return -0x7FFF - 1; }
+ static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; }
+ static constexpr bool isEqual(const short &LHS, const short &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for ints.
+template <>
+struct DenseMapInfo<int> {
+ static constexpr int getEmptyKey() { return 0x7fffffff; }
+ static constexpr int getTombstoneKey() { return -0x7fffffff - 1; }
+ static constexpr unsigned getHashValue(const int &Val) {
+ return (unsigned)(Val * 37U);
+ }
+
+ static constexpr bool isEqual(const int &LHS, const int &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for longs.
+template <>
+struct DenseMapInfo<long> {
+ static constexpr long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+ }
+
+ static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; }
+
+ static constexpr unsigned getHashValue(const long &Val) {
+ return (unsigned)(Val * 37UL);
+ }
+
+ static constexpr bool isEqual(const long &LHS, const long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for long longs.
+template <>
+struct DenseMapInfo<long long> {
+ static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+ static constexpr long long getTombstoneKey() {
+ return -0x7fffffffffffffffLL - 1;
+ }
+
+ static constexpr unsigned getHashValue(const long long &Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+
+ static constexpr bool isEqual(const long long &LHS, const long long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template <typename T, typename U>
+struct DenseMapInfo<detail::DenseMapPair<T, U>> {
+ using Pair = detail::DenseMapPair<T, U>;
+ using FirstInfo = DenseMapInfo<T>;
+ using SecondInfo = DenseMapInfo<U>;
+
+ static constexpr Pair getEmptyKey() {
+ return detail::DenseMapPair<T, U>(FirstInfo::getEmptyKey(),
+ SecondInfo::getEmptyKey());
+ }
+
+ static constexpr Pair getTombstoneKey() {
+ return detail::DenseMapPair<T, U>(FirstInfo::getTombstoneKey(),
+ SecondInfo::getTombstoneKey());
+ }
+
+ static constexpr unsigned getHashValue(const Pair &PairVal) {
+ return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
+ SecondInfo::getHashValue(PairVal.second));
+ }
+
+ static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return FirstInfo::isEqual(LHS.first, RHS.first) &&
+ SecondInfo::isEqual(LHS.second, RHS.second);
+ }
+};
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_DENSE_MAP_INFO_H
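
For key types not covered above, a client supplies its own specialization with the four members documented in the primary template. A sketch for a hypothetical two-field key; the type and its sentinel values are illustrative only:

namespace __sanitizer {

struct PageKey {  // hypothetical key type
  uptr base;
  u32 generation;
};

template <>
struct DenseMapInfo<PageKey> {
  static constexpr PageKey getEmptyKey() { return {(uptr)-1, 0}; }
  static constexpr PageKey getTombstoneKey() { return {(uptr)-2, 0}; }
  static constexpr unsigned getHashValue(const PageKey &K) {
    return detail::combineHashValue(DenseMapInfo<uptr>::getHashValue(K.base),
                                    K.generation);
  }
  static constexpr bool isEqual(const PageKey &L, const PageKey &R) {
    return L.base == R.base && L.generation == R.generation;
  }
};

}  // namespace __sanitizer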
lib/tsan/sanitizer_common/sanitizer_errno.h
@@ -21,7 +21,7 @@
#include "sanitizer_errno_codes.h"
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_APPLE
# define __errno_location __error
#elif SANITIZER_ANDROID || SANITIZER_NETBSD
# define __errno_location __errno
lib/tsan/sanitizer_common/sanitizer_errno_codes.h
@@ -25,6 +25,7 @@ namespace __sanitizer {
#define errno_EBUSY 16
#define errno_EINVAL 22
#define errno_ENAMETOOLONG 36
+#define errno_ENOSYS 38
// Those might not be present or their values may differ on different platforms.
extern const int errno_EOWNERDEAD;
lib/tsan/sanitizer_common/sanitizer_file.cpp
@@ -19,6 +19,7 @@
#include "sanitizer_common.h"
#include "sanitizer_file.h"
+# include "sanitizer_interface_internal.h"
namespace __sanitizer {
@@ -75,6 +76,24 @@ void ReportFile::ReopenIfNecessary() {
fd_pid = pid;
}
+static void RecursiveCreateParentDirs(char *path) {
+ if (path[0] == '\0')
+ return;
+ for (int i = 1; path[i] != '\0'; ++i) {
+ char save = path[i];
+ if (!IsPathSeparator(path[i]))
+ continue;
+ path[i] = '\0';
+ if (!DirExists(path) && !CreateDir(path)) {
+ const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
+ WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ WriteToFile(kStderrFd, path, internal_strlen(path));
+ Die();
+ }
+ path[i] = save;
+ }
+}
+
void ReportFile::SetReportPath(const char *path) {
if (path) {
uptr len = internal_strlen(path);
@@ -95,6 +114,7 @@ void ReportFile::SetReportPath(const char *path) {
fd = kStdoutFd;
} else {
internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ RecursiveCreateParentDirs(path_prefix);
}
}
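
With the new helper wired into SetReportPath, a report path whose parent directories do not exist yet is handled by creating them on demand. A small sketch using the public interface (assuming a sanitizer-instrumented binary; the path is illustrative):

#include <sanitizer/common_interface_defs.h>

int main() {
  // Missing parents ("/tmp/sanitizer" and "/tmp/sanitizer/reports") are now
  // created; reports end up in /tmp/sanitizer/reports/app.<pid>.
  __sanitizer_set_report_path("/tmp/sanitizer/reports/app");
  return 0;
}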
lib/tsan/sanitizer_common/sanitizer_file.h
@@ -15,7 +15,7 @@
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H
-#include "sanitizer_interface_internal.h"
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
@@ -78,9 +78,12 @@ bool SupportsColoredOutput(fd_t fd);
// OS
const char *GetPwd();
bool FileExists(const char *filename);
+bool DirExists(const char *path);
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
+// Returns true on success, false on failure.
+bool CreateDir(const char *pathname);
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirect to the file. The files will always be closed
lib/tsan/sanitizer_common/sanitizer_flag_parser.cpp
@@ -13,9 +13,9 @@
#include "sanitizer_flag_parser.h"
#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
namespace __sanitizer {
lib/tsan/sanitizer_common/sanitizer_flag_parser.h
@@ -13,9 +13,9 @@
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
-#include "sanitizer_common.h"
namespace __sanitizer {
@@ -138,7 +138,7 @@ inline bool FlagHandler<uptr>::Parse(const char *value) {
template <>
inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
- uptr num_symbols_should_write = internal_snprintf(buffer, size, "%p", *t_);
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "0x%zx", *t_);
return num_symbols_should_write < size;
}
lib/tsan/sanitizer_common/sanitizer_flags.inc
@@ -62,16 +62,19 @@ COMMON_FLAG(
COMMON_FLAG(const char *, log_suffix, nullptr,
"String to append to log file name, e.g. \".txt\".")
COMMON_FLAG(
- bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"Write all sanitizer output to syslog in addition to other means of "
"logging.")
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
-COMMON_FLAG(bool, strip_env, 1,
+COMMON_FLAG(bool, strip_env, true,
"Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to "
- "avoid passing it to children. Default is true.")
-COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
+ "avoid passing it to children on Apple platforms. Default is true.")
+COMMON_FLAG(bool, verify_interceptors, true,
+ "Verify that interceptors are working on Apple platforms. Default "
+ "is true.")
+COMMON_FLAG(bool, detect_leaks, !SANITIZER_APPLE, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
@@ -160,6 +163,10 @@ COMMON_FLAG(
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
+COMMON_FLAG(const char *, cov_8bit_counters_out, "",
+ "If non-empty, write 8bit counters to this file. ")
+COMMON_FLAG(const char *, cov_pcs_out, "",
+ "If non-empty, write the coverage pc table to this file. ")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
@@ -175,6 +182,7 @@ COMMON_FLAG(bool, use_madv_dontdump, true,
"in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
"Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(bool, demangle, true, "Print demangled symbols.")
COMMON_FLAG(bool, symbolize_vs_style, false,
"Print file locations in Visual Studio style (e.g: "
" file(10,42): ...")
@@ -187,6 +195,8 @@ COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.")
+COMMON_FLAG(int, compress_stack_depot, 0,
+ "Compress stack depot to save memory.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
"If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
@@ -238,7 +248,7 @@ COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
- bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,
lib/tsan/sanitizer_common/sanitizer_flat_map.h
@@ -0,0 +1,162 @@
+//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAT_MAP_H
+#define SANITIZER_FLAT_MAP_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_local_address_space_view.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// Maps integers in range [0, kSize) to values.
+template <typename T, u64 kSize,
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+class FlatMap {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() { internal_memset(map_, 0, sizeof(map_)); }
+
+ constexpr uptr size() const { return kSize; }
+
+ bool contains(uptr idx) const {
+ CHECK_LT(idx, kSize);
+ return true;
+ }
+
+ T &operator[](uptr idx) {
+ DCHECK_LT(idx, kSize);
+ return map_[idx];
+ }
+
+ const T &operator[](uptr idx) const {
+ DCHECK_LT(idx, kSize);
+ return map_[idx];
+ }
+
+ private:
+ T map_[kSize];
+};
+
+// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
+// It is implemented as a two-dimensional array: an array of kSize1 pointers
+// to arrays of kSize2 values. The secondary arrays are mmapped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <typename T, u64 kSize1, u64 kSize2,
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+class TwoLevelMap {
+ static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");
+
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() {
+ mu_.Init();
+ internal_memset(map1_, 0, sizeof(map1_));
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ T *p = Get(i);
+ if (!p)
+ continue;
+ UnmapOrDie(p, kSize2);
+ }
+ Init();
+ }
+
+ uptr MemoryUsage() const {
+ uptr res = 0;
+ for (uptr i = 0; i < kSize1; i++) {
+ T *p = Get(i);
+ if (!p)
+ continue;
+ res += MmapSize();
+ }
+ return res;
+ }
+
+ constexpr uptr size() const { return kSize1 * kSize2; }
+ constexpr uptr size1() const { return kSize1; }
+ constexpr uptr size2() const { return kSize2; }
+
+ bool contains(uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ return Get(idx / kSize2);
+ }
+
+ const T &operator[](uptr idx) const {
+ DCHECK_LT(idx, kSize1 * kSize2);
+ T *map2 = GetOrCreate(idx / kSize2);
+ return *AddressSpaceView::Load(&map2[idx % kSize2]);
+ }
+
+ T &operator[](uptr idx) {
+ DCHECK_LT(idx, kSize1 * kSize2);
+ T *map2 = GetOrCreate(idx / kSize2);
+ return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
+ }
+
+ private:
+ constexpr uptr MmapSize() const {
+ return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
+ }
+
+ T *Get(uptr idx) const {
+ DCHECK_LT(idx, kSize1);
+ return reinterpret_cast<T *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ T *GetOrCreate(uptr idx) const {
+ DCHECK_LT(idx, kSize1);
+ // This code needs to use memory_order_acquire/consume, but we use
+ // memory_order_relaxed for performance reasons (matters for arm64). We
+ // expect memory_order_relaxed to be effectively equivalent to
+ // memory_order_consume in this case for all relevant architectures: all
+ // dependent data is reachable only by dereferencing the resulting pointer.
+ // If relaxed load fails to see stored ptr, the code will fall back to
+ // Create() and reload the value again with locked mutex as a memory
+ // barrier.
+ T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
+ if (LIKELY(res))
+ return res;
+ return Create(idx);
+ }
+
+ NOINLINE T *Create(uptr idx) const {
+ SpinMutexLock l(&mu_);
+ T *res = Get(idx);
+ if (!res) {
+ res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ return res;
+ }
+
+ mutable StaticSpinMutex mu_;
+ mutable atomic_uintptr_t map1_[kSize1];
+};
+
+template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
+using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;
+
+template <u64 kSize1, u64 kSize2,
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+using TwoLevelByteMap = TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy>;
+} // namespace __sanitizer
+
+#endif
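
A usage sketch for TwoLevelMap based only on the interface above; the sizes are arbitrary and the explicit Init() call mirrors how the runtime initializes such globals:

using namespace __sanitizer;

// 2^20 lazily-allocated counters: 1024 pointers to mmapped arrays of 1024 u32s.
static TwoLevelMap<u32, 1 << 10, 1 << 10> counters;

void InitCounters() { counters.Init(); }

void Bump(uptr idx) {
  counters[idx]++;  // the secondary array is mmapped on first touch
}

bool Seen(uptr idx) {
  // contains() only tells whether the secondary array exists yet.
  return counters.contains(idx) && counters[idx] != 0;
}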
lib/tsan/sanitizer_common/sanitizer_fuchsia.cpp
@@ -14,24 +14,25 @@
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <zircon/errors.h>
-#include <zircon/process.h>
-#include <zircon/syscalls.h>
-#include <zircon/utc.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
+# include <pthread.h>
+# include <stdlib.h>
+# include <unistd.h>
+# include <zircon/errors.h>
+# include <zircon/process.h>
+# include <zircon/syscalls.h>
+# include <zircon/utc.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_interface_internal.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_mutex.h"
namespace __sanitizer {
void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
uptr internal_sched_yield() {
- zx_status_t status = _zx_nanosleep(0);
+ zx_status_t status = _zx_thread_legacy_yield(0u);
CHECK_EQ(status, ZX_OK);
return 0; // Why doesn't this return void?
}
@@ -86,10 +87,9 @@ void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
}
void InitializePlatformEarly() {}
-void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
@@ -112,47 +112,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
CHECK_EQ(status, ZX_OK);
}
-enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
- // NOTE! It's important that this use internal_memset, because plain
- // memset might be intercepted (e.g., actually be __asan_memset).
- // Defining this so the compiler initializes each field, e.g.:
- // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
- // might result in the compiler generating a call to memset, which would
- // have the same problem.
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK_EQ(owner_, 0);
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
- return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
- zx_status_t status =
- _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
- ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
- if (status != ZX_ERR_BAD_STATE) // Normal race.
- CHECK_EQ(status, ZX_OK);
- }
-}
-
-void BlockingMutex::Unlock() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
- CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping) {
- zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
- CHECK_EQ(status, ZX_OK);
- }
-}
-
-void BlockingMutex::CheckLocked() const {
- auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
- CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
uptr GetPageSize() { return _zx_system_get_page_size(); }
uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
@@ -168,6 +127,8 @@ uptr GetMaxUserVirtualAddress() {
uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
+bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
+
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) {
size = RoundUpTo(size, GetPageSize());
@@ -315,6 +276,21 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
UNIMPLEMENTED();
}
+bool MprotectNoAccess(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
+}
+
+bool MprotectReadOnly(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
+ ZX_OK;
+}
+
+bool MprotectReadWrite(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
+ size) == ZX_OK;
+}
+
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
CHECK_GE(size, GetPageSize());
@@ -413,33 +389,12 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
}
// FIXME implement on this platform.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) {
- zx_handle_t vmo;
- zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
- if (status == ZX_OK) {
- uint64_t vmo_size;
- status = _zx_vmo_get_size(vmo, &vmo_size);
- if (status == ZX_OK) {
- if (vmo_size < max_len)
- max_len = vmo_size;
- size_t map_size = RoundUpTo(max_len, GetPageSize());
- uintptr_t addr;
- status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
- map_size, &addr);
- if (status == ZX_OK) {
- *buff = reinterpret_cast<char *>(addr);
- *buff_size = map_size;
- *read_len = max_len;
- }
- }
- _zx_handle_close(vmo);
- }
- if (status != ZX_OK && errno_p)
- *errno_p = status;
- return status == ZX_OK;
+ *errno_p = ZX_ERR_NOT_SUPPORTED;
+ return false;
}
void RawWrite(const char *buffer) {
@@ -516,6 +471,9 @@ u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
uptr GetRSS() { UNIMPLEMENTED(); }
+void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) {}
+
void InitializePlatformCommonFlags(CommonFlags *cf) {}
} // namespace __sanitizer
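
The new MprotectNoAccess/MprotectReadOnly/MprotectReadWrite wrappers give the Fuchsia port the same page-protection helpers the other ports expose. A sketch of toggling a guard page with them, using only sanitizer_common primitives (illustrative, not code from this change):

using namespace __sanitizer;

void GuardDemo() {
  uptr page = reinterpret_cast<uptr>(MmapOrDie(GetPageSizeCached(), "guard"));
  CHECK(MprotectNoAccess(page, GetPageSizeCached()));   // any access now faults
  CHECK(MprotectReadWrite(page, GetPageSizeCached()));  // usable again
  UnmapOrDie(reinterpret_cast<void *>(page), GetPageSizeCached());
}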
lib/tsan/sanitizer_common/sanitizer_hash.h
@@ -38,6 +38,30 @@ class MurMur2HashBuilder {
return x;
}
};
+
+class MurMur2Hash64Builder {
+ static const u64 m = 0xc6a4a7935bd1e995ull;
+ static const u64 seed = 0x9747b28c9747b28cull;
+ static const u64 r = 47;
+ u64 h;
+
+ public:
+ explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }
+ void add(u64 k) {
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h ^= k;
+ h *= m;
+ }
+ u64 get() {
+ u64 x = h;
+ x ^= x >> r;
+ x *= m;
+ x ^= x >> r;
+ return x;
+ }
+};
} //namespace __sanitizer
#endif // SANITIZER_HASH_H
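
The 64-bit builder mirrors the existing 32-bit one; a short sketch of folding several values into one 64-bit id using only the interface above (the function and its inputs are illustrative):

using namespace __sanitizer;

static u64 AccessId(u64 tid, u64 addr, u64 size) {
  MurMur2Hash64Builder h(size);  // seed the builder with one of the inputs
  h.add(tid);
  h.add(addr);
  return h.get();
}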
lib/tsan/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
@@ -1267,8 +1267,6 @@ static void ioctl_table_fill() {
_(TIOCGFLAGS, WRITE, sizeof(int));
_(TIOCSFLAGS, READ, sizeof(int));
_(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
- _(TIOCRCVFRAME, READ, sizeof(uptr));
- _(TIOCXMTFRAME, READ, sizeof(uptr));
_(TIOCPTMGET, WRITE, struct_ptmget_sz);
_(TIOCGRANTPT, NONE, 0);
_(TIOCPTSNAME, WRITE, struct_ptmget_sz);
@@ -1406,7 +1404,7 @@ static void ioctl_table_fill() {
_(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);
_(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);
#undef _
-} // NOLINT
+}
static bool ioctl_initialized = false;
lib/tsan/sanitizer_common/sanitizer_interface_internal.h
@@ -20,103 +20,134 @@
#include "sanitizer_internal_defs.h"
extern "C" {
- // Tell the tools to write their reports to "path.<pid>" instead of stderr.
- // The special values are "stdout" and "stderr".
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_set_report_path(const char *path);
- // Tell the tools to write their reports to the provided file descriptor
- // (casted to void *).
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_set_report_fd(void *fd);
- // Get the current full report file path, if a path was specified by
- // an earlier call to __sanitizer_set_report_path. Returns null otherwise.
- SANITIZER_INTERFACE_ATTRIBUTE
- const char *__sanitizer_get_report_path();
+// Tell the tools to write their reports to "path.<pid>" instead of stderr.
+// The special values are "stdout" and "stderr".
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_report_path(const char *path);
+// Tell the tools to write their reports to the provided file descriptor
+// (cast to void *).
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_report_fd(void *fd);
+// Get the current full report file path, if a path was specified by
+// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__sanitizer_get_report_path();
- typedef struct {
- int coverage_sandboxed;
- __sanitizer::sptr coverage_fd;
- unsigned int coverage_max_block_size;
- } __sanitizer_sandbox_arguments;
+typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+} __sanitizer_sandbox_arguments;
- // Notify the tools that the sandbox is going to be turned on.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+// Notify the tools that the sandbox is going to be turned on.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
- // This function is called by the tool when it has just finished reporting
- // an error. 'error_summary' is a one-line string that summarizes
- // the error message. This function can be overridden by the client.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_report_error_summary(const char *error_summary);
+// This function is called by the tool when it has just finished reporting
+// an error. 'error_summary' is a one-line string that summarizes
+// the error message. This function can be overridden by the client.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_report_error_summary(const char *error_summary);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
- const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
- // Returns 1 on the first call, then returns 0 thereafter. Called by the tool
- // to ensure only one report is printed when multiple errors occur
- // simultaneously.
- SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
+// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
+// to ensure only one report is printed when multiple errors occur
+// simultaneously.
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_annotate_contiguous_container(const void *beg,
- const void *end,
- const void *old_mid,
- const void *new_mid);
- SANITIZER_INTERFACE_ATTRIBUTE
- int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
- const void *end);
- SANITIZER_INTERFACE_ATTRIBUTE
- const void *__sanitizer_contiguous_container_find_bad_address(
- const void *beg, const void *mid, const void *end);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_annotate_contiguous_container(const void *beg, const void *end,
+ const void *old_mid,
+ const void *new_mid);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_annotate_double_ended_contiguous_container(
+ const void *storage_beg, const void *storage_end,
+ const void *old_container_beg, const void *old_container_end,
+ const void *new_container_beg, const void *new_container_end);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_verify_double_ended_contiguous_container(
+ const void *storage_beg, const void *container_beg,
+ const void *container_end, const void *storage_end);
+SANITIZER_INTERFACE_ATTRIBUTE
+const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
+ const void *mid,
+ const void *end);
+SANITIZER_INTERFACE_ATTRIBUTE
+const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
+ const void *storage_beg, const void *container_beg,
+ const void *container_end, const void *storage_end);
- SANITIZER_INTERFACE_ATTRIBUTE
- int __sanitizer_get_module_and_offset_for_pc(
- __sanitizer::uptr pc, char *module_path,
- __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
-
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp1();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp2();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp4();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp8();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp1();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp2();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp4();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp8();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_switch();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_div4();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_div8();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_gep();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_pc_indir();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
- __sanitizer::u32*);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_8bit_counters_init();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_cov_bool_flag_init();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_cov_pcs_init();
-} // extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
+ __sanitizer::uptr module_path_len,
+ void **pc_offset);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_switch();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_div4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_div8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_gep();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_indir();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load16();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store16();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard(__sanitizer::u32 *);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard_init(__sanitizer::u32 *, __sanitizer::u32 *);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_8bit_counters_init(char *, char *);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_bool_flag_init();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_pcs_init(const __sanitizer::uptr *, const __sanitizer::uptr *);
+} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
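The headline additions in this header are the double-ended contiguous-container annotation and verification entry points, which extend the existing single-ended annotations to containers that may grow or shrink at both ends of a fixed storage block. A hedged sketch of a call site (not part of the commit; user code would normally get the declaration from the public <sanitizer/common_interface_defs.h> header rather than this internal one):

// Prototype copied from the declarations above.
extern "C" void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg, const void *storage_end,
    const void *old_container_beg, const void *old_container_end,
    const void *new_container_beg, const void *new_container_end);

// Storage is [buf, buf + n); the in-use range moves from [old_b, old_e)
// to [new_b, new_e), e.g. after pushing elements at the front of a deque block.
void OnContainerRangeChanged(char *buf, unsigned n, char *old_b, char *old_e,
                             char *new_b, char *new_e) {
  __sanitizer_annotate_double_ended_contiguous_container(buf, buf + n, old_b,
                                                         old_e, new_b, new_e);
}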
lib/tsan/sanitizer_common/sanitizer_internal_defs.h
@@ -13,6 +13,7 @@
#define SANITIZER_DEFS_H
#include "sanitizer_platform.h"
+#include "sanitizer_redefine_builtins.h"
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
@@ -37,15 +38,6 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-// TLS is handled differently on different platforms
-#if SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_FREEBSD
-# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
- __attribute__((tls_model("initial-exec"))) thread_local
-#else
-# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
-#endif
-
//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
// portable, when possible define a default implementation using this macro:
@@ -73,7 +65,7 @@
// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined
// weak symbols. Mac OS X 10.9/Darwin 13 is the first release only supported
// by Xcode >= 4.5.
-#elif SANITIZER_MAC && \
+#elif SANITIZER_APPLE && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
@@ -125,6 +117,10 @@
# define __has_attribute(x) 0
#endif
+#if !defined(__has_cpp_attribute)
+# define __has_cpp_attribute(x) 0
+#endif
+
// For portability reasons we do not include stddef.h, stdint.h or any other
// system header, but we do need some basic types that are not defined
// in a portable way by the language itself.
@@ -135,8 +131,13 @@ namespace __sanitizer {
typedef unsigned long long uptr;
typedef signed long long sptr;
#else
+# if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE || SANITIZER_WINDOWS
typedef unsigned long uptr;
typedef signed long sptr;
+# else
+typedef unsigned int uptr;
+typedef signed int sptr;
+# endif
#endif // defined(_WIN64)
#if defined(__x86_64__)
// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
@@ -168,17 +169,17 @@ typedef long pid_t;
typedef int pid_t;
#endif
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_MAC || \
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE || \
(SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
- (SANITIZER_LINUX && defined(__x86_64__))
+ (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
+ (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;
-#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
+#if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE
typedef uptr operator_new_size_type;
#else
# if defined(__s390__) && !defined(__s390x__)
@@ -217,7 +218,7 @@ typedef u64 tid_t;
# define WARN_UNUSED_RESULT
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
-# define ALIAS(x) __attribute__((alias(x)))
+# define ALIAS(x) __attribute__((alias(SANITIZER_STRINGIFY(x))))
// Please only use the ALIGNED macro before the type.
// Using ALIGNED after the variable declaration is not portable!
# define ALIGNED(x) __attribute__((aligned(x)))
@@ -250,6 +251,20 @@ typedef u64 tid_t;
# define NOEXCEPT throw()
#endif
+#if __has_cpp_attribute(clang::fallthrough)
+# define FALLTHROUGH [[clang::fallthrough]]
+#elif __has_cpp_attribute(fallthrough)
+# define FALLTHROUGH [[fallthrough]]
+#else
+# define FALLTHROUGH
+#endif
+
+#if __has_attribute(uninitialized)
+# define UNINITIALIZED __attribute__((uninitialized))
+#else
+# define UNINITIALIZED
+#endif
+
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@@ -277,14 +292,17 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
// Check macro
-#define RAW_CHECK_MSG(expr, msg) do { \
- if (UNLIKELY(!(expr))) { \
- RawWrite(msg); \
- Die(); \
- } \
-} while (0)
+#define RAW_CHECK_MSG(expr, msg, ...) \
+ do { \
+ if (UNLIKELY(!(expr))) { \
+ const char* msgs[] = {msg, __VA_ARGS__}; \
+ for (const char* m : msgs) RawWrite(m); \
+ Die(); \
+ } \
+ } while (0)
-#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr "\n", )
+#define RAW_CHECK_VA(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)
#define CHECK_IMPL(c1, op, c2) \
do { \
@@ -366,13 +384,10 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
enum LinkerInitialized { LINKER_INITIALIZED = 0 };
#if !defined(_MSC_VER) || defined(__clang__)
-#if SANITIZER_S390_31
-#define GET_CALLER_PC() \
- (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
-#else
-#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
-#endif
-#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
+# define GET_CALLER_PC() \
+ ((__sanitizer::uptr)__builtin_extract_return_addr( \
+ __builtin_return_address(0)))
+# define GET_CURRENT_FRAME() ((__sanitizer::uptr)__builtin_frame_address(0))
inline void Trap() {
__builtin_trap();
}
@@ -381,13 +396,13 @@ extern "C" void* _ReturnAddress(void);
extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
# pragma intrinsic(_AddressOfReturnAddress)
-#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()
+# define GET_CALLER_PC() ((__sanitizer::uptr)_ReturnAddress())
// CaptureStackBackTrace doesn't need to know BP on Windows.
-#define GET_CURRENT_FRAME() \
- (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
+# define GET_CURRENT_FRAME() \
+ (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
extern "C" void __ud2(void);
-# pragma intrinsic(__ud2)
+# pragma intrinsic(__ud2)
inline void Trap() {
__ud2();
}
@@ -409,8 +424,14 @@ inline void Trap() {
(void)enable_fp; \
} while (0)
-constexpr u32 kInvalidTid = -1;
-constexpr u32 kMainTid = 0;
+// Internal thread identifier allocated by ThreadRegistry.
+typedef u32 Tid;
+constexpr Tid kInvalidTid = -1;
+constexpr Tid kMainTid = 0;
+
+// Stack depot stack identifier.
+typedef u32 StackID;
+const StackID kInvalidStackID = 0;
} // namespace __sanitizer
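Besides the SANITIZER_APPLE rename and the new FALLTHROUGH/UNINITIALIZED attribute macros, this hunk makes RAW_CHECK_MSG variadic: every extra argument must be a C string, and each one is written with RawWrite() before Die(). A rough usage sketch of the new RAW_CHECK_VA form (not part of the commit; the function and variable names are illustrative):

#include "sanitizer_common.h"         // for RawWrite() and Die()
#include "sanitizer_internal_defs.h"  // for RAW_CHECK_VA

using namespace __sanitizer;

void CheckRegion(uptr beg, uptr end, const char *name) {
  // On failure this writes "beg < end\n", then the extra strings, then dies.
  RAW_CHECK_VA(beg < end, "bad region: ", name, "\n");
}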
lib/tsan/sanitizer_common/sanitizer_leb128.h
@@ -0,0 +1,87 @@
+//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LEB128_H
+#define SANITIZER_LEB128_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+template <typename T, typename It>
+It EncodeSLEB128(T value, It begin, It end) {
+ bool more;
+ do {
+ u8 byte = value & 0x7f;
+ // NOTE: this assumes that this signed shift is an arithmetic right shift.
+ value >>= 7;
+ more = !((((value == 0) && ((byte & 0x40) == 0)) ||
+ ((value == -1) && ((byte & 0x40) != 0))));
+ if (more)
+ byte |= 0x80;
+ if (UNLIKELY(begin == end))
+ break;
+ *(begin++) = byte;
+ } while (more);
+ return begin;
+}
+
+template <typename T, typename It>
+It DecodeSLEB128(It begin, It end, T* v) {
+ T value = 0;
+ unsigned shift = 0;
+ u8 byte;
+ do {
+ if (UNLIKELY(begin == end))
+ return begin;
+ byte = *(begin++);
+ T slice = byte & 0x7f;
+ value |= slice << shift;
+ shift += 7;
+ } while (byte >= 128);
+ if (shift < 64 && (byte & 0x40))
+ value |= (-1ULL) << shift;
+ *v = value;
+ return begin;
+}
+
+template <typename T, typename It>
+It EncodeULEB128(T value, It begin, It end) {
+ do {
+ u8 byte = value & 0x7f;
+ value >>= 7;
+ if (value)
+ byte |= 0x80;
+ if (UNLIKELY(begin == end))
+ break;
+ *(begin++) = byte;
+ } while (value);
+ return begin;
+}
+
+template <typename T, typename It>
+It DecodeULEB128(It begin, It end, T* v) {
+ T value = 0;
+ unsigned shift = 0;
+ u8 byte;
+ do {
+ if (UNLIKELY(begin == end))
+ return begin;
+ byte = *(begin++);
+ T slice = byte & 0x7f;
+ value += slice << shift;
+ shift += 7;
+ } while (byte >= 128);
+ *v = value;
+ return begin;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LEB128_H
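Both encoders take an iterator range and return the iterator one past the last byte written; the decoders return the iterator one past the last byte consumed. A round-trip sketch for the unsigned form (not part of the commit):

#include "sanitizer_leb128.h"

using namespace __sanitizer;

bool RoundTripULEB128(u64 value) {
  u8 buf[10];  // any u64 fits in at most 10 LEB128 bytes
  u8 *enc_end = EncodeULEB128(value, buf, buf + sizeof(buf));
  u64 decoded = 0;
  u8 *dec_end = DecodeULEB128(buf, enc_end, &decoded);
  // The decoder stops exactly where the encoder stopped.
  return dec_end == enc_end && decoded == value;
}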
lib/tsan/sanitizer_common/sanitizer_libc.cpp
@@ -10,6 +10,9 @@
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//
+// Do not redefine builtins; this file is defining the builtin replacements.
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -46,7 +49,10 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
return 0;
}
-void *internal_memcpy(void *dest, const void *src, uptr n) {
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
+ const void *src,
+ uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
for (uptr i = 0; i < n; ++i)
@@ -54,7 +60,8 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
return dest;
}
-void *internal_memmove(void *dest, const void *src, uptr n) {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
+ void *dest, const void *src, uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
sptr i, signed_n = (sptr)n;
@@ -72,7 +79,8 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
-void *internal_memset(void* s, int c, uptr n) {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
+ uptr n) {
// Optimize for the most performance-critical case:
if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
u64 *p = reinterpret_cast<u64*>(s);
@@ -95,6 +103,7 @@ void *internal_memset(void* s, int c, uptr n) {
}
return s;
}
+} // extern "C"
uptr internal_strcspn(const char *s, const char *reject) {
uptr i;
@@ -258,6 +267,18 @@ s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
}
}
+uptr internal_wcslen(const wchar_t *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
+uptr internal_wcsnlen(const wchar_t *s, uptr maxlen) {
+ uptr i = 0;
+ while (i < maxlen && s[i]) i++;
+ return i;
+}
+
bool mem_is_zero(const char *beg, uptr size) {
CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
const char *end = beg + size;
lib/tsan/sanitizer_common/sanitizer_libc.h
@@ -24,15 +24,33 @@ namespace __sanitizer {
// internal_X() is a custom implementation of X() for use in RTL.
+extern "C" {
+// These are used as builtin replacements; see sanitizer_redefine_builtins.h.
+// In normal runtime code, use the __sanitizer::internal_X() aliases instead.
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
+ const void *src,
+ uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
+ void *dest, const void *src, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
+ uptr n);
+} // extern "C"
+
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
-void *internal_memcpy(void *dest, const void *src, uptr n);
-void *internal_memmove(void *dest, const void *src, uptr n);
+ALWAYS_INLINE void *internal_memcpy(void *dest, const void *src, uptr n) {
+ return __sanitizer_internal_memcpy(dest, src, n);
+}
+ALWAYS_INLINE void *internal_memmove(void *dest, const void *src, uptr n) {
+ return __sanitizer_internal_memmove(dest, src, n);
+}
// Should not be used in performance-critical places.
-void *internal_memset(void *s, int c, uptr n);
+ALWAYS_INLINE void *internal_memset(void *s, int c, uptr n) {
+ return __sanitizer_internal_memset(s, c, n);
+}
char* internal_strchr(const char *s, int c);
char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
@@ -49,7 +67,10 @@ char *internal_strrchr(const char *s, int c);
char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
-int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+int internal_snprintf(char *buffer, uptr length, const char *format, ...)
+ FORMAT(3, 4);
+uptr internal_wcslen(const wchar_t *s);
+uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
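The new internal_wcslen/internal_wcsnlen mirror wcslen(3)/wcsnlen(3): the returned length excludes the terminating NUL, and the bounded form never inspects more than maxlen characters. A small sketch (not part of the commit):

#include "sanitizer_libc.h"

using namespace __sanitizer;

void WideLengthExample() {
  const wchar_t *s = L"abcdef";
  uptr full = internal_wcslen(s);        // 6; the NUL is not counted
  uptr capped = internal_wcsnlen(s, 4);  // 4; stops after maxlen characters
  (void)full;
  (void)capped;
}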
lib/tsan/sanitizer_common/sanitizer_libignore.cpp
@@ -8,7 +8,7 @@
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
SANITIZER_NETBSD
#include "sanitizer_libignore.h"
@@ -22,9 +22,9 @@ LibIgnore::LibIgnore(LinkerInitialized) {
}
void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
- BlockingMutexLock lock(&mutex_);
+ Lock lock(&mutex_);
if (count_ >= kMaxLibs) {
- Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+ Report("%s: too many ignored libraries (max: %zu)\n", SanitizerToolName,
kMaxLibs);
Die();
}
@@ -36,7 +36,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
}
void LibIgnore::OnLibraryLoaded(const char *name) {
- BlockingMutexLock lock(&mutex_);
+ Lock lock(&mutex_);
// Try to match suppressions with symlink target.
InternalMmapVector<char> buf(kMaxPathLength);
if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
@@ -105,7 +105,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
continue;
if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
continue;
- VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
+ VReport(1, "Adding instrumented range 0x%zx-0x%zx from library '%s'\n",
range.beg, range.end, mod.full_name());
const uptr idx =
atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
@@ -125,5 +125,5 @@ void LibIgnore::OnLibraryUnloaded() {
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE ||
// SANITIZER_NETBSD
lib/tsan/sanitizer_common/sanitizer_libignore.h
@@ -77,7 +77,7 @@ class LibIgnore {
LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];
// Cold part:
- BlockingMutex mutex_;
+ Mutex mutex_;
uptr count_;
Lib libs_[kMaxLibs];
bool track_instrumented_libs_;
lib/tsan/sanitizer_common/sanitizer_linux.cpp
@@ -34,7 +34,7 @@
// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
// access stat from asm/stat.h, without conflicting with definition in
// sys/stat.h, we use this trick.
-#if defined(__mips64)
+#if SANITIZER_MIPS64
#include <asm/unistd.h>
#include <sys/types.h>
#define stat kernel_stat
@@ -78,8 +78,13 @@
#include <sys/personality.h>
#endif
+#if SANITIZER_LINUX && defined(__loongarch__)
+# include <sys/sysmacros.h>
+#endif
+
#if SANITIZER_FREEBSD
#include <sys/exec.h>
+#include <sys/procctl.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>
extern "C" {
@@ -123,8 +128,9 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
- SANITIZER_WORDSIZE == 64)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32))
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@@ -150,17 +156,51 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
namespace __sanitizer {
-#if SANITIZER_LINUX && defined(__x86_64__)
-#include "sanitizer_syscall_linux_x86_64.inc"
-#elif SANITIZER_LINUX && SANITIZER_RISCV64
-#include "sanitizer_syscall_linux_riscv64.inc"
-#elif SANITIZER_LINUX && defined(__aarch64__)
-#include "sanitizer_syscall_linux_aarch64.inc"
-#elif SANITIZER_LINUX && defined(__arm__)
-#include "sanitizer_syscall_linux_arm.inc"
-#else
-#include "sanitizer_syscall_generic.inc"
-#endif
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) {
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, oldset));
+}
+
+void BlockSignals(__sanitizer_sigset_t *oldset) {
+ __sanitizer_sigset_t set;
+ internal_sigfillset(&set);
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
+ // on any thread, setuid call hangs.
+ // See test/sanitizer_common/TestCases/Linux/setuid.c.
+ internal_sigdelset(&set, 33);
+# endif
+# if SANITIZER_LINUX
+ // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.
+ // If this signal is blocked, such calls cannot be handled and the process may
+ // hang.
+ internal_sigdelset(&set, 31);
+# endif
+ SetSigProcMask(&set, oldset);
+}
+
+ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
+ BlockSignals(&saved_);
+ if (copy)
+ internal_memcpy(copy, &saved_, sizeof(saved_));
+}
+
+ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
+
+# if SANITIZER_LINUX && defined(__x86_64__)
+# include "sanitizer_syscall_linux_x86_64.inc"
+# elif SANITIZER_LINUX && SANITIZER_RISCV64
+# include "sanitizer_syscall_linux_riscv64.inc"
+# elif SANITIZER_LINUX && defined(__aarch64__)
+# include "sanitizer_syscall_linux_aarch64.inc"
+# elif SANITIZER_LINUX && defined(__arm__)
+# include "sanitizer_syscall_linux_arm.inc"
+# elif SANITIZER_LINUX && defined(__hexagon__)
+# include "sanitizer_syscall_linux_hexagon.inc"
+# elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+# include "sanitizer_syscall_linux_loongarch64.inc"
+# else
+# include "sanitizer_syscall_generic.inc"
+# endif
// --------------- sanitizer_libc.h
#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
@@ -204,7 +244,7 @@ uptr internal_close(fd_t fd) {
}
uptr internal_open(const char *filename, int flags) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
#else
return internal_syscall(SYSCALL(open), (uptr)filename, flags);
@@ -212,7 +252,7 @@ uptr internal_open(const char *filename, int flags) {
}
uptr internal_open(const char *filename, int flags, u32 mode) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
mode);
#else
@@ -241,7 +281,7 @@ uptr internal_ftruncate(fd_t fd, uptr size) {
return res;
}
-#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX
+#if (!SANITIZER_LINUX_USES_64BIT_SYSCALLS || SANITIZER_SPARC) && SANITIZER_LINUX
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -260,7 +300,29 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
}
#endif
-#if defined(__mips64)
+#if SANITIZER_LINUX && defined(__loongarch__)
+static void statx_to_stat(struct statx *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
+ out->st_ino = in->stx_ino;
+ out->st_mode = in->stx_mode;
+ out->st_nlink = in->stx_nlink;
+ out->st_uid = in->stx_uid;
+ out->st_gid = in->stx_gid;
+ out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
+ out->st_size = in->stx_size;
+ out->st_blksize = in->stx_blksize;
+ out->st_blocks = in->stx_blocks;
+ out->st_atime = in->stx_atime.tv_sec;
+ out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
+ out->st_mtime = in->stx_mtime.tv_sec;
+ out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
+ out->st_ctime = in->stx_ctime.tv_sec;
+ out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
+}
+#endif
+
+#if SANITIZER_MIPS64
// Undefine compatibility macros from <sys/stat.h>
// so that they would not clash with the kernel_stat
// st_[a|m|c]time fields
@@ -311,52 +373,65 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
#endif
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
+# if defined(__loongarch__)
+ struct statx bufx;
+ int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+ AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
+ statx_to_stat(&bufx, (struct stat *)buf);
+ return res;
+# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+ !SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
0);
-#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
-# if defined(__mips64)
- // For mips64, stat syscall fills buffer in the format of kernel_stat
- struct kernel_stat kbuf;
- int res = internal_syscall(SYSCALL(stat), path, &kbuf);
- kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+# else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
+ (uptr)&buf64, 0);
+ stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# else
- return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
-# endif
-#else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-#endif
+# endif
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
+# if defined(__loongarch__)
+ struct statx bufx;
+ int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+ AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
+ STATX_BASIC_STATS, (uptr)&bufx);
+ statx_to_stat(&bufx, (struct stat *)buf);
+ return res;
+# elif (defined(_LP64) || SANITIZER_X32 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+ !SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
-#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
-# if SANITIZER_MIPS64
- // For mips64, lstat syscall fills buffer in the format of kernel_stat
- struct kernel_stat kbuf;
- int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
- kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+# else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
+ (uptr)&buf64, AT_SYMLINK_NOFOLLOW);
+ stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# else
- return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
-# endif
-#else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-#endif
+# endif
}
uptr internal_fstat(fd_t fd, void *buf) {
@@ -367,9 +442,15 @@ uptr internal_fstat(fd_t fd, void *buf) {
int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
kernel_stat_to_stat(&kbuf, (struct stat *)buf);
return res;
-# else
+# elif SANITIZER_LINUX && defined(__loongarch__)
+ struct statx bufx;
+ int res = internal_syscall(SYSCALL(statx), fd, "", AT_EMPTY_PATH,
+ STATX_BASIC_STATS, (uptr)&bufx);
+ statx_to_stat(&bufx, (struct stat *)buf);
+ return res;
+# else
return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
-# endif
+# endif
#else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
@@ -390,7 +471,7 @@ uptr internal_dup(int oldfd) {
}
uptr internal_dup2(int oldfd, int newfd) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
#else
return internal_syscall(SYSCALL(dup2), oldfd, newfd);
@@ -398,7 +479,7 @@ uptr internal_dup2(int oldfd, int newfd) {
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
bufsize);
#else
@@ -407,7 +488,7 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
}
uptr internal_unlink(const char *path) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
#else
return internal_syscall(SYSCALL(unlink), (uptr)path);
@@ -415,15 +496,15 @@ uptr internal_unlink(const char *path) {
}
uptr internal_rename(const char *oldpath, const char *newpath) {
-#if defined(__riscv)
+# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath, 0);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath);
-#else
+# else
return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
-#endif
+# endif
}
uptr internal_sched_yield() {
@@ -460,17 +541,20 @@ bool FileExists(const char *filename) {
if (ShouldMockFailureToOpen(filename))
return false;
struct stat st;
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
- if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
-#else
if (internal_stat(filename, &st))
-#endif
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
-#if !SANITIZER_NETBSD
+bool DirExists(const char *path) {
+ struct stat st;
+ if (internal_stat(path, &st))
+ return false;
+ return S_ISDIR(st.st_mode);
+}
+
+# if !SANITIZER_NETBSD
tid_t GetTid() {
#if SANITIZER_FREEBSD
long Tid;
@@ -659,48 +743,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
# endif
}
-enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK_EQ(owner_, 0);
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
- return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-#if SANITIZER_FREEBSD
- _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
-#elif SANITIZER_NETBSD
- sched_yield(); /* No userspace futex-like synchronization */
-#else
- internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
- 0, 0, 0);
-#endif
- }
-}
-
-void BlockingMutex::Unlock() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
- CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping) {
-#if SANITIZER_FREEBSD
- _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
-#elif SANITIZER_NETBSD
- /* No userspace futex-like synchronization */
-#else
- internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
-#endif
- }
-}
-
-void BlockingMutex::CheckLocked() const {
- auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
- CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
@@ -711,17 +753,17 @@ void BlockingMutex::CheckLocked() const {
// Not used
#else
struct linux_dirent {
-#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
+# if SANITIZER_X32 || SANITIZER_LINUX
u64 d_ino;
u64 d_off;
-#else
+# else
unsigned long d_ino;
unsigned long d_off;
-#endif
+# endif
unsigned short d_reclen;
-#if defined(__aarch64__) || SANITIZER_RISCV64
+# if SANITIZER_LINUX
unsigned char d_type;
-#endif
+# endif
char d_name[256];
};
#endif
@@ -757,11 +799,11 @@ int internal_dlinfo(void *handle, int request, void *p) {
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
-#else
+# else
return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
-#endif
+# endif
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
@@ -772,18 +814,29 @@ uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
-#endif
+# if defined(__x86_64__)
+# include <asm/unistd_64.h>
+// Currently internal_arch_prctl() is only needed on x86_64.
+uptr internal_arch_prctl(int option, uptr arg2) {
+ return internal_syscall(__NR_arch_prctl, option, arg2);
+}
+# endif
+# endif
uptr internal_sigaltstack(const void *ss, void *oss) {
return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}
int internal_fork() {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
+# if SANITIZER_S390
+ return internal_syscall(SYSCALL(clone), 0, SIGCHLD);
+# else
return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
-#else
+# endif
+# else
return internal_syscall(SYSCALL(fork));
-#endif
+# endif
}
#if SANITIZER_FREEBSD
@@ -911,6 +964,10 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
return k_set->sig[idx] & ((uptr)1 << bit);
}
#elif SANITIZER_FREEBSD
+uptr internal_procctl(int type, int id, int cmd, void *data) {
+ return internal_syscall(SYSCALL(procctl), type, id, cmd, data);
+}
+
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
sigset_t *rset = reinterpret_cast<sigset_t *>(set);
sigdelset(rset, signum);
@@ -1052,7 +1109,7 @@ uptr GetMaxVirtualAddress() {
#if SANITIZER_NETBSD && defined(__x86_64__)
return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
#elif SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__) || defined(__aarch64__)
+# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@@ -1060,10 +1117,11 @@ uptr GetMaxVirtualAddress() {
// of the address space, so simply checking the stack address is not enough.
// This should (does) work for both PowerPC64 Endian modes.
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+ // loongarch64 also has multiple address space layouts: default is 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
#elif SANITIZER_RISCV64
return (1ULL << 38) - 1;
-# elif defined(__mips64)
+# elif SANITIZER_MIPS64
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# elif defined(__s390x__)
return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
@@ -1217,7 +1275,8 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
#endif
-#if defined(__x86_64__) && SANITIZER_LINUX
+#if SANITIZER_LINUX
+#if defined(__x86_64__)
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@@ -1399,7 +1458,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
#elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
- long long res;
+ register long long res __asm__("x0");
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
@@ -1447,6 +1506,47 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "x30", "memory");
return res;
}
+#elif SANITIZER_LOONGARCH64
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ if (!fn || !child_stack)
+ return -EINVAL;
+
+ CHECK_EQ(0, (uptr)child_stack % 16);
+
+ register int res __asm__("$a0");
+ register int __flags __asm__("$a0") = flags;
+ register void *__stack __asm__("$a1") = child_stack;
+ register int *__ptid __asm__("$a2") = parent_tidptr;
+ register int *__ctid __asm__("$a3") = child_tidptr;
+ register void *__tls __asm__("$a4") = newtls;
+ register int (*__fn)(void *) __asm__("$a5") = fn;
+ register void *__arg __asm__("$a6") = arg;
+ register int nr_clone __asm__("$a7") = __NR_clone;
+
+ __asm__ __volatile__(
+ "syscall 0\n"
+
+ // if ($a0 != 0)
+ // return $a0;
+ "bnez $a0, 1f\n"
+
+ // In the child, now. Call "fn(arg)".
+ "move $a0, $a6\n"
+ "jirl $ra, $a5, 0\n"
+
+ // Call _exit($a0).
+ "addi.d $a7, $zero, %9\n"
+ "syscall 0\n"
+
+ "1:\n"
+
+ : "=r"(res)
+ : "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__ctid), "r"(__tls),
+ "r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
+ : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8");
+ return res;
+}
#elif defined(__powerpc64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
@@ -1556,7 +1656,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
return res;
}
-#elif defined(__i386__) && SANITIZER_LINUX
+#elif defined(__i386__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
int res;
@@ -1621,7 +1721,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory");
return res;
}
-#elif defined(__arm__) && SANITIZER_LINUX
+#elif defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
unsigned int res;
@@ -1687,7 +1787,8 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory");
return res;
}
-#endif // defined(__x86_64__) && SANITIZER_LINUX
+#endif
+#endif // SANITIZER_LINUX
#if SANITIZER_LINUX
int internal_uname(struct utsname *buf) {
@@ -1778,23 +1879,18 @@ HandleSignalMode GetHandleSignalMode(int signum) {
#if !SANITIZER_GO
void *internal_start_thread(void *(*func)(void *arg), void *arg) {
+ if (&real_pthread_create == 0)
+ return nullptr;
// Start the thread with signals blocked, otherwise it can steal user signals.
- __sanitizer_sigset_t set, old;
- internal_sigfillset(&set);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
- // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
- // on any thread, setuid call hangs (see test/tsan/setuid.c).
- internal_sigdelset(&set, 33);
-#endif
- internal_sigprocmask(SIG_SETMASK, &set, &old);
+ ScopedBlockSignals block(nullptr);
void *th;
real_pthread_create(&th, nullptr, func, arg);
- internal_sigprocmask(SIG_SETMASK, &old, nullptr);
return th;
}
void internal_join_thread(void *th) {
- real_pthread_join(th, nullptr);
+ if (&real_pthread_join)
+ real_pthread_join(th, nullptr);
}
#else
void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
@@ -1802,7 +1898,7 @@ void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
void internal_join_thread(void *th) {}
#endif
-#if defined(__aarch64__)
+#if SANITIZER_LINUX && defined(__aarch64__)
// Android headers in the older NDK releases miss this definition.
struct __sanitizer_esr_context {
struct _aarch64_ctx head;
@@ -1811,7 +1907,7 @@ struct __sanitizer_esr_context {
static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
static const u32 kEsrMagic = 0x45535201;
- u8 *aux = ucontext->uc_mcontext.__reserved;
+ u8 *aux = reinterpret_cast<u8 *>(ucontext->uc_mcontext.__reserved);
while (true) {
_aarch64_ctx *ctx = (_aarch64_ctx *)aux;
if (ctx->size == 0) break;
@@ -1823,6 +1919,11 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
}
return false;
}
+#elif SANITIZER_FREEBSD && defined(__aarch64__)
+// FreeBSD doesn't provide ESR in the ucontext.
+static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
+ return false;
+}
#endif
using Context = ucontext_t;
@@ -1841,7 +1942,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#else
uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
#endif // SANITIZER_FREEBSD
- return err & PF_WRITE ? WRITE : READ;
+ return err & PF_WRITE ? Write : Read;
#elif defined(__mips__)
uint32_t *exception_source;
uint32_t faulty_instruction;
@@ -1864,7 +1965,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x2a: // swl
case 0x2e: // swr
#endif
- return SignalContext::WRITE;
+ return SignalContext::Write;
case 0x20: // lb
case 0x24: // lbu
@@ -1879,27 +1980,34 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x22: // lwl
case 0x26: // lwr
#endif
- return SignalContext::READ;
+ return SignalContext::Read;
#if __mips_isa_rev == 6
case 0x3b: // pcrel
op_code = (faulty_instruction >> 19) & 0x3;
switch (op_code) {
case 0x1: // lwpc
case 0x2: // lwupc
- return SignalContext::READ;
+ return SignalContext::Read;
}
#endif
}
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
#elif defined(__arm__)
static const uptr FSR_WRITE = 1U << 11;
uptr fsr = ucontext->uc_mcontext.error_code;
- return fsr & FSR_WRITE ? WRITE : READ;
+ return fsr & FSR_WRITE ? Write : Read;
#elif defined(__aarch64__)
static const u64 ESR_ELx_WNR = 1U << 6;
u64 esr;
- if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
- return esr & ESR_ELx_WNR ? WRITE : READ;
+ if (!Aarch64GetESR(ucontext, &esr)) return Unknown;
+ return esr & ESR_ELx_WNR ? Write : Read;
+#elif defined(__loongarch__)
+ u32 flags = ucontext->uc_mcontext.__flags;
+ if (flags & SC_ADDRERR_RD)
+ return SignalContext::Read;
+ if (flags & SC_ADDRERR_WR)
+ return SignalContext::Write;
+ return SignalContext::Unknown;
#elif defined(__sparc__)
// Decode the instruction to determine the access type.
// From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
@@ -1915,9 +2023,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#endif
#endif
u32 instr = *(u32 *)pc;
- return (instr >> 21) & 1 ? WRITE: READ;
+ return (instr >> 21) & 1 ? Write: Read;
#elif defined(__riscv)
+#if SANITIZER_FREEBSD
+ unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
+#else
unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];
+#endif
unsigned faulty_instruction = *(uint16_t *)pc;
#if defined(__riscv_compressed)
@@ -1931,7 +2043,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_xlen == 64
case 0b10'011: // c.ldsp (rd != x0)
#endif
- return rd ? SignalContext::READ : SignalContext::UNKNOWN;
+ return rd ? SignalContext::Read : SignalContext::Unknown;
case 0b00'010: // c.lw
#if __riscv_flen >= 32 && __riscv_xlen == 32
case 0b10'011: // c.flwsp
@@ -1943,7 +2055,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b00'001: // c.fld
case 0b10'001: // c.fldsp
#endif
- return SignalContext::READ;
+ return SignalContext::Read;
case 0b00'110: // c.sw
case 0b10'110: // c.swsp
#if __riscv_flen >= 32 || __riscv_xlen == 64
@@ -1954,9 +2066,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b00'101: // c.fsd
case 0b10'101: // c.fsdsp
#endif
- return SignalContext::WRITE;
+ return SignalContext::Write;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
}
#endif
@@ -1974,9 +2086,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#endif
case 0b100: // lbu
case 0b101: // lhu
- return SignalContext::READ;
+ return SignalContext::Read;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
case 0b0100011: // stores
switch (funct3) {
@@ -1986,9 +2098,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_xlen == 64
case 0b011: // sd
#endif
- return SignalContext::WRITE;
+ return SignalContext::Write;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
#if __riscv_flen >= 32
case 0b0000111: // floating-point loads
@@ -1997,9 +2109,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_flen == 64
case 0b011: // fld
#endif
- return SignalContext::READ;
+ return SignalContext::Read;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
case 0b0100111: // floating-point stores
switch (funct3) {
@@ -2007,17 +2119,17 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_flen == 64
case 0b011: // fsd
#endif
- return SignalContext::WRITE;
+ return SignalContext::Write;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
#endif
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
#else
(void)ucontext;
- return UNKNOWN; // FIXME: Implement.
+ return Unknown; // FIXME: Implement.
#endif
}
@@ -2044,10 +2156,17 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
#elif defined(__aarch64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_gpregs.gp_elr;
+ *bp = ucontext->uc_mcontext.mc_gpregs.gp_x[29];
+ *sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
+# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
+# endif
#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
@@ -2092,12 +2211,19 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*sp = ucontext->uc_mcontext.gregs[REG_UESP];
# endif
#elif defined(__powerpc__) || defined(__powerpc64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.mc_srr0;
+ *sp = ucontext->uc_mcontext.mc_frame[1];
+ *bp = ucontext->uc_mcontext.mc_frame[31];
+# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
+# endif
#elif defined(__sparc__)
#if defined(__arch64__) || defined(__sparcv9)
#define STACK_BIAS 2047
@@ -2136,12 +2262,28 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*sp = ucontext->uc_mcontext.gregs[15];
#elif defined(__riscv)
ucontext_t *ucontext = (ucontext_t*)context;
+# if SANITIZER_FREEBSD
+ *pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
+ *bp = ucontext->uc_mcontext.mc_gpregs.gp_s[0];
+ *sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
+# else
*pc = ucontext->uc_mcontext.__gregs[REG_PC];
*bp = ucontext->uc_mcontext.__gregs[REG_S0];
*sp = ucontext->uc_mcontext.__gregs[REG_SP];
-#else
-# error "Unsupported arch"
-#endif
+# endif
+# elif defined(__hexagon__)
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.r30;
+ *sp = ucontext->uc_mcontext.r29;
+# elif defined(__loongarch__)
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.__pc;
+ *bp = ucontext->uc_mcontext.__gregs[22];
+ *sp = ucontext->uc_mcontext.__gregs[3];
+# else
+# error "Unsupported arch"
+# endif
}
void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
@@ -2150,10 +2292,6 @@ void InitializePlatformEarly() {
// Do nothing.
}
-void MaybeReexec() {
- // No need to re-exec on Linux.
-}
-
void CheckASLR() {
#if SANITIZER_NETBSD
int mib[3];
@@ -2175,49 +2313,35 @@ void CheckASLR() {
GetArgv()[0]);
Die();
}
-#elif SANITIZER_PPC64V2
- // Disable ASLR for Linux PPC64LE.
- int old_personality = personality(0xffffffff);
- if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
- VReport(1, "WARNING: Program is being run with address space layout "
- "randomization (ASLR) enabled which prevents the thread and "
- "memory sanitizers from working on powerpc64le.\n"
- "ASLR will be disabled and the program re-executed.\n");
- CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
- ReExec();
- }
#elif SANITIZER_FREEBSD
- int aslr_pie;
- uptr len = sizeof(aslr_pie);
-#if SANITIZER_WORDSIZE == 64
- if (UNLIKELY(internal_sysctlbyname("kern.elf64.aslr.pie_enable",
- &aslr_pie, &len, NULL, 0) == -1)) {
+ int aslr_status;
+ int r = internal_procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status);
+ if (UNLIKELY(r == -1)) {
// We're making things less 'dramatic' here since
- // the OID is not necessarily guaranteed to be here
+ // the cmd is not necessarily guaranteed to be here
// just yet regarding FreeBSD release
return;
}
-
- if (aslr_pie > 0) {
+ if ((aslr_status & PROC_ASLR_ACTIVE) != 0) {
Printf("This sanitizer is not compatible with enabled ASLR "
"and binaries compiled with PIE\n");
Die();
}
-#endif
- // there might be 32 bits compat for 64 bits
- if (UNLIKELY(internal_sysctlbyname("kern.elf32.aslr.pie_enable",
- &aslr_pie, &len, NULL, 0) == -1)) {
- return;
- }
-
- if (aslr_pie > 0) {
- Printf("This sanitizer is not compatible with enabled ASLR "
- "and binaries compiled with PIE\n");
- Die();
+# elif SANITIZER_PPC64V2
+ // Disable ASLR for Linux PPC64LE.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1,
+ "WARNING: Program is being run with address space layout "
+ "randomization (ASLR) enabled which prevents the thread and "
+ "memory sanitizers from working on powerpc64le.\n"
+ "ASLR will be disabled and the program re-executed.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ ReExec();
}
-#else
+# else
// Do nothing
-#endif
+# endif
}
void CheckMPROTECT() {
lib/tsan/sanitizer_common/sanitizer_linux.h
@@ -49,26 +49,44 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
-#if SANITIZER_GLIBC
+
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
+void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
+struct ScopedBlockSignals {
+ explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
+ ~ScopedBlockSignals();
+
+ ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;
+ ScopedBlockSignals(const ScopedBlockSignals &) = delete;
+
+ private:
+ __sanitizer_sigset_t saved_;
+};
+
+# if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
#endif
// Linux-only syscalls.
#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+# if defined(__x86_64__)
+uptr internal_arch_prctl(int option, uptr arg2);
+# endif
// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
- defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
- defined(__arm__) || SANITIZER_RISCV64
+# if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
+ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
int internal_uname(struct utsname *buf);
#elif SANITIZER_FREEBSD
+uptr internal_procctl(int type, int id, int cmd, void *data);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#elif SANITIZER_NETBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
@@ -135,6 +153,9 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
"rdhwr %0,$29\n" \
".set pop\n" : "=r"(__v)); \
__v; })
+#elif defined (__riscv)
+# define __get_tls() \
+ ({ void** __v; __asm__("mv %0, tp" : "=r"(__v)); __v; })
#elif defined(__i386__)
# define __get_tls() \
({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
lib/tsan/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -27,6 +27,7 @@
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
+#include "sanitizer_solaris.h"
#if SANITIZER_NETBSD
#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
@@ -62,6 +63,7 @@
#endif
#if SANITIZER_SOLARIS
+#include <stddef.h>
#include <stdlib.h>
#include <thread.h>
#endif
@@ -146,7 +148,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
- my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
+ internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS
@@ -203,7 +205,8 @@ void InitTlsSize() {
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
-#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
+#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || \
+ defined(__loongarch__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
@@ -216,14 +219,13 @@ void InitTlsSize() { }
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
-#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
+#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
+ !SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
-uptr ThreadDescriptorSize() {
- uptr val = atomic_load_relaxed(&thread_descriptor_size);
- if (val)
- return val;
+static uptr ThreadDescriptorSizeFallback() {
+ uptr val = 0;
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
int major;
int minor;
@@ -264,6 +266,8 @@ uptr ThreadDescriptorSize() {
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
+#elif SANITIZER_LOONGARCH64
+ val = 1856; // from glibc 2.36
#elif SANITIZER_RISCV64
int major;
int minor;
@@ -285,12 +289,26 @@ uptr ThreadDescriptorSize() {
#elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21
#endif
+ return val;
+}
+
+uptr ThreadDescriptorSize() {
+ uptr val = atomic_load_relaxed(&thread_descriptor_size);
if (val)
- atomic_store_relaxed(&thread_descriptor_size, val);
+ return val;
+ // _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in
+ // glibc 2.34 and later.
+ if (unsigned *psizeof = static_cast<unsigned *>(
+ dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread")))
+ val = *psizeof;
+ if (!val)
+ val = ThreadDescriptorSizeFallback();
+ atomic_store_relaxed(&thread_descriptor_size, val);
return val;
}
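The reworked ThreadDescriptorSize() above first asks glibc for sizeof(struct pthread) and only then falls back to the per-version table. A standalone sketch of the same lookup (assumes glibc 2.34 or later, which exports the GLIBC_PRIVATE symbol _thread_db_sizeof_pthread):

#include <dlfcn.h>
#include <stdio.h>

int main(void) {
  unsigned *psizeof =
      (unsigned *)dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread");
  if (psizeof)
    printf("sizeof(struct pthread) = %u\n", *psizeof);
  else
    printf("symbol not exported; a hard-coded fallback table is needed\n");
  return 0;
}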
-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
+ SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
@@ -300,6 +318,8 @@ static uptr TlsPreTcbSize() {
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
#elif SANITIZER_RISCV64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+#elif SANITIZER_LOONGARCH64
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
@@ -308,7 +328,6 @@ static uptr TlsPreTcbSize() {
}
#endif
-#if !SANITIZER_GO
namespace {
struct TlsBlock {
uptr begin, end, align;
@@ -339,19 +358,43 @@ static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
extern "C" void *__tls_get_addr(size_t *);
#endif
+static size_t main_tls_modid;
+
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
void *data) {
- if (!info->dlpi_tls_modid)
+ size_t tls_modid;
+#if SANITIZER_SOLARIS
+ // dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
+ // dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
+ // 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
+ // 11.4 to match other implementations.
+ if (size >= offsetof(dl_phdr_info_test, dlpi_tls_modid))
+ main_tls_modid = 1;
+ else
+ main_tls_modid = 0;
+ g_use_dlpi_tls_data = 0;
+ Rt_map *map;
+ dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
+ tls_modid = map->rt_tlsmodid;
+#else
+ main_tls_modid = 1;
+ tls_modid = info->dlpi_tls_modid;
+#endif
+
+ if (tls_modid < main_tls_modid)
return 0;
- uptr begin = (uptr)info->dlpi_tls_data;
+ uptr begin;
+#if !SANITIZER_SOLARIS
+ begin = (uptr)info->dlpi_tls_data;
+#endif
if (!g_use_dlpi_tls_data) {
// Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
// and FreeBSD.
#ifdef __s390__
begin = (uptr)__builtin_thread_pointer() +
- TlsGetOffset(info->dlpi_tls_modid, 0);
+ TlsGetOffset(tls_modid, 0);
#else
- size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
+ size_t mod_and_off[2] = {tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
#endif
}
@@ -359,7 +402,7 @@ static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
if (info->dlpi_phdr[i].p_type == PT_TLS) {
static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
- info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});
+ info->dlpi_phdr[i].p_align, tls_modid});
break;
}
return 0;
@@ -371,11 +414,11 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
uptr len = ranges.size();
Sort(ranges.begin(), len);
- // Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,
- // this module is guaranteed to exist and is one of the initially loaded
- // modules.
+ // Find the range with tls_modid == main_tls_modid. For glibc, because
+ // libc.so uses PT_TLS, this module is guaranteed to exist and is one of
+ // the initially loaded modules.
uptr one = 0;
- while (one != len && ranges[one].tls_modid != 1) ++one;
+ while (one != len && ranges[one].tls_modid != main_tls_modid) ++one;
if (one == len) {
// This may happen with musl if no module uses PT_TLS.
*addr = 0;
@@ -384,21 +427,20 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
return;
}
// Find the maximum consecutive ranges. We consider two modules consecutive if
- // the gap is smaller than the alignment. The dynamic loader places static TLS
- // blocks this way not to waste space.
+ // the gap is smaller than the alignment of the latter range. The dynamic
+ // loader places static TLS blocks this way not to waste space.
uptr l = one;
*align = ranges[l].align;
- while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
+ while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l].align)
*align = Max(*align, ranges[--l].align);
uptr r = one + 1;
- while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
+ while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r].align)
*align = Max(*align, ranges[r++].align);
*addr = ranges[l].begin;
*size = ranges[r - 1].end - ranges[l].begin;
}
-#endif // !SANITIZER_GO
#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
- // SANITIZER_LINUX) && !SANITIZER_ANDROID
+ // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
@@ -452,7 +494,11 @@ static void GetTls(uptr *addr, uptr *size) {
#elif SANITIZER_GLIBC && defined(__x86_64__)
// For aarch64 and x86-64, use an O(1) approach which requires relatively
// precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
+# if SANITIZER_X32
+ asm("mov %%fs:8,%0" : "=r"(*addr));
+# else
asm("mov %%fs:16,%0" : "=r"(*addr));
+# endif
*size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
@@ -460,6 +506,15 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
*size = g_tls_size + ThreadDescriptorSize();
+#elif SANITIZER_GLIBC && defined(__loongarch__)
+# ifdef __clang__
+ *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+# else
+ asm("or %0,$tp,$zero" : "=r"(*addr));
+ *addr -= ThreadDescriptorSize();
+# endif
+ *size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;
@@ -467,7 +522,7 @@ static void GetTls(uptr *addr, uptr *size) {
const uptr pre_tcb_size = TlsPreTcbSize();
*addr = tp - pre_tcb_size;
*size = g_tls_size + pre_tcb_size;
-#elif SANITIZER_FREEBSD || SANITIZER_LINUX
+#elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
uptr align;
GetStaticTlsBoundary(addr, size, &align);
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
@@ -528,10 +583,6 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
-#elif SANITIZER_SOLARIS
- // FIXME
- *addr = 0;
- *size = 0;
#else
#error "Unknown OS"
#endif
@@ -603,6 +654,34 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
bool writable = phdr->p_flags & PF_W;
cur_module.addAddressRange(cur_beg, cur_end, executable,
writable);
+ } else if (phdr->p_type == PT_NOTE) {
+# ifdef NT_GNU_BUILD_ID
+ uptr off = 0;
+ while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
+ auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
+ phdr->p_vaddr + off);
+ constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte.
+ static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4.");
+ if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
+ if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
+ phdr->p_memsz) {
+ // Something is very wrong, bail out instead of reading potentially
+ // arbitrary memory.
+ break;
+ }
+ const char *name =
+ reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
+ if (internal_memcmp(name, "GNU", 3) == 0) {
+ const char *value = reinterpret_cast<const char *>(nhdr) +
+ sizeof(*nhdr) + kGnuNamesz;
+ cur_module.setUuid(value, nhdr->n_descsz);
+ break;
+ }
+ }
+ off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
+ RoundUpTo(nhdr->n_descsz, 4);
+ }
+# endif
}
}
modules->push_back(cur_module);
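The new PT_NOTE branch above records each module's GNU build ID. Roughly the same walk can be illustrated outside the runtime with dl_iterate_phdr; the sketch below is simplified (it omits the extra descriptor bounds check the patch performs) and is only an illustration:

#include <elf.h>
#include <link.h>
#include <stdio.h>
#include <string.h>

static int PrintBuildId(struct dl_phdr_info *info, size_t, void *) {
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr) *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_NOTE) continue;
    size_t off = 0;
    while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
      auto *nhdr = (const ElfW(Nhdr) *)(info->dlpi_addr + phdr->p_vaddr + off);
      const char *name = (const char *)nhdr + sizeof(*nhdr);
      if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == 4 &&
          memcmp(name, "GNU", 4) == 0) {
        const unsigned char *desc = (const unsigned char *)name + 4;
        printf("%s: build-id ", info->dlpi_name);
        for (unsigned j = 0; j < nhdr->n_descsz; j++) printf("%02x", desc[j]);
        printf("\n");
      }
      off += sizeof(*nhdr) + ((nhdr->n_namesz + 3) & ~3u) +
             ((nhdr->n_descsz + 3) & ~3u);
    }
  }
  return 0;
}

int main() { dl_iterate_phdr(PrintBuildId, nullptr); return 0; }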
lib/tsan/sanitizer_common/sanitizer_linux_s390.cpp
@@ -57,8 +57,10 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
- if (!fn || !child_stack)
- return -EINVAL;
+ if (!fn || !child_stack) {
+ errno = EINVAL;
+ return -1;
+ }
CHECK_EQ(0, (uptr)child_stack % 16);
// Minimum frame size.
#ifdef __s390x__
@@ -71,9 +73,9 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
// And pass parameters.
((unsigned long *)child_stack)[1] = (uptr)fn;
((unsigned long *)child_stack)[2] = (uptr)arg;
- register long res __asm__("r2");
+ register uptr res __asm__("r2");
register void *__cstack __asm__("r2") = child_stack;
- register int __flags __asm__("r3") = flags;
+ register long __flags __asm__("r3") = flags;
register int * __ptidptr __asm__("r4") = parent_tidptr;
register int * __ctidptr __asm__("r5") = child_tidptr;
register void * __newtls __asm__("r6") = newtls;
@@ -113,6 +115,10 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
"r"(__ctidptr),
"r"(__newtls)
: "memory", "cc");
+ if (res >= (uptr)-4095) {
+ errno = -res;
+ return -1;
+ }
return res;
}
lib/tsan/sanitizer_common/sanitizer_local_address_space_view.h
@@ -17,7 +17,7 @@
// instantiated with the `LocalAddressSpaceView` type. This type is used to
// load any pointers in instance methods. This implementation is effectively
// a no-op. When an object is to be used in an out-of-process manner it is
-// instansiated with the `RemoteAddressSpaceView` type.
+// instantiated with the `RemoteAddressSpaceView` type.
//
// By making `AddressSpaceView` a template parameter of an object, it can
// change its implementation at compile time which has no run time overhead.
lib/tsan/sanitizer_common/sanitizer_lzw.h
@@ -0,0 +1,159 @@
+//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lempel-Ziv-Welch encoding/decoding
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LZW_H
+#define SANITIZER_LZW_H
+
+#include "sanitizer_dense_map.h"
+
+namespace __sanitizer {
+
+using LzwCodeType = u32;
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
+ using Substring =
+ detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;
+
+ // Sentinel value for substrings of len 1.
+ static constexpr LzwCodeType kNoPrefix =
+ Min(DenseMapInfo<Substring>::getEmptyKey().first,
+ DenseMapInfo<Substring>::getTombstoneKey().first) -
+ 1;
+ DenseMap<Substring, LzwCodeType> prefix_to_code;
+ {
+ // Add all substring of len 1 as initial dictionary.
+ InternalMmapVector<T> dict_len1;
+ for (auto it = begin; it != end; ++it)
+ if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
+ dict_len1.push_back(*it);
+
+ // Slightly helps with later delta encoding.
+ Sort(dict_len1.data(), dict_len1.size());
+
+ // For large sizeof(T) we have to store dict_len1. Smaller types like u8 can
+ // just generate them.
+ *out = dict_len1.size();
+ ++out;
+
+ for (uptr i = 0; i != dict_len1.size(); ++i) {
+ // Remap after the Sort.
+ prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
+ *out = dict_len1[i];
+ ++out;
+ }
+ CHECK_EQ(prefix_to_code.size(), dict_len1.size());
+ }
+
+ if (begin == end)
+ return out;
+
+ // Main LZW encoding loop.
+ LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
+ ++begin;
+ for (auto it = begin; it != end; ++it) {
+ // Extend match with the new item.
+ auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
+ if (ins.second) {
+      // This is a new substring, but emit the code for the current match
+      // (before extending). This allows the LZW decoder to recover the dictionary.
+ *out = match;
+ ++out;
+ // Reset the match to a single item, which must be already in the map.
+ match = prefix_to_code.find({kNoPrefix, *it})->second;
+ } else {
+ // Already known, use as the current match.
+ match = ins.first->second;
+ }
+ }
+
+ *out = match;
+ ++out;
+
+ return out;
+}
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
+ if (begin == end)
+ return out;
+
+  // Load dictionary of len 1 substrings. These correspond to the lowest codes.
+ InternalMmapVector<T> dict_len1(*begin);
+ ++begin;
+
+ if (begin == end)
+ return out;
+
+ for (auto& v : dict_len1) {
+ v = *begin;
+ ++begin;
+ }
+
+ // Substrings of len 2 and up. Indexes are shifted because [0,
+  // dict_len1.size()) is stored in dict_len1. Substrings get here after being
+  // emitted to the output, so we can use the output position.
+ InternalMmapVector<detail::DenseMapPair<ItOut /* begin. */, ItOut /* end */>>
+ code_to_substr;
+
+ // Copies already emitted substrings into the output again.
+ auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
+ if (code < dict_len1.size()) {
+ *out = dict_len1[code];
+ ++out;
+ return out;
+ }
+ const auto& s = code_to_substr[code - dict_len1.size()];
+
+ for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
+ return out;
+ };
+
+  // Returns the length of the substring with the given code.
+ auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
+ if (code < dict_len1.size())
+ return 1;
+ const auto& s = code_to_substr[code - dict_len1.size()];
+ return s.second - s.first;
+ };
+
+ // Main LZW decoding loop.
+ LzwCodeType prev_code = *begin;
+ ++begin;
+ out = copy(prev_code, out);
+ for (auto it = begin; it != end; ++it) {
+ LzwCodeType code = *it;
+ auto start = out;
+ if (code == dict_len1.size() + code_to_substr.size()) {
+ // Special LZW case. The code is not in the dictionary yet. This is
+ // possible only when the new substring is the same as previous one plus
+ // the first item of the previous substring. We can emit that in two
+ // steps.
+ out = copy(prev_code, out);
+ *out = *start;
+ ++out;
+ } else {
+ out = copy(code, out);
+ }
+
+    // Every time the encoder emits a code, it also creates a substring of len + 1
+    // including the first item of the just emitted substring. Do the same here.
+ uptr len = code_to_len(prev_code);
+ code_to_substr.push_back({start - len, start + 1});
+
+ prev_code = code;
+ }
+ return out;
+}
+
+} // namespace __sanitizer
+#endif
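A hedged usage sketch of the new helpers (assuming a test translation unit where sanitizer_lzw.h can be mixed with the STL): LzwEncode only writes through its output iterator, so a back-inserter works, while LzwDecode re-reads earlier output and therefore needs a random-access destination such as a pre-sized buffer.

#include <cassert>
#include <iterator>
#include <vector>
#include "sanitizer_common/sanitizer_lzw.h"

void LzwRoundTrip() {
  using namespace __sanitizer;
  std::vector<u8> data = {1, 2, 1, 2, 1, 2, 3, 3, 3};
  std::vector<LzwCodeType> encoded;
  LzwEncode<u8>(data.begin(), data.end(), std::back_inserter(encoded));
  // Decode back into a buffer of known size; the return value marks the end.
  std::vector<u8> decoded(data.size());
  u8 *end = LzwDecode<u8>(encoded.begin(), encoded.end(), decoded.data());
  assert(end == decoded.data() + data.size());
  assert(decoded == data);
}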
lib/tsan/sanitizer_common/sanitizer_mac.cpp
@@ -11,80 +11,82 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
-#include "sanitizer_mac.h"
-#include "interception/interception.h"
+#if SANITIZER_APPLE
+# include "interception/interception.h"
+# include "sanitizer_mac.h"
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
-#ifndef _DARWIN_USE_64_BIT_INODE
-#define _DARWIN_USE_64_BIT_INODE 1
-#endif
-#include <stdio.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_platform_limits_posix.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_ptrauth.h"
-
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetEnviron
-#else
+# ifndef _DARWIN_USE_64_BIT_INODE
+# define _DARWIN_USE_64_BIT_INODE 1
+# endif
+# include <stdio.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_interface_internal.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_ptrauth.h"
+
+# if !SANITIZER_IOS
+# include <crt_externs.h> // for _NSGetEnviron
+# else
extern char **environ;
-#endif
+# endif
-#if defined(__has_include) && __has_include(<os/trace.h>)
-#define SANITIZER_OS_TRACE 1
-#include <os/trace.h>
-#else
-#define SANITIZER_OS_TRACE 0
-#endif
+# if defined(__has_include) && __has_include(<os/trace.h>)
+# define SANITIZER_OS_TRACE 1
+# include <os/trace.h>
+# else
+# define SANITIZER_OS_TRACE 0
+# endif
// import new crash reporting api
-#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
-#define HAVE_CRASHREPORTERCLIENT_H 1
-#include <CrashReporterClient.h>
-#else
-#define HAVE_CRASHREPORTERCLIENT_H 0
-#endif
-
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
-#else
+# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
+# define HAVE_CRASHREPORTERCLIENT_H 1
+# include <CrashReporterClient.h>
+# else
+# define HAVE_CRASHREPORTERCLIENT_H 0
+# endif
+
+# if !SANITIZER_IOS
+# include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+# else
extern "C" {
- extern char ***_NSGetArgv(void);
-}
-#endif
-
-#include <asl.h>
-#include <dlfcn.h> // for dladdr()
-#include <errno.h>
-#include <fcntl.h>
-#include <libkern/OSAtomic.h>
-#include <mach-o/dyld.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mach/vm_statistics.h>
-#include <malloc/malloc.h>
-#include <os/log.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <spawn.h>
-#include <stdlib.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <util.h>
+extern char ***_NSGetArgv(void);
+}
+# endif
+
+# include <asl.h>
+# include <dlfcn.h> // for dladdr()
+# include <errno.h>
+# include <fcntl.h>
+# include <libkern/OSAtomic.h>
+# include <mach-o/dyld.h>
+# include <mach/mach.h>
+# include <mach/mach_time.h>
+# include <mach/vm_statistics.h>
+# include <malloc/malloc.h>
+# include <os/log.h>
+# include <pthread.h>
+# include <pthread/introspection.h>
+# include <sched.h>
+# include <signal.h>
+# include <spawn.h>
+# include <stdlib.h>
+# include <sys/ioctl.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/sysctl.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <util.h>
// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
@@ -265,30 +267,32 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
pid_t *pid) {
- fd_t master_fd = kInvalidFd;
- fd_t slave_fd = kInvalidFd;
+ fd_t primary_fd = kInvalidFd;
+ fd_t secondary_fd = kInvalidFd;
auto fd_closer = at_scope_exit([&] {
- internal_close(master_fd);
- internal_close(slave_fd);
+ internal_close(primary_fd);
+ internal_close(secondary_fd);
});
// We need a new pseudoterminal to avoid buffering problems. The 'atos' tool
// in particular detects when it's talking to a pipe and forgets to flush the
// output stream after sending a response.
- master_fd = posix_openpt(O_RDWR);
- if (master_fd == kInvalidFd) return kInvalidFd;
+ primary_fd = posix_openpt(O_RDWR);
+ if (primary_fd == kInvalidFd)
+ return kInvalidFd;
- int res = grantpt(master_fd) || unlockpt(master_fd);
+ int res = grantpt(primary_fd) || unlockpt(primary_fd);
if (res != 0) return kInvalidFd;
// Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.
- char slave_pty_name[128];
- res = ioctl(master_fd, TIOCPTYGNAME, slave_pty_name);
+ char secondary_pty_name[128];
+ res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name);
if (res == -1) return kInvalidFd;
- slave_fd = internal_open(slave_pty_name, O_RDWR);
- if (slave_fd == kInvalidFd) return kInvalidFd;
+ secondary_fd = internal_open(secondary_pty_name, O_RDWR);
+ if (secondary_fd == kInvalidFd)
+ return kInvalidFd;
// File descriptor actions
posix_spawn_file_actions_t acts;
@@ -299,9 +303,9 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
posix_spawn_file_actions_destroy(&acts);
});
- res = posix_spawn_file_actions_adddup2(&acts, slave_fd, STDIN_FILENO) ||
- posix_spawn_file_actions_adddup2(&acts, slave_fd, STDOUT_FILENO) ||
- posix_spawn_file_actions_addclose(&acts, slave_fd);
+ res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) ||
+ posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) ||
+ posix_spawn_file_actions_addclose(&acts, secondary_fd);
if (res != 0) return kInvalidFd;
// Spawn attributes
@@ -326,14 +330,14 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
// Disable echo in the new terminal, disable CR.
struct termios termflags;
- tcgetattr(master_fd, &termflags);
+ tcgetattr(primary_fd, &termflags);
termflags.c_oflag &= ~ONLCR;
termflags.c_lflag &= ~ECHO;
- tcsetattr(master_fd, TCSANOW, &termflags);
+ tcsetattr(primary_fd, TCSANOW, &termflags);
- // On success, do not close master_fd on scope exit.
- fd_t fd = master_fd;
- master_fd = kInvalidFd;
+ // On success, do not close primary_fd on scope exit.
+ fd_t fd = primary_fd;
+ primary_fd = kInvalidFd;
return fd;
}
@@ -390,6 +394,13 @@ bool FileExists(const char *filename) {
return S_ISREG(st.st_mode);
}
+bool DirExists(const char *path) {
+ struct stat st;
+ if (stat(path, &st))
+ return false;
+ return S_ISDIR(st.st_mode);
+}
+
tid_t GetTid() {
tid_t tid;
pthread_threadid_np(nullptr, &tid);
@@ -516,25 +527,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}
-BlockingMutex::BlockingMutex() {
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
- CHECK_EQ(OS_SPINLOCK_INIT, 0);
- CHECK_EQ(owner_, 0);
- OSSpinLockLock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::Unlock() {
- OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const {
- CHECK_NE(*(const OSSpinLock*)&opaque_storage_, 0);
-}
-
u64 NanoTime() {
timeval tv;
internal_memset(&tv, 0, sizeof(tv));
@@ -562,6 +554,9 @@ uptr TlsBaseAddr() {
asm("movq %%gs:0,%0" : "=r"(segbase));
#elif defined(__i386__)
asm("movl %%gs:0,%0" : "=r"(segbase));
+#elif defined(__aarch64__)
+ asm("mrs %x0, tpidrro_el0" : "=r"(segbase));
+ segbase &= 0x07ul; // clearing lower bits, cpu id stored there
#endif
return segbase;
}
@@ -784,8 +779,8 @@ void *internal_start_thread(void *(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
#if !SANITIZER_GO
-static BlockingMutex syslog_lock(LINKER_INITIALIZED);
-#endif
+static Mutex syslog_lock;
+# endif
void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
@@ -800,7 +795,7 @@ void WriteOneLineToSyslog(const char *s) {
// buffer to store crash report application information
static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
-static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+static Mutex crashreporter_info_mutex;
extern "C" {
// Integrate with crash reporter libraries.
@@ -830,7 +825,7 @@ asm(".desc ___crashreporter_info__, 0x10");
} // extern "C"
static void CRAppendCrashLogMessage(const char *msg) {
- BlockingMutexLock l(&crashreporter_info_mutex);
+ Lock l(&crashreporter_info_mutex);
internal_strlcat(crashreporter_info_buff, msg,
sizeof(crashreporter_info_buff));
#if HAVE_CRASHREPORTERCLIENT_H
@@ -874,7 +869,7 @@ void LogFullErrorReport(const char *buffer) {
// the reporting thread holds the thread registry mutex, and asl_log waits
// for GCD to dispatch a new thread, the process will deadlock, because the
// pthread_create wrapper needs to acquire the lock as well.
- BlockingMutexLock l(&syslog_lock);
+ Lock l(&syslog_lock);
if (common_flags()->log_to_syslog)
WriteToSyslog(buffer);
@@ -885,9 +880,12 @@ void LogFullErrorReport(const char *buffer) {
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if defined(__x86_64__) || defined(__i386__)
ucontext_t *ucontext = static_cast<ucontext_t*>(context);
- return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
+ return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read;
+#elif defined(__arm64__)
+ ucontext_t *ucontext = static_cast<ucontext_t*>(context);
+ return ucontext->uc_mcontext->__es.__esr & 0x40 /*ISS_DA_WNR*/ ? Write : Read;
#else
- return UNKNOWN;
+ return Unknown;
#endif
}
@@ -902,18 +900,14 @@ bool SignalContext::IsTrueFaultingAddress() const {
(uptr)ptrauth_strip( \
(void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
#else
- #define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r
+ #define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
#endif
static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
*pc = AARCH64_GET_REG(pc);
-# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
*bp = AARCH64_GET_REG(fp);
-# else
- *bp = AARCH64_GET_REG(lr);
-# endif
*sp = AARCH64_GET_REG(sp);
# elif defined(__x86_64__)
*pc = ucontext->uc_mcontext->__ss.__rip;
@@ -950,6 +944,9 @@ static void DisableMmapExcGuardExceptions() {
set_behavior(mach_task_self(), task_exc_guard_none);
}
+static void VerifyInterceptorsWorking();
+static void StripEnv();
+
void InitializePlatformEarly() {
// Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
use_xnu_fast_mmap =
@@ -960,17 +957,54 @@ void InitializePlatformEarly() {
#endif
if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
DisableMmapExcGuardExceptions();
+
+# if !SANITIZER_GO
+ MonotonicNanoTime(); // Call to initialize mach_timebase_info
+ VerifyInterceptorsWorking();
+ StripEnv();
+# endif
}
#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
+static bool ShouldCheckInterceptors() {
+ // Restrict "interceptors working?" check to ASan and TSan.
+ const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer"};
+ size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]);
+ for (size_t i = 0; i < count; i++) {
+ if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0)
+ return true;
+ }
+ return false;
+}
+
+static void VerifyInterceptorsWorking() {
+ if (!common_flags()->verify_interceptors || !ShouldCheckInterceptors())
+ return;
+
+ // Verify that interceptors really work. We'll use dlsym to locate
+ // "puts", if interceptors are working, it should really point to
+ // "wrap_puts" within our own dylib.
+ Dl_info info_puts, info_runtime;
+ RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
+ RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
+ if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
+ Report(
+ "ERROR: Interceptors are not working. This may be because %s is "
+ "loaded too late (e.g. via dlopen). Please launch the executable "
+ "with:\n%s=%s\n",
+ SanitizerToolName, kDyldInsertLibraries, info_runtime.dli_fname);
+ RAW_CHECK("interceptors not installed" && 0);
+ }
+}
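The check above boils down to: resolve a libc symbol with dlsym and ask dladdr which loaded image actually provides it. An illustrative standalone sketch of that mechanism (not the runtime's code path; Anchor() is just a local symbol used to locate the current binary):

#include <dlfcn.h>
#include <stdio.h>

static void Anchor() {}

int main(void) {
  Dl_info info_puts, info_self;
  if (!dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts) ||
      !dladdr((void *)&Anchor, &info_self))
    return 1;
  // With an interposing dylib loaded, dli_fname for "puts" names that dylib.
  printf("puts comes from: %s\n", info_puts.dli_fname);
  printf("this binary is:  %s\n", info_self.dli_fname);
  return 0;
}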
+
// Change the value of the env var |name|, leaking the original value.
// If |name_value| is NULL, the variable is deleted from the environment,
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
-void LeakyResetEnv(const char *name, const char *name_value) {
+static void LeakyResetEnv(const char *name, const char *name_value) {
char **env = GetEnviron();
uptr name_len = internal_strlen(name);
while (*env != 0) {
@@ -995,100 +1029,28 @@ void LeakyResetEnv(const char *name, const char *name_value) {
}
}
-SANITIZER_WEAK_CXX_DEFAULT_IMPL
-bool ReexecDisabled() {
- return false;
-}
-
-static bool DyldNeedsEnvVariable() {
- // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
- // DYLD_INSERT_LIBRARIES is not set.
- return GetMacosAlignedVersion() < MacosVersion(10, 11);
-}
-
-void MaybeReexec() {
- // FIXME: This should really live in some "InitializePlatform" method.
- MonotonicNanoTime();
+static void StripEnv() {
+ if (!common_flags()->strip_env)
+ return;
- if (ReexecDisabled()) return;
+ char *dyld_insert_libraries =
+ const_cast<char *>(GetEnv(kDyldInsertLibraries));
+ if (!dyld_insert_libraries)
+ return;
- // Make sure the dynamic runtime library is preloaded so that the
- // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
- // ourselves.
Dl_info info;
- RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));
- char *dyld_insert_libraries =
- const_cast<char*>(GetEnv(kDyldInsertLibraries));
- uptr old_env_len = dyld_insert_libraries ?
- internal_strlen(dyld_insert_libraries) : 0;
- uptr fname_len = internal_strlen(info.dli_fname);
+ RAW_CHECK(dladdr((void *)&StripEnv, &info));
const char *dylib_name = StripModuleName(info.dli_fname);
- uptr dylib_name_len = internal_strlen(dylib_name);
-
- bool lib_is_in_env = dyld_insert_libraries &&
- internal_strstr(dyld_insert_libraries, dylib_name);
- if (DyldNeedsEnvVariable() && !lib_is_in_env) {
- // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
- // library.
- InternalMmapVector<char> program_name(1024);
- uint32_t buf_size = program_name.size();
- _NSGetExecutablePath(program_name.data(), &buf_size);
- char *new_env = const_cast<char*>(info.dli_fname);
- if (dyld_insert_libraries) {
- // Append the runtime dylib name to the existing value of
- // DYLD_INSERT_LIBRARIES.
- new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
- internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
- new_env[old_env_len] = ':';
- // Copy fname_len and add a trailing zero.
- internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
- fname_len + 1);
- // Ok to use setenv() since the wrappers don't depend on the value of
- // asan_inited.
- setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
- } else {
- // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
- setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
- }
- VReport(1, "exec()-ing the program with\n");
- VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
- VReport(1, "to enable wrappers.\n");
- execv(program_name.data(), *_NSGetArgv());
-
- // We get here only if execv() failed.
- Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
- "which is required for the sanitizer to work. We tried to set the "
- "environment variable and re-execute itself, but execv() failed, "
- "possibly because of sandbox restrictions. Make sure to launch the "
- "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
- RAW_CHECK("execv failed" && 0);
- }
-
- // Verify that interceptors really work. We'll use dlsym to locate
- // "pthread_create", if interceptors are working, it should really point to
- // "wrap_pthread_create" within our own dylib.
- Dl_info info_pthread_create;
- void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
- RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
- if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
- Report(
- "ERROR: Interceptors are not working. This may be because %s is "
- "loaded too late (e.g. via dlopen). Please launch the executable "
- "with:\n%s=%s\n",
- SanitizerToolName, kDyldInsertLibraries, info.dli_fname);
- RAW_CHECK("interceptors not installed" && 0);
- }
-
+ bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
if (!lib_is_in_env)
return;
- if (!common_flags()->strip_env)
- return;
-
// DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
// the dylib from the environment variable, because interceptors are installed
// and we don't want our children to inherit the variable.
+ uptr old_env_len = internal_strlen(dyld_insert_libraries);
+ uptr dylib_name_len = internal_strlen(dylib_name);
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
@@ -1237,7 +1199,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
uptr shadow_start =
FindAvailableMemoryRange(space_size, alignment, granularity,
&largest_gap_found, &max_occupied_addr);
@@ -1246,20 +1208,21 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
VReport(
2,
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
+ (void *)largest_gap_found, (void *)max_occupied_addr);
uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
"new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ (void *)space_size, (void *)largest_gap_found,
+ (void *)max_occupied_addr, (void *)new_max_vm);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
high_mem_end = new_max_vm - 1;
space_size = (high_mem_end >> shadow_scale) + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
nullptr, nullptr);
if (shadow_start == 0) {
@@ -1288,6 +1251,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
mach_vm_address_t start_address =
(SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+ const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
mach_vm_address_t address = start_address;
mach_vm_address_t free_begin = start_address;
kern_return_t kr = KERN_SUCCESS;
@@ -1302,7 +1266,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
(vm_region_info_t)&vminfo, &count);
if (kr == KERN_INVALID_ADDRESS) {
// No more regions beyond "address", consider the gap at the end of VM.
- address = GetMaxVirtualAddress() + 1;
+ address = max_vm_address;
vmsize = 0;
} else {
if (max_occupied_addr) *max_occupied_addr = address + vmsize;
@@ -1310,7 +1274,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
if (free_begin != address) {
// We found a free region [free_begin..address-1].
uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
- uptr gap_end = RoundDownTo((uptr)address, alignment);
+ uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
if (size < gap_size) {
return gap_start;
@@ -1330,7 +1294,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
}
// FIXME implement on this platform.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
void SignalContext::DumpAllRegisters(void *context) {
Report("Register values:\n");
@@ -1339,7 +1303,7 @@ void SignalContext::DumpAllRegisters(void *context) {
# define DUMPREG64(r) \
Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREGA64(r) \
- Printf(" %s = 0x%016llx ", #r, AARCH64_GET_REG(r));
+ Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r));
# define DUMPREG32(r) \
Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREG_(r) Printf(" "); DUMPREG(r);
@@ -1409,7 +1373,7 @@ void DumpProcessMap() {
char uuid_str[128];
FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
- modules[i].max_executable_address(), modules[i].full_name(),
+ modules[i].max_address(), modules[i].full_name(),
ModuleArchToString(modules[i].arch()), uuid_str);
}
Printf("End of module map.\n");
@@ -1433,6 +1397,61 @@ u32 GetNumberOfCPUs() {
void InitializePlatformCommonFlags(CommonFlags *cf) {}
+// Pthread introspection hook
+//
+// * GCD worker threads are created without a call to pthread_create(), but we
+// still need to register these threads (with ThreadCreate/Start()).
+// * We use the "pthread introspection hook" below to observe the creation of
+// such threads.
+// * GCD worker threads don't have parent threads and the CREATE event is
+// delivered in the context of the thread itself. CREATE events for regular
+//   threads are delivered on the parent. We use this to tell apart which
+// threads are GCD workers with `thread == pthread_self()`.
+//
+static pthread_introspection_hook_t prev_pthread_introspection_hook;
+static ThreadEventCallbacks thread_event_callbacks;
+
+static void sanitizer_pthread_introspection_hook(unsigned int event,
+ pthread_t thread, void *addr,
+ size_t size) {
+ // create -> start -> terminate -> destroy
+ // * create/destroy are usually (not guaranteed) delivered on the parent and
+ // track resource allocation/reclamation
+ // * start/terminate are guaranteed to be delivered in the context of the
+ // thread and give hooks into "just after (before) thread starts (stops)
+ // executing"
+ DCHECK(event >= PTHREAD_INTROSPECTION_THREAD_CREATE &&
+ event <= PTHREAD_INTROSPECTION_THREAD_DESTROY);
+
+ if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
+ bool gcd_worker = (thread == pthread_self());
+ if (thread_event_callbacks.create)
+ thread_event_callbacks.create((uptr)thread, gcd_worker);
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_START) {
+ CHECK_EQ(thread, pthread_self());
+ if (thread_event_callbacks.start)
+ thread_event_callbacks.start((uptr)thread);
+ }
+
+ if (prev_pthread_introspection_hook)
+ prev_pthread_introspection_hook(event, thread, addr, size);
+
+ if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
+ CHECK_EQ(thread, pthread_self());
+ if (thread_event_callbacks.terminate)
+ thread_event_callbacks.terminate((uptr)thread);
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
+ if (thread_event_callbacks.destroy)
+ thread_event_callbacks.destroy((uptr)thread);
+ }
+}
+
+void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks) {
+ thread_event_callbacks = callbacks;
+ prev_pthread_introspection_hook =
+ pthread_introspection_hook_install(&sanitizer_pthread_introspection_hook);
+}
+
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
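A minimal sketch of the hook mechanism used above (assumes an Apple platform providing <pthread/introspection.h>): install a hook that logs thread start/terminate events and chains to any previously installed hook, the same chaining pattern InstallPthreadIntrospectionHook() follows.

#include <pthread/introspection.h>
#include <stdio.h>

static pthread_introspection_hook_t prev_hook;

static void LoggingHook(unsigned int event, pthread_t thread, void *addr,
                        size_t size) {
  if (event == PTHREAD_INTROSPECTION_THREAD_START)
    fprintf(stderr, "thread %p started\n", (void *)thread);
  else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE)
    fprintf(stderr, "thread %p terminating\n", (void *)thread);
  if (prev_hook)
    prev_hook(event, thread, addr, size);  // chain to the previous hook
}

__attribute__((constructor)) static void InstallLoggingHook() {
  prev_hook = pthread_introspection_hook_install(&LoggingHook);
}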
lib/tsan/sanitizer_common/sanitizer_mac.h
@@ -9,12 +9,12 @@
// This file is shared between various sanitizers' runtime libraries and
// provides definitions for OSX-specific functions.
//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_MAC_H
-#define SANITIZER_MAC_H
+#ifndef SANITIZER_APPLE_H
+#define SANITIZER_APPLE_H
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_posix.h"
namespace __sanitizer {
@@ -62,7 +62,18 @@ char **GetEnviron();
void RestrictMemoryToMaxAddress(uptr max_address);
+using ThreadEventCallback = void (*)(uptr thread);
+using ThreadCreateEventCallback = void (*)(uptr thread, bool gcd_worker);
+struct ThreadEventCallbacks {
+ ThreadCreateEventCallback create;
+ ThreadEventCallback start;
+ ThreadEventCallback terminate;
+ ThreadEventCallback destroy;
+};
+
+void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);
+
} // namespace __sanitizer
-#endif // SANITIZER_MAC
-#endif // SANITIZER_MAC_H
+#endif // SANITIZER_APPLE
+#endif // SANITIZER_APPLE_H
lib/tsan/sanitizer_common/sanitizer_mac_libcdep.cpp
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_mac.h"
#include <sys/mman.h>
@@ -26,4 +26,4 @@ void RestrictMemoryToMaxAddress(uptr max_address) {
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/sanitizer_common/sanitizer_mallinfo.h
@@ -0,0 +1,38 @@
+//===-- sanitizer_mallinfo.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Definition for mallinfo on different platforms.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MALLINFO_H
+#define SANITIZER_MALLINFO_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+#if SANITIZER_ANDROID
+
+struct __sanitizer_struct_mallinfo {
+ uptr v[10];
+};
+
+#elif SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_FUCHSIA
+
+struct __sanitizer_struct_mallinfo {
+ int v[10];
+};
+
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MALLINFO_H
lib/tsan/sanitizer_common/sanitizer_malloc_mac.inc
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
#error "This file should only be compiled on Darwin."
#endif
@@ -23,6 +23,7 @@
#include <sys/mman.h>
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
@@ -192,20 +193,15 @@ void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
return p;
}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !COMMON_MALLOC_SANITIZER_INITIALIZED; }
+};
+
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
- if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- const size_t kCallocPoolSize = 1024;
- static uptr calloc_memory_for_dlsym[kCallocPoolSize];
- static size_t allocated;
- size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
- void *mem = (void*)&calloc_memory_for_dlsym[allocated];
- allocated += size_in_words;
- CHECK(allocated < kCallocPoolSize);
- return mem;
- }
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
COMMON_MALLOC_CALLOC(nmemb, size);
return p;
}
@@ -223,6 +219,8 @@ extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
if (!ptr) return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
COMMON_MALLOC_FREE(ptr);
}
lib/tsan/sanitizer_common/sanitizer_mutex.cpp
@@ -73,7 +73,7 @@ void DebugMutexInit() {
// Build adjacency matrix.
bool leaf[kMutexTypeMax];
internal_memset(&leaf, 0, sizeof(leaf));
- int cnt[kMutexTypeMax] = {};
+ int cnt[kMutexTypeMax];
internal_memset(&cnt, 0, sizeof(cnt));
for (int t = 0; t < kMutexTypeMax; t++) {
mutex_type_count = t;
@@ -174,7 +174,7 @@ struct InternalDeadlockDetector {
if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
mutex_meta[type].name, mutex_meta[max_idx].name);
- PrintMutexPC(pc);
+ PrintMutexPC(locked[max_idx].pc);
CHECK(0);
}
locked[type].seq = ++sequence;
lib/tsan/sanitizer_common/sanitizer_mutex.h
@@ -20,25 +20,27 @@
namespace __sanitizer {
-class MUTEX StaticSpinMutex {
+class SANITIZER_MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
if (LIKELY(TryLock()))
return;
LockSlow();
}
- bool TryLock() TRY_ACQUIRE(true) {
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
- void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
+ void Unlock() SANITIZER_RELEASE() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
- void CheckLocked() const CHECK_LOCKED() {
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
void LockSlow();
};
-class MUTEX SpinMutex : public StaticSpinMutex {
+class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
@@ -95,7 +97,11 @@ enum {
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
-#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
+// Disable checked locks on Darwin. Although Darwin platforms support
+// THREADLOCAL variables, they are not usable early on during process init when
+// `__sanitizer::Mutex` is used.
+#define SANITIZER_CHECK_DEADLOCKS \
+ (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)
#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
@@ -111,7 +117,7 @@ struct MutexMeta {
class CheckedMutex {
public:
- constexpr CheckedMutex(MutexType type)
+ explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
: type_(type)
#endif
@@ -152,15 +158,15 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
-class MUTEX Mutex : CheckedMutex {
+class SANITIZER_MUTEX Mutex : CheckedMutex {
public:
- constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}
+ explicit constexpr Mutex(MutexType type = MutexUnchecked)
+ : CheckedMutex(type) {}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
- const uptr kMaxSpinIters = 1500;
for (uptr spin_iters = 0;; spin_iters++) {
u64 new_state;
bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
@@ -189,8 +195,6 @@ class MUTEX Mutex : CheckedMutex {
// We've incremented waiting writers, so now block.
writers_.Wait();
spin_iters = 0;
- state = atomic_load(&state_, memory_order_relaxed);
- DCHECK_NE(state & kWriterSpinWait, 0);
} else {
// We've set kWriterSpinWait, but we are still in active spinning.
}
@@ -199,10 +203,26 @@ class MUTEX Mutex : CheckedMutex {
// Either way we need to reset kWriterSpinWait
// next time we take the lock or block again.
reset_mask = ~kWriterSpinWait;
+ state = atomic_load(&state_, memory_order_relaxed);
+ DCHECK_NE(state & kWriterSpinWait, 0);
+ }
+ }
+
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
+ u64 state = atomic_load_relaxed(&state_);
+ for (;;) {
+ if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
+ return false;
+ // The mutex is not read-/write-locked, try to lock.
+ if (LIKELY(atomic_compare_exchange_weak(
+ &state_, &state, state | kWriterLock, memory_order_acquire))) {
+ CheckedMutex::Lock();
+ return true;
+ }
}
}
- void Unlock() RELEASE() {
+ void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
u64 wake_readers;
@@ -212,17 +232,16 @@ class MUTEX Mutex : CheckedMutex {
DCHECK_NE(state & kWriterLock, 0);
DCHECK_EQ(state & kReaderLockMask, 0);
new_state = state & ~kWriterLock;
- wake_writer =
- (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+ wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
+ (state & kWaitingWriterMask) != 0;
if (wake_writer)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
wake_readers =
- (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+ wake_writer || (state & kWriterSpinWait) != 0
? 0
: ((state & kWaitingReaderMask) >> kWaitingReaderShift);
if (wake_readers)
- new_state = (new_state & ~kWaitingReaderMask) +
- (wake_readers << kReaderLockShift);
+ new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
} while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
memory_order_release)));
if (UNLIKELY(wake_writer))
@@ -231,37 +250,54 @@ class MUTEX Mutex : CheckedMutex {
readers_.Post(wake_readers);
}
- void ReadLock() ACQUIRE_SHARED() {
+ void ReadLock() SANITIZER_ACQUIRE_SHARED() {
CheckedMutex::Lock();
- bool locked;
- u64 new_state;
+ u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
- do {
- locked =
- (state & kReaderLockMask) == 0 &&
- (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+ for (uptr spin_iters = 0;; spin_iters++) {
+ bool locked = (state & kWriterLock) != 0;
+ u64 new_state;
+ if (LIKELY(!locked)) {
+ new_state = (state + kReaderLockInc) & reset_mask;
+ } else if (spin_iters > kMaxSpinIters) {
+ new_state = (state + kWaitingReaderInc) & reset_mask;
+ } else if ((state & kReaderSpinWait) == 0) {
+ // Active spinning, but denote our presence so that unlocking
+ // thread does not wake up other threads.
+ new_state = state | kReaderSpinWait;
+ } else {
+ // Active spinning.
+ state = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)))
+ continue;
if (LIKELY(!locked))
- new_state = state + kReaderLockInc;
- else
- new_state = state + kWaitingReaderInc;
- } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
- memory_order_acquire)));
- if (UNLIKELY(locked))
- readers_.Wait();
- DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
- DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+ return; // We've locked the mutex.
+ if (spin_iters > kMaxSpinIters) {
+ // We've incremented waiting readers, so now block.
+ readers_.Wait();
+ spin_iters = 0;
+ } else {
+ // We've set kReaderSpinWait, but we are still in active spinning.
+ }
+ reset_mask = ~kReaderSpinWait;
+ state = atomic_load(&state_, memory_order_relaxed);
+ }
}
- void ReadUnlock() RELEASE_SHARED() {
+ void ReadUnlock() SANITIZER_RELEASE_SHARED() {
CheckedMutex::Unlock();
bool wake;
u64 new_state;
u64 state = atomic_load_relaxed(&state_);
do {
DCHECK_NE(state & kReaderLockMask, 0);
- DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+ DCHECK_EQ(state & kWriterLock, 0);
new_state = state - kReaderLockInc;
- wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+ wake = (new_state &
+ (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
(new_state & kWaitingWriterMask) != 0;
if (wake)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
@@ -277,13 +313,13 @@ class MUTEX Mutex : CheckedMutex {
// owns the mutex but a child checks that it is locked. Rather than
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned.
- void CheckWriteLocked() const CHECK_LOCKED() {
+ void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
}
- void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
- void CheckReadLocked() const CHECK_LOCKED() {
+ void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
}
@@ -305,16 +341,14 @@ class MUTEX Mutex : CheckedMutex {
// - a writer is awake and spin-waiting
// the flag is used to prevent thundering herd problem
// (new writers are not woken if this flag is set)
+ // - a reader is awake and spin-waiting
//
- // Writer support active spinning, readers does not.
+ // Both writers and readers use active spinning before blocking.
// But readers are more aggressive and always take the mutex
// if there are any other readers.
- // Writers hand off the mutex to readers: after wake up readers
- // already assume ownership of the mutex (don't need to do any
- // state updates). But the mutex is not handed off to writers,
- // after wake up writers compete to lock the mutex again.
- // This is needed to allow repeated write locks even in presence
- // of other blocked writers.
+ // After wake up both writers and readers compete to lock the
+ // mutex again. This is needed to allow repeated locks even in presence
+ // of other blocked threads.
static constexpr u64 kCounterWidth = 20;
static constexpr u64 kReaderLockShift = 0;
static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
@@ -330,7 +364,11 @@ class MUTEX Mutex : CheckedMutex {
<< kWaitingWriterShift;
static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+ static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
+
+ static constexpr uptr kMaxSpinIters = 1500;
+ Mutex(LinkerInitialized) = delete;
Mutex(const Mutex &) = delete;
void operator=(const Mutex &) = delete;
};
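A hedged usage sketch of the reworked Mutex through the scoped helpers defined below (ComputeValue() is hypothetical): readers take the shared lock and, with this change, spin briefly before blocking, while writers take the exclusive lock.

static Mutex cache_mutex;
static int cache_value;

int GetCached() {
  {
    GenericScopedReadLock<Mutex> read_lock(&cache_mutex);  // shared
    if (cache_value)
      return cache_value;
  }
  GenericScopedLock<Mutex> write_lock(&cache_mutex);  // exclusive
  if (!cache_value)
    cache_value = ComputeValue();
  return cache_value;
}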
@@ -338,149 +376,70 @@ class MUTEX Mutex : CheckedMutex {
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
-class MUTEX BlockingMutex {
- public:
- explicit constexpr BlockingMutex(LinkerInitialized)
- : opaque_storage_ {0, }, owner_ {0} {}
- BlockingMutex();
- void Lock() ACQUIRE();
- void Unlock() RELEASE();
-
- // This function does not guarantee an explicit check that the calling thread
- // is the thread which owns the mutex. This behavior, while more strictly
- // correct, causes problems in cases like StopTheWorld, where a parent thread
- // owns the mutex but a child checks that it is locked. Rather than
- // maintaining complex state to work around those situations, the check only
- // checks that the mutex is owned, and assumes callers to be generally
- // well-behaved.
- void CheckLocked() const CHECK_LOCKED();
-
- private:
- // Solaris mutex_t has a member that requires 64-bit alignment.
- ALIGNED(8) uptr opaque_storage_[10];
- uptr owner_; // for debugging
-};
-
-// Reader-writer spin mutex.
-class MUTEX RWMutex {
+template <typename MutexType>
+class SANITIZER_SCOPED_LOCK GenericScopedLock {
public:
- RWMutex() {
- atomic_store(&state_, kUnlocked, memory_order_relaxed);
- }
-
- ~RWMutex() {
- CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
- }
-
- void Lock() ACQUIRE() {
- u32 cmp = kUnlocked;
- if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- LockSlow();
- }
-
- void Unlock() RELEASE() {
- u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
- DCHECK_NE(prev & kWriteLock, 0);
- (void)prev;
- }
-
- void ReadLock() ACQUIRE_SHARED() {
- u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- ReadLockSlow();
- }
-
- void ReadUnlock() RELEASE_SHARED() {
- u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
- DCHECK_EQ(prev & kWriteLock, 0);
- DCHECK_GT(prev & ~kWriteLock, 0);
- (void)prev;
+ explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
+ mu_->Lock();
}
- void CheckLocked() const CHECK_LOCKED() {
- CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
- }
+ ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
private:
- atomic_uint32_t state_;
-
- enum {
- kUnlocked = 0,
- kWriteLock = 1,
- kReadLock = 2
- };
-
- void NOINLINE LockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- u32 cmp = atomic_load(&state_, memory_order_relaxed);
- if (cmp == kUnlocked &&
- atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- }
- }
-
- void NOINLINE ReadLockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- u32 prev = atomic_load(&state_, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- }
- }
+ MutexType *mu_;
- RWMutex(const RWMutex &) = delete;
- void operator=(const RWMutex &) = delete;
+ GenericScopedLock(const GenericScopedLock &) = delete;
+ void operator=(const GenericScopedLock &) = delete;
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedLock {
+class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
public:
- explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
- mu_->Lock();
+ explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
+ : mu_(mu) {
+ mu_->ReadLock();
}
- ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
+ ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
- GenericScopedLock(const GenericScopedLock &) = delete;
- void operator=(const GenericScopedLock &) = delete;
+ GenericScopedReadLock(const GenericScopedReadLock &) = delete;
+ void operator=(const GenericScopedReadLock &) = delete;
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedReadLock {
+class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
public:
- explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
- mu_->ReadLock();
+ ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
+ SANITIZER_ACQUIRE(mu)
+ : mu_(mu), write_(write) {
+ if (write_)
+ mu_->Lock();
+ else
+ mu_->ReadLock();
}
- ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
+ ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
+ if (write_)
+ mu_->Unlock();
+ else
+ mu_->ReadUnlock();
+ }
private:
MutexType *mu_;
+ bool write_;
- GenericScopedReadLock(const GenericScopedReadLock &) = delete;
- void operator=(const GenericScopedReadLock &) = delete;
+ GenericScopedRWLock(const GenericScopedRWLock &) = delete;
+ void operator=(const GenericScopedRWLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-typedef GenericScopedLock<RWMutex> RWMutexLock;
-typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
+typedef GenericScopedRWLock<Mutex> RWLock;
} // namespace __sanitizer
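The Lock/ReadLock/RWLock typedefs above replace the removed BlockingMutexLock, RWMutexLock and RWMutexReadLock aliases. A hedged sketch of typical call-site usage; the Registry type and its members are illustrative only:

    // Illustrative only: the RAII wrappers unlock in their destructors, so a
    // scope is all that is needed.
    struct Registry {
      Mutex mtx_;
      int value_ = 0;

      void Set(int v) {
        Lock l(&mtx_);           // exclusive: GenericScopedLock<Mutex>
        value_ = v;
      }

      int Get() {
        ReadLock l(&mtx_);       // shared: GenericScopedReadLock<Mutex>
        return value_;
      }

      int Update(bool write, int v) {
        RWLock l(&mtx_, write);  // mode picked at run time: GenericScopedRWLock<Mutex>
        if (write)
          value_ = v;
        return value_;
      }
    };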
lib/tsan/sanitizer_common/sanitizer_openbsd.cpp
lib/tsan/sanitizer_common/sanitizer_persistent_allocator.h
@@ -1,71 +0,0 @@
-//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// A fast memory allocator that does not support free() nor realloc().
-// All allocations are forever.
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
-#define SANITIZER_PERSISTENT_ALLOCATOR_H
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
-#include "sanitizer_common.h"
-
-namespace __sanitizer {
-
-class PersistentAllocator {
- public:
- void *alloc(uptr size);
-
- private:
- void *tryAlloc(uptr size);
- StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
- atomic_uintptr_t region_pos; // Region allocator for Node's.
- atomic_uintptr_t region_end;
-};
-
-inline void *PersistentAllocator::tryAlloc(uptr size) {
- // Optimistic lock-free allocation, essentially try to bump the region ptr.
- for (;;) {
- uptr cmp = atomic_load(&region_pos, memory_order_acquire);
- uptr end = atomic_load(&region_end, memory_order_acquire);
- if (cmp == 0 || cmp + size > end) return nullptr;
- if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
- memory_order_acquire))
- return (void *)cmp;
- }
-}
-
-inline void *PersistentAllocator::alloc(uptr size) {
- // First, try to allocate optimistically.
- void *s = tryAlloc(size);
- if (s) return s;
- // If failed, lock, retry and alloc new superblock.
- SpinMutexLock l(&mtx);
- for (;;) {
- s = tryAlloc(size);
- if (s) return s;
- atomic_store(&region_pos, 0, memory_order_relaxed);
- uptr allocsz = 64 * 1024;
- if (allocsz < size) allocsz = size;
- uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
- atomic_store(&region_end, mem + allocsz, memory_order_release);
- atomic_store(&region_pos, mem, memory_order_release);
- }
-}
-
-extern PersistentAllocator thePersistentAllocator;
-inline void *PersistentAlloc(uptr sz) {
- return thePersistentAllocator.alloc(sz);
-}
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
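The deleted PersistentAllocator combined a lock-free bump-pointer fast path with a mutex-protected refill path. A standalone sketch of the same pattern in portable C++, with std::atomic, std::mutex and malloc standing in for the sanitizer primitives; all names are illustrative:

    #include <atomic>
    #include <cstdint>
    #include <cstdlib>
    #include <mutex>

    // Sketch of the removed allocator's scheme: optimistically CAS the region
    // cursor forward; on failure take the lock, retry, and map a new region.
    class BumpAllocator {
     public:
      void *Alloc(size_t size) {
        if (void *p = TryAlloc(size)) return p;   // lock-free fast path
        std::lock_guard<std::mutex> l(mtx_);      // slow path: refill the region
        for (;;) {
          if (void *p = TryAlloc(size)) return p;
          size_t alloc_sz = size > kRegion ? size : kRegion;
          void *mem = std::malloc(alloc_sz);      // stands in for MmapOrDie
          if (!mem) std::abort();                 // MmapOrDie never returns null
          pos_.store(0, std::memory_order_relaxed);
          end_.store(reinterpret_cast<uintptr_t>(mem) + alloc_sz,
                     std::memory_order_release);
          pos_.store(reinterpret_cast<uintptr_t>(mem), std::memory_order_release);
        }
      }

     private:
      void *TryAlloc(size_t size) {
        for (;;) {
          uintptr_t cur = pos_.load(std::memory_order_acquire);
          uintptr_t end = end_.load(std::memory_order_acquire);
          if (cur == 0 || cur + size > end) return nullptr;
          if (pos_.compare_exchange_weak(cur, cur + size,
                                         std::memory_order_acquire))
            return reinterpret_cast<void *>(cur);
        }
      }

      static constexpr size_t kRegion = 64 * 1024;
      std::mutex mtx_;
      std::atomic<uintptr_t> pos_{0}, end_{0};
    };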
lib/tsan/sanitizer_common/sanitizer_platform.h
@@ -22,103 +22,123 @@
// function declarations into a .S file which doesn't compile.
// https://crbug.com/1162741
#if __has_include(<features.h>) && !defined(__ANDROID__)
-#include <features.h>
+# include <features.h>
#endif
#if defined(__linux__)
-# define SANITIZER_LINUX 1
+# define SANITIZER_LINUX 1
#else
-# define SANITIZER_LINUX 0
+# define SANITIZER_LINUX 0
#endif
#if defined(__GLIBC__)
-# define SANITIZER_GLIBC 1
+# define SANITIZER_GLIBC 1
#else
-# define SANITIZER_GLIBC 0
+# define SANITIZER_GLIBC 0
#endif
#if defined(__FreeBSD__)
-# define SANITIZER_FREEBSD 1
+# define SANITIZER_FREEBSD 1
#else
-# define SANITIZER_FREEBSD 0
+# define SANITIZER_FREEBSD 0
#endif
#if defined(__NetBSD__)
-# define SANITIZER_NETBSD 1
+# define SANITIZER_NETBSD 1
#else
-# define SANITIZER_NETBSD 0
+# define SANITIZER_NETBSD 0
#endif
#if defined(__sun__) && defined(__svr4__)
-# define SANITIZER_SOLARIS 1
+# define SANITIZER_SOLARIS 1
#else
-# define SANITIZER_SOLARIS 0
+# define SANITIZER_SOLARIS 0
#endif
+// - SANITIZER_APPLE: all Apple code
+// - TARGET_OS_OSX: macOS
+// - SANITIZER_IOS: devices (iOS and iOS-like)
+// - SANITIZER_WATCHOS
+// - SANITIZER_TVOS
+// - SANITIZER_IOSSIM: simulators (iOS and iOS-like)
+// - SANITIZER_DRIVERKIT
#if defined(__APPLE__)
-# define SANITIZER_MAC 1
-# include <TargetConditionals.h>
-# if TARGET_OS_OSX
-# define SANITIZER_OSX 1
-# else
-# define SANITIZER_OSX 0
-# endif
-# if TARGET_OS_IPHONE
-# define SANITIZER_IOS 1
-# else
-# define SANITIZER_IOS 0
-# endif
-# if TARGET_OS_SIMULATOR
-# define SANITIZER_IOSSIM 1
-# else
-# define SANITIZER_IOSSIM 0
-# endif
-#else
-# define SANITIZER_MAC 0
-# define SANITIZER_IOS 0
-# define SANITIZER_IOSSIM 0
-# define SANITIZER_OSX 0
-#endif
-
-#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
-# define SANITIZER_WATCHOS 1
-#else
-# define SANITIZER_WATCHOS 0
-#endif
-
-#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
-# define SANITIZER_TVOS 1
+# define SANITIZER_APPLE 1
+# include <TargetConditionals.h>
+# if TARGET_OS_OSX
+# define SANITIZER_OSX 1
+# else
+# define SANITIZER_OSX 0
+# endif
+# if TARGET_OS_IPHONE
+# define SANITIZER_IOS 1
+# else
+# define SANITIZER_IOS 0
+# endif
+# if TARGET_OS_WATCH
+# define SANITIZER_WATCHOS 1
+# else
+# define SANITIZER_WATCHOS 0
+# endif
+# if TARGET_OS_TV
+# define SANITIZER_TVOS 1
+# else
+# define SANITIZER_TVOS 0
+# endif
+# if TARGET_OS_SIMULATOR
+# define SANITIZER_IOSSIM 1
+# else
+# define SANITIZER_IOSSIM 0
+# endif
+# if defined(TARGET_OS_DRIVERKIT) && TARGET_OS_DRIVERKIT
+# define SANITIZER_DRIVERKIT 1
+# else
+# define SANITIZER_DRIVERKIT 0
+# endif
#else
-# define SANITIZER_TVOS 0
+# define SANITIZER_APPLE 0
+# define SANITIZER_OSX 0
+# define SANITIZER_IOS 0
+# define SANITIZER_WATCHOS 0
+# define SANITIZER_TVOS 0
+# define SANITIZER_IOSSIM 0
+# define SANITIZER_DRIVERKIT 0
#endif
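The rename from SANITIZER_MAC to SANITIZER_APPLE makes the hierarchy explicit: SANITIZER_APPLE is true for every Apple target, and the narrower macros (OSX, IOS, WATCHOS, TVOS, IOSSIM, DRIVERKIT) refine it. A hypothetical sketch of how code is meant to branch on them; the helper is illustrative and not part of the runtime:

    #include "sanitizer_platform.h"

    // Hypothetical helper. Note that SANITIZER_IOS is also set for the
    // iOS-like targets, so the more specific checks come first.
    static const char *PlatformName() {
    #if SANITIZER_APPLE
    # if SANITIZER_OSX
      return "macOS";
    # elif SANITIZER_WATCHOS
      return "watchOS";
    # elif SANITIZER_TVOS
      return "tvOS";
    # elif SANITIZER_IOS
      return SANITIZER_IOSSIM ? "iOS simulator" : "iOS";
    # else
      return "Apple (other)";
    # endif
    #elif SANITIZER_WINDOWS
      return "Windows";
    #elif SANITIZER_LINUX
      return SANITIZER_ANDROID ? "Android" : "Linux";
    #else
      return "other POSIX";
    #endif
    }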
#if defined(_WIN32)
-# define SANITIZER_WINDOWS 1
+# define SANITIZER_WINDOWS 1
#else
-# define SANITIZER_WINDOWS 0
+# define SANITIZER_WINDOWS 0
#endif
#if defined(_WIN64)
-# define SANITIZER_WINDOWS64 1
+# define SANITIZER_WINDOWS64 1
#else
-# define SANITIZER_WINDOWS64 0
+# define SANITIZER_WINDOWS64 0
#endif
#if defined(__ANDROID__)
-# define SANITIZER_ANDROID 1
+# define SANITIZER_ANDROID 1
#else
-# define SANITIZER_ANDROID 0
+# define SANITIZER_ANDROID 0
#endif
#if defined(__Fuchsia__)
-# define SANITIZER_FUCHSIA 1
+# define SANITIZER_FUCHSIA 1
#else
-# define SANITIZER_FUCHSIA 0
+# define SANITIZER_FUCHSIA 0
#endif
-#define SANITIZER_POSIX \
- (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
- SANITIZER_NETBSD || SANITIZER_SOLARIS)
+// Assume that a Linux target without glibc that is not Android uses musl libc.
+#if SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID
+# define SANITIZER_MUSL 1
+#else
+# define SANITIZER_MUSL 0
+#endif
+
+#define SANITIZER_POSIX \
+ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
+ SANITIZER_NETBSD || SANITIZER_SOLARIS)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@@ -127,58 +147,79 @@
#endif
#if SANITIZER_WORDSIZE == 64
-# define FIRST_32_SECOND_64(a, b) (b)
+# define FIRST_32_SECOND_64(a, b) (b)
#else
-# define FIRST_32_SECOND_64(a, b) (a)
+# define FIRST_32_SECOND_64(a, b) (a)
#endif
#if defined(__x86_64__) && !defined(_LP64)
-# define SANITIZER_X32 1
+# define SANITIZER_X32 1
#else
-# define SANITIZER_X32 0
+# define SANITIZER_X32 0
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+# define SANITIZER_X64 1
+#else
+# define SANITIZER_X64 0
#endif
#if defined(__i386__) || defined(_M_IX86)
-# define SANITIZER_I386 1
+# define SANITIZER_I386 1
#else
-# define SANITIZER_I386 0
+# define SANITIZER_I386 0
#endif
#if defined(__mips__)
-# define SANITIZER_MIPS 1
-# if defined(__mips64)
+# define SANITIZER_MIPS 1
+# if defined(__mips64) && _MIPS_SIM == _ABI64
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 1
+# else
+# define SANITIZER_MIPS32 1
+# define SANITIZER_MIPS64 0
+# endif
+#else
+# define SANITIZER_MIPS 0
# define SANITIZER_MIPS32 0
-# define SANITIZER_MIPS64 1
-# else
-# define SANITIZER_MIPS32 1
# define SANITIZER_MIPS64 0
-# endif
-#else
-# define SANITIZER_MIPS 0
-# define SANITIZER_MIPS32 0
-# define SANITIZER_MIPS64 0
#endif
#if defined(__s390__)
-# define SANITIZER_S390 1
-# if defined(__s390x__)
+# define SANITIZER_S390 1
+# if defined(__s390x__)
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 1
+# else
+# define SANITIZER_S390_31 1
+# define SANITIZER_S390_64 0
+# endif
+#else
+# define SANITIZER_S390 0
# define SANITIZER_S390_31 0
-# define SANITIZER_S390_64 1
-# else
-# define SANITIZER_S390_31 1
# define SANITIZER_S390_64 0
-# endif
+#endif
+
+#if defined(__sparc__)
+# define SANITIZER_SPARC 1
+# if defined(__arch64__)
+# define SANITIZER_SPARC32 0
+# define SANITIZER_SPARC64 1
+# else
+# define SANITIZER_SPARC32 1
+# define SANITIZER_SPARC64 0
+# endif
#else
-# define SANITIZER_S390 0
-# define SANITIZER_S390_31 0
-# define SANITIZER_S390_64 0
+# define SANITIZER_SPARC 0
+# define SANITIZER_SPARC32 0
+# define SANITIZER_SPARC64 0
#endif
#if defined(__powerpc__)
-# define SANITIZER_PPC 1
-# if defined(__powerpc64__)
-# define SANITIZER_PPC32 0
-# define SANITIZER_PPC64 1
+# define SANITIZER_PPC 1
+# if defined(__powerpc64__)
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 1
// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
// big-endian, and uses v1 ABI (known for its function descriptors),
// while the new powerpc64le target is little-endian and uses v2.
@@ -186,106 +227,109 @@
// (eg. big-endian v2), but you won't find such combinations in the wild
// (it'd require bootstrapping a whole system, which would be quite painful
// - there's no target triple for that). LLVM doesn't support them either.
-# if _CALL_ELF == 2
-# define SANITIZER_PPC64V1 0
-# define SANITIZER_PPC64V2 1
+# if _CALL_ELF == 2
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 1
+# else
+# define SANITIZER_PPC64V1 1
+# define SANITIZER_PPC64V2 0
+# endif
# else
-# define SANITIZER_PPC64V1 1
-# define SANITIZER_PPC64V2 0
+# define SANITIZER_PPC32 1
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
# endif
-# else
-# define SANITIZER_PPC32 1
+#else
+# define SANITIZER_PPC 0
+# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
-# endif
+#endif
+
+#if defined(__arm__) || defined(_M_ARM)
+# define SANITIZER_ARM 1
#else
-# define SANITIZER_PPC 0
-# define SANITIZER_PPC32 0
-# define SANITIZER_PPC64 0
-# define SANITIZER_PPC64V1 0
-# define SANITIZER_PPC64V2 0
+# define SANITIZER_ARM 0
#endif
-#if defined(__arm__)
-# define SANITIZER_ARM 1
+#if defined(__aarch64__) || defined(_M_ARM64)
+# define SANITIZER_ARM64 1
#else
-# define SANITIZER_ARM 0
+# define SANITIZER_ARM64 0
#endif
#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
-# define SANITIZER_SOLARIS32 1
+# define SANITIZER_SOLARIS32 1
#else
-# define SANITIZER_SOLARIS32 0
+# define SANITIZER_SOLARIS32 0
#endif
#if defined(__riscv) && (__riscv_xlen == 64)
-#define SANITIZER_RISCV64 1
+# define SANITIZER_RISCV64 1
#else
-#define SANITIZER_RISCV64 0
+# define SANITIZER_RISCV64 0
+#endif
+
+#if defined(__loongarch_lp64)
+# define SANITIZER_LOONGARCH64 1
+#else
+# define SANITIZER_LOONGARCH64 0
#endif
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
-// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
-// does not work well and we need to fallback to SizeClassAllocator32.
+// But in some cases SizeClassAllocator64 does not work well and we need to
+// fallback to SizeClassAllocator32.
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
-# define SANITIZER_CAN_USE_ALLOCATOR64 1
-# elif defined(__mips64) || defined(__aarch64__)
-# define SANITIZER_CAN_USE_ALLOCATOR64 0
-# else
-# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
-# endif
+# if SANITIZER_RISCV64 || SANITIZER_IOS
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# elif defined(__mips64) || defined(__hexagon__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
#endif
// The range of addresses which can be returned by mmap.
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
-#if SANITIZER_GO && defined(__mips64)
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
-#else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
-#endif
+# if SANITIZER_GO && defined(__mips64)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+# endif
#elif SANITIZER_RISCV64
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
-# if SANITIZER_MAC
-# if SANITIZER_OSX || SANITIZER_IOSSIM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# if SANITIZER_APPLE
+# if SANITIZER_OSX || SANITIZER_IOSSIM
+# define SANITIZER_MMAP_RANGE_SIZE \
+ FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE \
+ FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# endif
# else
- // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
-# else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
-# endif
#elif defined(__sparc__)
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
#else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
// Whether the addresses are sign-extended from the VMA range to the word.
// The SPARC64 Linux port implements this to split the VMA space into two
// non-contiguous halves with a huge hole in the middle.
#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
-#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
#else
-#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
-#endif
-
-// The AArch64 and RISC-V linux ports use the canonical syscall set as
-// mandated by the upstream linux community for all new ports. Other ports
-// may still use legacy syscalls.
-#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
-# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX
-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
-# else
-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
-# endif
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
// uid16 syscalls can only be used when the following conditions are
@@ -296,15 +340,15 @@
// Since we don't want to include libc headers here, we check the
// target only.
#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
-#define SANITIZER_USES_UID16_SYSCALLS 1
+# define SANITIZER_USES_UID16_SYSCALLS 1
#else
-#define SANITIZER_USES_UID16_SYSCALLS 0
+# define SANITIZER_USES_UID16_SYSCALLS 0
#endif
#if defined(__mips__)
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif
/// \macro MSC_PREREQ
@@ -313,15 +357,15 @@
/// * 1800: Microsoft Visual Studio 2013 / 12.0
/// * 1900: Microsoft Visual Studio 2015 / 14.0
#ifdef _MSC_VER
-# define MSC_PREREQ(version) (_MSC_VER >= (version))
+# define MSC_PREREQ(version) (_MSC_VER >= (version))
#else
-# define MSC_PREREQ(version) 0
+# define MSC_PREREQ(version) 0
#endif
-#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
-# define SANITIZER_NON_UNIQUE_TYPEINFO 0
+#if SANITIZER_APPLE && defined(__x86_64__)
+# define SANITIZER_NON_UNIQUE_TYPEINFO 0
#else
-# define SANITIZER_NON_UNIQUE_TYPEINFO 1
+# define SANITIZER_NON_UNIQUE_TYPEINFO 1
#endif
// On linux, some architectures had an ABI transition from 64-bit long double
@@ -329,11 +373,11 @@
// involving long doubles come in two versions, and we need to pass the
// correct one to dlvsym when intercepting them.
#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
-#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
+# define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
#if SANITIZER_GO == 0
-# define SANITIZER_GO 0
+# define SANITIZER_GO 0
#endif
// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
@@ -341,40 +385,64 @@
// dlopen mallocs "libgcc_s.so" string which confuses LSan, it fails to realize
// that this allocation happens in dynamic linker and should be ignored.
#if SANITIZER_PPC || defined(__thumb__)
-# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
#else
-# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#endif
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
-# define SANITIZER_MADVISE_DONTNEED MADV_FREE
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || SANITIZER_SOLARIS
+# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
-# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
+# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
#endif
// Older gcc have issues aligning to a constexpr, and require an integer.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
#if defined(__powerpc__) || defined(__powerpc64__)
-# define SANITIZER_CACHE_LINE_SIZE 128
+# define SANITIZER_CACHE_LINE_SIZE 128
#else
-# define SANITIZER_CACHE_LINE_SIZE 64
+# define SANITIZER_CACHE_LINE_SIZE 64
#endif
// Enable offline markup symbolizer for Fuchsia.
#if SANITIZER_FUCHSIA
# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
-#define SANITIZER_SYMBOLIZER_MARKUP 0
+# define SANITIZER_SYMBOLIZER_MARKUP 0
#endif
// Enable ability to support sanitizer initialization that is
// compatible with the sanitizer library being loaded via
// `dlopen()`.
-#if SANITIZER_MAC
-#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+#if SANITIZER_APPLE
+# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+#else
+# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+#endif
+
+// SANITIZER_SUPPORTS_THREADLOCAL
+// 1 - THREADLOCAL macro is supported by target
+// 0 - THREADLOCAL macro is not supported by target
+#ifndef __has_feature
+// TODO: Support other compilers here
+# define SANITIZER_SUPPORTS_THREADLOCAL 1
+#else
+# if __has_feature(tls)
+# define SANITIZER_SUPPORTS_THREADLOCAL 1
+# else
+# define SANITIZER_SUPPORTS_THREADLOCAL 0
+# endif
+#endif
+
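SANITIZER_SUPPORTS_THREADLOCAL above lets runtime code degrade gracefully on targets where the THREADLOCAL keyword macro used elsewhere in the runtime is unusable. A small hedged sketch of the intended use; the cache variable is illustrative:

    // Illustrative: keep a fast per-thread cache where TLS works, otherwise
    // fall back to a single global that callers must guard with a lock.
    #if SANITIZER_SUPPORTS_THREADLOCAL
    static THREADLOCAL void *per_thread_cache;
    #else
    static void *per_thread_cache;  // shared; protect with a Mutex
    #endif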
+#if defined(__thumb__) && defined(__linux__)
+// Workaround for
+// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage
+// or
+// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage
+// It fails *rss_limit_mb_test* without meaningful errors.
+# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1
#else
-#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0
#endif
-#endif // SANITIZER_PLATFORM_H
+#endif // SANITIZER_PLATFORM_H
lib/tsan/sanitizer_common/sanitizer_platform_interceptors.h
@@ -76,7 +76,7 @@
#define SI_LINUX 0
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#define SI_MAC 1
#define SI_NOT_MAC 0
#else
@@ -126,7 +126,7 @@
#define SI_SOLARIS32 0
#endif
-#if SANITIZER_POSIX && !SANITIZER_MAC
+#if SANITIZER_POSIX && !SANITIZER_APPLE
#define SI_POSIX_NOT_MAC 1
#else
#define SI_POSIX_NOT_MAC 0
@@ -229,11 +229,15 @@
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
(SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
+#define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_DN_COMP_EXPAND SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX
#define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
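Each SANITIZER_INTERCEPT_* gate in this header is consumed by the shared interceptor sources: the interceptor body and its registration macro are only compiled when the gate is non-zero. A rough sketch of that pattern, loosely modeled on the getitimer interceptor; the exact bodies in sanitizer_common_interceptors.inc differ, so treat this as illustrative:

    #if SANITIZER_INTERCEPT_GETITIMER
    INTERCEPTOR(int, getitimer, int which, void *curr_value) {
      void *ctx;
      COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
      int res = REAL(getitimer)(which, curr_value);
      if (!res && curr_value)
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
      return res;
    }
    #  define INIT_GETITIMER COMMON_INTERCEPT_FUNCTION(getitimer)
    #else
    #  define INIT_GETITIMER
    #endif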
@@ -251,7 +255,8 @@
#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
-#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
+#define SANITIZER_INTERCEPT_ACCEPT4 \
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
#define SANITIZER_INTERCEPT_MODF SI_POSIX
#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
@@ -264,11 +269,11 @@
#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
-#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_READDIR64 SI_GLIBC || SI_SOLARIS32
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__) || SANITIZER_RISCV64)
+ defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0
@@ -303,13 +308,13 @@
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_GLIBC || SI_SOLARIS32
#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_WORDEXP \
(SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS) // NOLINT
+ SI_SOLARIS)
#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
@@ -325,11 +330,10 @@
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_STATFS64 \
- (((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATFS64 SI_GLIBC && SANITIZER_HAS_STATFS64
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
-#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS64 SI_GLIBC
#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_HOST \
@@ -337,12 +341,14 @@
#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SHMCTL \
(((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
- SI_NETBSD || SI_SOLARIS) // NOLINT
+ SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP \
+ (SI_LINUX_NOT_ANDROID || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD)
@@ -362,6 +368,8 @@
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
(SI_LINUX_NOT_ANDROID && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_TRYJOIN SI_GLIBC
+#define SANITIZER_INTERCEPT_TIMEDJOIN SI_GLIBC
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
@@ -391,8 +399,6 @@
#define SANITIZER_INTERCEPT__EXIT \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
-#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
@@ -400,7 +406,7 @@
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
@@ -445,7 +451,8 @@
#define SANITIZER_INTERCEPT_SEM \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
-#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_MINCORE \
+ (SI_LINUX || SI_NETBSD || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
#define SANITIZER_INTERCEPT_CTERMID \
(SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
@@ -457,13 +464,17 @@
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
-#define SANITIZER_INTERCEPT_STAT \
- (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
-#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
+#define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33))
+#define SANITIZER_INTERCEPT_STAT \
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
+ SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX && SANITIZER_HAS_STAT64
+#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT___XSTAT \
+ ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT___XSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
-#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LXSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT_UTMP \
(SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)
@@ -474,7 +485,7 @@
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
-#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_MMAP64 SI_GLIBC || SI_SOLARIS
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
@@ -484,6 +495,7 @@
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSLEN 1
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
@@ -496,7 +508,8 @@
#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_GETGROUPLIST SI_NETBSD
+#define SANITIZER_INTERCEPT_GETGROUPLIST \
+ (SI_NETBSD || SI_FREEBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_STRLCPY \
(SI_NETBSD || SI_FREEBSD || SI_MAC || SI_ANDROID)
@@ -517,10 +530,11 @@
#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
-#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_TTYENT (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_TTYENTPATH SI_NETBSD
+#define SANITIZER_INTERCEPT_PROTOENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
-#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
+#define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_SETVBUF \
(SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
@@ -536,17 +550,17 @@
#define SANITIZER_INTERCEPT_MODCTL SI_NETBSD
#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
#define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_FPARSELN SI_NETBSD
+#define SANITIZER_INTERCEPT_FPARSELN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD
#define SANITIZER_INTERCEPT_STRTOI SI_NETBSD
#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
-#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD5 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
-#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
+#define SANITIZER_INTERCEPT_SHA2 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_CDB SI_NETBSD
#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_POPEN SI_POSIX
@@ -559,25 +573,30 @@
#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
-#define SANITIZER_INTERCEPT_CRYPT (SI_POSIX && !SI_ANDROID)
-#define SANITIZER_INTERCEPT_CRYPT_R (SI_LINUX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_GETRANDOM \
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
-#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
+#define SANITIZER_INTERCEPT_GETENTROPY \
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
+#define SANITIZER_INTERCEPT_BSEARCH \
+ (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \
- (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
+ (SI_POSIX && !(SANITIZER_APPLE && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
+#define SANITIZER_INTERCEPT_PROCCTL SI_FREEBSD
+#define SANITIZER_INTERCEPT_HEXDUMP SI_FREEBSD
+#define SANITIZER_INTERCEPT_ARGP_PARSE SI_GLIBC
+#define SANITIZER_INTERCEPT_CPUSET_GETAFFINITY SI_FREEBSD
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have
lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -17,6 +17,7 @@
#include <sys/capsicum.h>
#include <sys/consio.h>
+#include <sys/cpuset.h>
#include <sys/filio.h>
#include <sys/ipc.h>
#include <sys/kbio.h>
@@ -69,11 +70,17 @@
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
+#include <md5.h>
+#include <sha224.h>
+#include <sha256.h>
+#include <sha384.h>
+#include <sha512.h>
#include <stdio.h>
#include <stringlist.h>
#include <term.h>
#include <termios.h>
#include <time.h>
+#include <ttyent.h>
#include <utime.h>
#include <utmpx.h>
#include <vis.h>
@@ -97,6 +104,7 @@ void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}
+unsigned struct_cpuset_sz = sizeof(cpuset_t);
unsigned struct_cap_rights_sz = sizeof(cap_rights_t);
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
@@ -124,7 +132,7 @@ unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
@@ -167,12 +175,21 @@ uptr __sanitizer_in_addr_sz(int af) {
return 0;
}
+// For FreeBSD the actual size of a directory entry is not always in d_reclen.
+// Use the appropriate macro to get the correct size for all cases (e.g. NFS).
+u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp) {
+ return _GENERIC_DIRSIZ(dp);
+}
+
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
+const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
+int struct_ttyent_sz = sizeof(struct ttyent);
+
// ioctl arguments
unsigned struct_ifreq_sz = sizeof(struct ifreq);
unsigned struct_termios_sz = sizeof(struct termios);
@@ -196,6 +213,10 @@ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+unsigned struct_procctl_reaper_status_sz = sizeof(struct __sanitizer_procctl_reaper_status);
+unsigned struct_procctl_reaper_pidinfo_sz = sizeof(struct __sanitizer_procctl_reaper_pidinfo);
+unsigned struct_procctl_reaper_pids_sz = sizeof(struct __sanitizer_procctl_reaper_pids);
+unsigned struct_procctl_reaper_kill_sz = sizeof(struct __sanitizer_procctl_reaper_kill);
const unsigned long __sanitizer_bufsiz = BUFSIZ;
const unsigned IOCTL_NOT_PRESENT = 0;
@@ -357,6 +378,22 @@ const int si_SEGV_MAPERR = SEGV_MAPERR;
const int si_SEGV_ACCERR = SEGV_ACCERR;
const int unvis_valid = UNVIS_VALID;
const int unvis_validpush = UNVIS_VALIDPUSH;
+
+const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
+const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
+
+#define SHA2_CONST(LEN) \
+ const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
+ const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
+ const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
+ const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
+
+SHA2_CONST(224);
+SHA2_CONST(256);
+SHA2_CONST(384);
+SHA2_CONST(512);
+
+#undef SHA2_CONST
} // namespace __sanitizer
using namespace __sanitizer;
@@ -529,4 +566,5 @@ COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));
+COMPILER_CHECK(sizeof(__sanitizer_cpuset_t) >= sizeof(cpuset_t));
#endif // SANITIZER_FREEBSD
lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -16,26 +16,26 @@
#if SANITIZER_FREEBSD
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform.h"
-#include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform.h"
+# include "sanitizer_platform_limits_posix.h"
// Get sys/_types.h, because that tells us whether 64-bit inodes are
// used in struct dirent below.
-#include <sys/_types.h>
+# include <sys/_types.h>
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
-#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
- (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if defined(__powerpc64__)
+# if defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 0;
-#else
+# else
const unsigned struct___old_kernel_stat_sz = 32;
-#endif
+# endif
extern unsigned struct_rusage_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
@@ -57,7 +57,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
@@ -114,11 +114,24 @@ struct __sanitizer_ipc_perm {
long key;
};
-#if !defined(__i386__)
+struct __sanitizer_protoent {
+ char *p_name;
+ char **p_aliases;
+ int p_proto;
+};
+
+struct __sanitizer_netent {
+ char *n_name;
+ char **n_aliases;
+ int n_addrtype;
+ u32 n_net;
+};
+
+# if !defined(__i386__)
typedef long long __sanitizer_time_t;
-#else
+# else
typedef long __sanitizer_time_t;
-#endif
+# endif
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
@@ -147,7 +160,7 @@ struct __sanitizer_ifaddrs {
unsigned int ifa_flags;
void *ifa_addr; // (struct sockaddr *)
void *ifa_netmask; // (struct sockaddr *)
-#undef ifa_dstaddr
+# undef ifa_dstaddr
void *ifa_dstaddr; // (struct sockaddr *)
void *ifa_data;
};
@@ -229,37 +242,43 @@ struct __sanitizer_cmsghdr {
};
struct __sanitizer_dirent {
-#if defined(__INO64)
+# if defined(__INO64)
unsigned long long d_fileno;
unsigned long long d_off;
-#else
+# else
unsigned int d_fileno;
-#endif
+# endif
unsigned short d_reclen;
- // more fields that we don't care about
+ u8 d_type;
+ u8 d_pad0;
+ u16 d_namlen;
+ u16 d_pad1;
+ char d_name[256];
};
+u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp);
+
// 'clock_t' is 32 bits wide on x64 FreeBSD
typedef int __sanitizer_clock_t;
typedef int __sanitizer_clockid_t;
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__mips__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
-#else
+# else
typedef unsigned short __sanitizer___kernel_uid_t;
typedef unsigned short __sanitizer___kernel_gid_t;
-#endif
+# endif
typedef long long __sanitizer___kernel_off_t;
-#if defined(__powerpc__) || defined(__mips__)
+# if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
-#else
+# else
typedef unsigned short __sanitizer___kernel_old_uid_t;
typedef unsigned short __sanitizer___kernel_old_gid_t;
-#endif
+# endif
typedef long long __sanitizer___kernel_loff_t;
typedef struct {
@@ -366,9 +385,12 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
+extern int struct_ttyent_sz;
+
struct __sanitizer_wordexp_t {
uptr we_wordc;
char **we_wordv;
@@ -398,39 +420,81 @@ struct __sanitizer_ifconf {
} ifc_ifcu;
};
-#define IOC_NRBITS 8
-#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
-#define IOC_SIZEBITS 13
-#define IOC_DIRBITS 3
-#define IOC_NONE 1U
-#define IOC_WRITE 4U
-#define IOC_READ 2U
-#else
-#define IOC_SIZEBITS 14
-#define IOC_DIRBITS 2
-#define IOC_NONE 0U
-#define IOC_WRITE 1U
-#define IOC_READ 2U
-#endif
-#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
-#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
-#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
-#if defined(IOC_DIRMASK)
-#undef IOC_DIRMASK
-#endif
-#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
-#define IOC_NRSHIFT 0
-#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
-#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
-#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
-#define EVIOC_EV_MAX 0x1f
-#define EVIOC_ABS_MAX 0x3f
-
-#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
-#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
-#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
-#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+struct __sanitizer__ttyent {
+ char *ty_name;
+ char *ty_getty;
+ char *ty_type;
+ int ty_status;
+ char *ty_window;
+ char *ty_comment;
+ char *ty_group;
+};
+
+// procctl reaper data for PROCCTL_REAPER flags
+struct __sanitizer_procctl_reaper_status {
+ unsigned int rs_flags;
+ unsigned int rs_children;
+ unsigned int rs_descendants;
+ pid_t rs_reaper;
+ pid_t rs_pid;
+ unsigned int rs_pad0[15];
+};
+
+struct __sanitizer_procctl_reaper_pidinfo {
+ pid_t pi_pid;
+ pid_t pi_subtree;
+ unsigned int pi_flags;
+ unsigned int pi_pad0[15];
+};
+
+struct __sanitizer_procctl_reaper_pids {
+ unsigned int rp_count;
+ unsigned int rp_pad0[15];
+ struct __sanitizer_procctl_reaper_pidinfo *rp_pids;
+};
+
+struct __sanitizer_procctl_reaper_kill {
+ int rk_sig;
+ unsigned int rk_flags;
+ pid_t rk_subtree;
+ unsigned int rk_killed;
+ pid_t rk_fpid;
+ unsigned int rk_pad[15];
+};
+
+# define IOC_NRBITS 8
+# define IOC_TYPEBITS 8
+# if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+# define IOC_SIZEBITS 13
+# define IOC_DIRBITS 3
+# define IOC_NONE 1U
+# define IOC_WRITE 4U
+# define IOC_READ 2U
+# else
+# define IOC_SIZEBITS 14
+# define IOC_DIRBITS 2
+# define IOC_NONE 0U
+# define IOC_WRITE 1U
+# define IOC_READ 2U
+# endif
+# define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+# define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+# define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+# if defined(IOC_DIRMASK)
+# undef IOC_DIRMASK
+# endif
+# define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+# define IOC_NRSHIFT 0
+# define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+# define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+# define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+# define EVIOC_EV_MAX 0x1f
+# define EVIOC_ABS_MAX 0x3f
+
+# define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+# define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+# define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+# define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
@@ -454,6 +518,11 @@ extern unsigned struct_ppp_stats_sz;
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
+extern unsigned struct_procctl_reaper_status_sz;
+extern unsigned struct_procctl_reaper_pidinfo_sz;
+extern unsigned struct_procctl_reaper_pids_sz;
+extern unsigned struct_procctl_reaper_kill_sz;
+
// ioctl request identifiers
// A special value to mark ioctls that are not present on the target platform,
@@ -621,6 +690,22 @@ extern unsigned IOCTL_KDSKBMODE;
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
+extern const unsigned MD5_CTX_sz;
+extern const unsigned MD5_return_length;
+
+#define SHA2_EXTERN(LEN) \
+ extern const unsigned SHA##LEN##_CTX_sz; \
+ extern const unsigned SHA##LEN##_return_length; \
+ extern const unsigned SHA##LEN##_block_length; \
+ extern const unsigned SHA##LEN##_digest_length
+
+SHA2_EXTERN(224);
+SHA2_EXTERN(256);
+SHA2_EXTERN(384);
+SHA2_EXTERN(512);
+
+#undef SHA2_EXTERN
+
struct __sanitizer_cap_rights {
u64 cr_rights[2];
};
@@ -630,26 +715,37 @@ extern unsigned struct_cap_rights_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_StringList_sz;
+
+struct __sanitizer_cpuset {
+#if __FreeBSD_version >= 1400090
+ long __bits[(1024 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
+#else
+ long __bits[(256 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
+#endif
+};
+
+typedef struct __sanitizer_cpuset __sanitizer_cpuset_t;
+extern unsigned struct_cpuset_sz;
} // namespace __sanitizer
-#define CHECK_TYPE_SIZE(TYPE) \
- COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+# define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
-#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
- offsetof(CLASS, MEMBER))
+# define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
-#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((struct CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
- offsetof(struct CLASS, MEMBER))
+# define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
-#define SIGACTION_SYMNAME sigaction
+# define SIGACTION_SYMNAME sigaction
#endif
lib/tsan/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -28,44 +28,39 @@
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <linux/posix_types.h>
-#if defined(__x86_64__) || defined(__mips__)
-#include <sys/stat.h>
-#else
-#define ino_t __kernel_ino_t
-#define mode_t __kernel_mode_t
-#define nlink_t __kernel_nlink_t
-#define uid_t __kernel_uid_t
-#define gid_t __kernel_gid_t
-#define off_t __kernel_off_t
-#define time_t __kernel_time_t
+# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
+# include <sys/stat.h>
+# else
+# define ino_t __kernel_ino_t
+# define mode_t __kernel_mode_t
+# define nlink_t __kernel_nlink_t
+# define uid_t __kernel_uid_t
+# define gid_t __kernel_gid_t
+# define off_t __kernel_off_t
+# define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs.
-#include <asm/stat.h>
-#undef ino_t
-#undef mode_t
-#undef nlink_t
-#undef uid_t
-#undef gid_t
-#undef off_t
-#endif
-
-#include <linux/aio_abi.h>
-
-#if !SANITIZER_ANDROID
-#include <sys/statfs.h>
-#include <linux/perf_event.h>
-#endif
+# include <asm/stat.h>
+# undef ino_t
+# undef mode_t
+# undef nlink_t
+# undef uid_t
+# undef gid_t
+# undef off_t
+# endif
+
+# include <linux/aio_abi.h>
+
+# if !SANITIZER_ANDROID
+# include <sys/statfs.h>
+# include <linux/perf_event.h>
+# endif
using namespace __sanitizer;
-namespace __sanitizer {
-#if !SANITIZER_ANDROID
- unsigned struct_statfs64_sz = sizeof(struct statfs64);
-#endif
-} // namespace __sanitizer
-
-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
- && !defined(__mips__) && !defined(__s390__)\
- && !defined(__sparc__) && !defined(__riscv)
+# if !defined(__powerpc64__) && !defined(__x86_64__) && \
+ !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
+ !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && \
+ !defined(__loongarch__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -554,7 +554,7 @@ unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_sembuf_sz = sizeof(struct sembuf);
@@ -666,6 +666,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
+const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
@@ -2341,8 +2342,6 @@ unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
-unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
-unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;
lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -45,7 +45,7 @@ extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
@@ -394,6 +394,7 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
@@ -2194,8 +2195,6 @@ extern unsigned IOCTL_TIOCDRAIN;
extern unsigned IOCTL_TIOCGFLAGS;
extern unsigned IOCTL_TIOCSFLAGS;
extern unsigned IOCTL_TIOCDCDTIMESTAMP;
-extern unsigned IOCTL_TIOCRCVFRAME;
-extern unsigned IOCTL_TIOCXMTFRAME;
extern unsigned IOCTL_TIOCPTMGET;
extern unsigned IOCTL_TIOCGRANTPT;
extern unsigned IOCTL_TIOCPTSNAME;
lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -18,12 +18,13 @@
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#undef _FILE_OFFSET_BITS
+#undef _TIME_BITS
#endif
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h"
@@ -51,7 +52,7 @@
#include <time.h>
#include <wchar.h>
#include <regex.h>
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
#include <utmp.h>
#endif
@@ -73,7 +74,9 @@
#include <sys/vt.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
+#if SANITIZER_ANDROID
#include <linux/fs.h>
+#endif
#include <linux/hdreg.h>
#include <linux/input.h>
#include <linux/ioctl.h>
@@ -91,10 +94,10 @@
#if SANITIZER_LINUX
# include <utime.h>
# include <sys/ptrace.h>
-#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
- SANITIZER_RISCV64
-# include <asm/ptrace.h>
-# ifdef __arm__
+# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__hexagon__) || defined(__loongarch__) || SANITIZER_RISCV64
+# include <asm/ptrace.h>
+# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
# define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
# if !defined(ARM_VFPREGS_SIZE)
@@ -152,7 +155,6 @@ typedef struct user_fpregs elf_fpregset_t;
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
-#include <crypt.h>
#endif // SANITIZER_ANDROID
#include <link.h>
@@ -163,22 +165,24 @@ typedef struct user_fpregs elf_fpregset_t;
#include <fstab.h>
#endif // SANITIZER_LINUX
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include <net/ethernet.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#endif
// Include these after system headers to avoid name clashes and ambiguities.
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_common.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform_interceptors.h"
+# include "sanitizer_platform_limits_posix.h"
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
-#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
+#if SANITIZER_HAS_STAT64
unsigned struct_stat64_sz = sizeof(struct stat64);
-#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
+#endif // SANITIZER_HAS_STAT64
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@@ -203,26 +207,60 @@ namespace __sanitizer {
unsigned struct_regex_sz = sizeof(regex_t);
unsigned struct_regmatch_sz = sizeof(regmatch_t);
-#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
+#if SANITIZER_HAS_STATFS64
unsigned struct_statfs64_sz = sizeof(struct statfs64);
-#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
+#endif // SANITIZER_HAS_STATFS64
-#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
unsigned struct_fstab_sz = sizeof(struct fstab);
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_MAC
+ // SANITIZER_APPLE
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
- unsigned ucontext_t_sz = sizeof(ucontext_t);
-#endif // !SANITIZER_ANDROID
-#if SANITIZER_LINUX
+ unsigned ucontext_t_sz(void *ctx) {
+# if SANITIZER_GLIBC && SANITIZER_X64
+ // Added in Linux kernel 3.4.0, merged to glibc in 2.16
+# ifndef FP_XSTATE_MAGIC1
+# define FP_XSTATE_MAGIC1 0x46505853U
+# endif
+ // See kernel arch/x86/kernel/fpu/signal.c for details.
+ const auto *fpregs = static_cast<ucontext_t *>(ctx)->uc_mcontext.fpregs;
+ // The member names differ across header versions, but the actual layout
+ // is always the same. So avoid using members, just use arithmetic.
+ const uint32_t *after_xmm =
+ reinterpret_cast<const uint32_t *>(fpregs + 1) - 24;
+ if (after_xmm[12] == FP_XSTATE_MAGIC1)
+ return reinterpret_cast<const char *>(fpregs) + after_xmm[13] -
+ static_cast<const char *>(ctx);
+# endif
+ return sizeof(ucontext_t);
+ }
+# endif // !SANITIZER_ANDROID
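For illustration: on x86-64 glibc the kernel may append xsave state after the fixed-size ucontext_t when delivering a signal, and the FP_XSTATE_MAGIC1 marker plus the size word after the XMM area report the real extent, so the size has to be computed from the live context. A hedged sketch of a caller (the handler and the ClearShadowForSignalContext helper are invented names, not code from this commit):

  static void HandleSignal(int sig, void *info, void *uctx) {
    // May exceed sizeof(ucontext_t) when xsave state follows the context.
    unsigned real_size = __sanitizer::ucontext_t_sz(uctx);
    ClearShadowForSignalContext(uctx, real_size);  // assumed helper
  }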
+
+# if SANITIZER_LINUX
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
- unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
+ unsigned __user_cap_data_struct_sz(void *hdrp) {
+ int u32s = 0;
+ if (hdrp) {
+ switch (((struct __user_cap_header_struct *)hdrp)->version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ u32s = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ u32s = _LINUX_CAPABILITY_U32S_2;
+ break;
+ case _LINUX_CAPABILITY_VERSION_3:
+ u32s = _LINUX_CAPABILITY_U32S_3;
+ break;
+ }
+ }
+ return sizeof(struct __user_cap_data_struct) * u32s;
+ }
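Because the data size now depends on the version requested in the header, a small self-contained check can show the difference; this is a sketch assuming <linux/capability.h> and the declaration from sanitizer_platform_limits_posix.h are both visible (V1 headers describe one __user_cap_data_struct element, V2/V3 describe two):

  #include <linux/capability.h>
  #include <cstdio>

  static unsigned CapDataBytes(unsigned version) {
    __user_cap_header_struct hdr = {};
    hdr.version = version;
    return __sanitizer::__user_cap_data_struct_sz(&hdr);
  }

  int main() {
    std::printf("v1: %u bytes, v3: %u bytes\n",
                CapDataBytes(_LINUX_CAPABILITY_VERSION_1),
                CapDataBytes(_LINUX_CAPABILITY_VERSION_3));
  }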
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
@@ -235,24 +273,28 @@ namespace __sanitizer {
unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
#endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \
defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
defined(__x86_64__) || SANITIZER_RISCV64
#define SIZEOF_STRUCT_USTAT 32
-#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
- || defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
-#define SIZEOF_STRUCT_USTAT 20
-#else
-#error Unknown size of struct ustat
-#endif
+# elif defined(__arm__) || defined(__i386__) || defined(__mips__) || \
+ defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
+ defined(__hexagon__)
+# define SIZEOF_STRUCT_USTAT 20
+# elif defined(__loongarch__)
+  // Not used. The minimum Glibc version available for LoongArch is 2.36,
+  // so the ustat() wrapper is already gone.
+# define SIZEOF_STRUCT_USTAT 0
+# else
+# error Unknown size of struct ustat
+# endif
unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
- unsigned struct_crypt_data_sz = sizeof(struct crypt_data);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // SANITIZER_GLIBC
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
@@ -280,7 +322,7 @@ namespace __sanitizer {
int shmctl_shm_stat = (int)SHM_STAT;
#endif
-#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
unsigned struct_utmp_sz = sizeof(struct utmp);
#endif
#if !SANITIZER_ANDROID
@@ -312,10 +354,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
+# if !SANITIZER_ANDROID
+ const int wordexp_wrde_dooffs = WRDE_DOOFFS;
+# endif // !SANITIZER_ANDROID
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__) || SANITIZER_RISCV64)
+     defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
@@ -325,21 +371,24 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
+#elif defined(__loongarch__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state);
#elif defined(__s390__)
unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
-#endif // __mips64 || __powerpc64__ || __aarch64__
+#endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
- SANITIZER_RISCV64
+ defined(__loongarch__) || SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
-// || __s390__
+// || __s390__ || __loongarch__
#ifdef __arm__
unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
#else
@@ -484,7 +533,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
#endif // SANITIZER_GLIBC
-#if !SANITIZER_ANDROID && !SANITIZER_MAC
+#if !SANITIZER_ANDROID && !SANITIZER_APPLE
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
@@ -570,6 +619,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_BLKROGET = BLKROGET;
unsigned IOCTL_BLKROSET = BLKROSET;
unsigned IOCTL_BLKRRPART = BLKRRPART;
+ unsigned IOCTL_BLKFRASET = BLKFRASET;
+ unsigned IOCTL_BLKFRAGET = BLKFRAGET;
+ unsigned IOCTL_BLKSECTSET = BLKSECTSET;
+ unsigned IOCTL_BLKSECTGET = BLKSECTGET;
+ unsigned IOCTL_BLKSSZGET = BLKSSZGET;
+ unsigned IOCTL_BLKBSZGET = BLKBSZGET;
+ unsigned IOCTL_BLKBSZSET = BLKBSZSET;
+ unsigned IOCTL_BLKGETSIZE64 = BLKGETSIZE64;
unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;
unsigned IOCTL_CDROMEJECT = CDROMEJECT;
unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;
@@ -837,10 +894,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT;
unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT;
#endif
- unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS;
- unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION;
- unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS;
- unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
+ unsigned IOCTL_FS_IOC_GETFLAGS = _IOR('f', 1, long);
+ unsigned IOCTL_FS_IOC_GETVERSION = _IOR('v', 1, long);
+ unsigned IOCTL_FS_IOC_SETFLAGS = _IOW('f', 2, long);
+ unsigned IOCTL_FS_IOC_SETVERSION = _IOW('v', 2, long);
unsigned IOCTL_GIO_CMAP = GIO_CMAP;
unsigned IOCTL_GIO_FONT = GIO_FONT;
unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
@@ -1035,7 +1092,7 @@ CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);
COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
#elif SANITIZER_FREEBSD
// There is no 'd_off' field on FreeBSD.
@@ -1044,7 +1101,7 @@ CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
CHECK_SIZE_AND_OFFSET(dirent64, d_off);
@@ -1077,6 +1134,15 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif
+#if SANITIZER_HAS_SIGINFO
+COMPILER_CHECK(alignof(siginfo_t) == alignof(__sanitizer_siginfo));
+using __sanitizer_siginfo_t = __sanitizer_siginfo;
+CHECK_TYPE_SIZE(siginfo_t);
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_signo);
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_errno);
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_code);
+#endif
+
#if SANITIZER_LINUX
CHECK_TYPE_SIZE(__sysctl_args);
CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
@@ -1217,7 +1283,7 @@ CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(passwd, pw_change);
CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
CHECK_SIZE_AND_OFFSET(passwd, pw_class);
@@ -1230,7 +1296,7 @@ CHECK_SIZE_AND_OFFSET(group, gr_passwd);
CHECK_SIZE_AND_OFFSET(group, gr_gid);
CHECK_SIZE_AND_OFFSET(group, gr_mem);
-#if HAVE_RPC_XDR_H
+#if HAVE_RPC_XDR_H && !SANITIZER_APPLE
CHECK_TYPE_SIZE(XDR);
CHECK_SIZE_AND_OFFSET(XDR, x_op);
CHECK_SIZE_AND_OFFSET(XDR, x_ops);
@@ -1285,4 +1351,4 @@ CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);
#endif
-#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_APPLE
lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -14,10 +14,25 @@
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
+#include "sanitizer_mallinfo.h"
+
+#if SANITIZER_APPLE
+#include <sys/cdefs.h>
+#if !__DARWIN_ONLY_64_BIT_INO_T
+#define SANITIZER_HAS_STAT64 1
+#define SANITIZER_HAS_STATFS64 1
+#else
+#define SANITIZER_HAS_STAT64 0
+#define SANITIZER_HAS_STATFS64 0
+#endif
+#elif SANITIZER_GLIBC || SANITIZER_ANDROID
+#define SANITIZER_HAS_STAT64 1
+#define SANITIZER_HAS_STATFS64 1
+#endif
#if defined(__sparc__)
// FIXME: This can't be included from tsan which does not support sparc yet.
@@ -29,7 +44,7 @@
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if !SANITIZER_IOS
+#if SANITIZER_HAS_STAT64
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
@@ -49,7 +64,9 @@ extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
+#if SANITIZER_HAS_STATFS64
extern unsigned struct_statfs64_sz;
+#endif
extern unsigned struct_regex_sz;
extern unsigned struct_regmatch_sz;
@@ -57,12 +74,12 @@ extern unsigned struct_regmatch_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
-#endif // !SANITIZER_ANDROID
+unsigned ucontext_t_sz(void *uctx);
+# endif // !SANITIZER_ANDROID
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
-#if defined(__x86_64__)
+# if defined(__x86_64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
@@ -81,9 +98,10 @@ const unsigned struct_kernel_stat64_sz = 104;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
-const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
- ? FIRST_32_SECOND_64(104, 128)
- : FIRST_32_SECOND_64(160, 216);
+const unsigned struct_kernel_stat_sz =
+ SANITIZER_ANDROID
+ ? FIRST_32_SECOND_64(104, 128)
+ : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
@@ -102,7 +120,13 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
-#endif
+# elif defined(__hexagon__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
+# elif defined(__loongarch__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
+# endif
struct __sanitizer_perf_event_attr {
unsigned type;
unsigned size;
@@ -112,7 +136,7 @@ struct __sanitizer_perf_event_attr {
extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz;
extern unsigned __user_cap_header_struct_sz;
-extern unsigned __user_cap_data_struct_sz;
+extern unsigned __user_cap_data_struct_sz(void *hdrp);
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
@@ -122,7 +146,7 @@ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
#if SANITIZER_LINUX
-#if defined(__powerpc64__) || defined(__s390__)
+#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
const unsigned struct___old_kernel_stat_sz = 0;
#elif !defined(__sparc__)
const unsigned struct___old_kernel_stat_sz = 32;
@@ -181,17 +205,7 @@ struct __sanitizer_sem_t {
};
#endif // SANITIZER_LINUX
-#if SANITIZER_ANDROID
-struct __sanitizer_struct_mallinfo {
- uptr v[10];
-};
-#endif
-
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-struct __sanitizer_struct_mallinfo {
- int v[10];
-};
-
extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statvfs64_sz;
@@ -295,7 +309,6 @@ extern unsigned struct_msqid_ds_sz;
extern unsigned struct_mq_attr_sz;
extern unsigned struct_timex_sz;
extern unsigned struct_statvfs_sz;
-extern unsigned struct_crypt_data_sz;
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec {
@@ -319,7 +332,7 @@ struct __sanitizer_ifaddrs {
};
#endif // !SANITIZER_ANDROID
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
typedef unsigned long __sanitizer_pthread_key_t;
#else
typedef unsigned __sanitizer_pthread_key_t;
@@ -346,7 +359,7 @@ struct __sanitizer_passwd {
char *pw_passwd;
int pw_uid;
int pw_gid;
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
long pw_change;
char *pw_class;
#endif
@@ -355,7 +368,7 @@ struct __sanitizer_passwd {
#endif
char *pw_dir;
char *pw_shell;
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
long pw_expire;
#endif
};
@@ -367,7 +380,8 @@ struct __sanitizer_group {
char **gr_mem;
};
-#if defined(__x86_64__) && !defined(_LP64)
+# if (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
+ (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__)
typedef long long __sanitizer_time_t;
#else
typedef long __sanitizer_time_t;
@@ -427,7 +441,7 @@ struct __sanitizer_file_handle {
};
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@@ -468,30 +482,31 @@ struct __sanitizer_mmsghdr {
};
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_seekoff;
unsigned short d_reclen;
// more fields that we don't care about
};
-#elif SANITIZER_ANDROID || defined(__x86_64__)
+# elif (SANITIZER_LINUX && !SANITIZER_GLIBC) || defined(__x86_64__) || \
+ defined(__hexagon__)
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_off;
unsigned short d_reclen;
// more fields that we don't care about
};
-#else
+# else
struct __sanitizer_dirent {
uptr d_ino;
uptr d_off;
unsigned short d_reclen;
// more fields that we don't care about
};
-#endif
+# endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_GLIBC
struct __sanitizer_dirent64 {
unsigned long long d_ino;
unsigned long long d_off;
@@ -511,8 +526,8 @@ typedef int __sanitizer_clockid_t;
#endif
#if SANITIZER_LINUX
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__mips__) || defined(__hexagon__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
@@ -552,7 +567,7 @@ typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)];
# else
typedef unsigned long __sanitizer_sigset_t;
# endif
-#elif SANITIZER_MAC
+#elif SANITIZER_APPLE
typedef unsigned __sanitizer_sigset_t;
#elif SANITIZER_LINUX
struct __sanitizer_sigset_t {
@@ -561,10 +576,35 @@ struct __sanitizer_sigset_t {
};
#endif
-struct __sanitizer_siginfo {
- // The size is determined by looking at sizeof of real siginfo_t on linux.
- u64 opaque[128 / sizeof(u64)];
+struct __sanitizer_siginfo_pad {
+#if SANITIZER_X32
+ // x32 siginfo_t is aligned to 8 bytes.
+ u64 pad[128 / sizeof(u64)];
+#else
+ // Require uptr, because siginfo_t is always pointer-size aligned on Linux.
+ uptr pad[128 / sizeof(uptr)];
+#endif
+};
+
+#if SANITIZER_LINUX
+# define SANITIZER_HAS_SIGINFO 1
+union __sanitizer_siginfo {
+ struct {
+ int si_signo;
+# if SANITIZER_MIPS
+ int si_code;
+ int si_errno;
+# else
+ int si_errno;
+ int si_code;
+# endif
+ };
+ __sanitizer_siginfo_pad pad;
};
+#else
+# define SANITIZER_HAS_SIGINFO 0
+typedef __sanitizer_siginfo_pad __sanitizer_siginfo;
+#endif
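What the union buys, sketched briefly (on Linux only; elsewhere __sanitizer_siginfo stays an opaque pad): the leading signo/errno/code fields become addressable, in MIPS or generic order, while the pad member keeps the object the same 128 bytes as the kernel's siginfo_t, which the new CHECK_TYPE_SIZE/alignof checks in sanitizer_platform_limits_posix.cpp verify:

  // Illustrative accessor, assuming SANITIZER_LINUX:
  static int SignoOf(const __sanitizer_siginfo &si) { return si.si_signo; }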
using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
@@ -712,12 +752,19 @@ struct __sanitizer_protoent {
int p_proto;
};
+struct __sanitizer_netent {
+ char *n_name;
+ char **n_aliases;
+ int n_addrtype;
+ u32 n_net;
+};
+
struct __sanitizer_addrinfo {
int ai_flags;
int ai_family;
int ai_socktype;
int ai_protocol;
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_APPLE
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
@@ -743,7 +790,7 @@ struct __sanitizer_pollfd {
short revents;
};
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_APPLE
typedef unsigned __sanitizer_nfds_t;
#else
typedef unsigned long __sanitizer_nfds_t;
@@ -773,6 +820,10 @@ extern int glob_altdirfunc;
extern unsigned path_max;
+# if !SANITIZER_ANDROID
+extern const int wordexp_wrde_dooffs;
+# endif // !SANITIZER_ANDROID
+
struct __sanitizer_wordexp_t {
uptr we_wordc;
char **we_wordv;
@@ -806,7 +857,7 @@ typedef void __sanitizer_FILE;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__) || SANITIZER_RISCV64)
+ defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@@ -839,7 +890,7 @@ extern int shmctl_shm_info;
extern int shmctl_shm_stat;
#endif
-#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
extern unsigned struct_utmp_sz;
#endif
#if !SANITIZER_ANDROID
@@ -854,7 +905,7 @@ struct __sanitizer_ifconf {
union {
void *ifcu_req;
} ifc_ifcu;
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
} __attribute__((packed));
#else
};
@@ -1007,7 +1058,7 @@ extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ppp_stats_sz;
#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
-#if !SANITIZER_ANDROID && !SANITIZER_MAC
+#if !SANITIZER_ANDROID && !SANITIZER_APPLE
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
#endif
@@ -1094,6 +1145,14 @@ extern unsigned IOCTL_BLKRASET;
extern unsigned IOCTL_BLKROGET;
extern unsigned IOCTL_BLKROSET;
extern unsigned IOCTL_BLKRRPART;
+extern unsigned IOCTL_BLKFRASET;
+extern unsigned IOCTL_BLKFRAGET;
+extern unsigned IOCTL_BLKSECTSET;
+extern unsigned IOCTL_BLKSECTGET;
+extern unsigned IOCTL_BLKSSZGET;
+extern unsigned IOCTL_BLKBSZGET;
+extern unsigned IOCTL_BLKBSZSET;
+extern unsigned IOCTL_BLKGETSIZE64;
extern unsigned IOCTL_CDROMAUDIOBUFSIZ;
extern unsigned IOCTL_CDROMEJECT;
extern unsigned IOCTL_CDROMEJECT_SW;
@@ -1440,6 +1499,6 @@ extern const int si_SEGV_ACCERR;
#define SIGACTION_SYMNAME sigaction
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_LINUX || SANITIZER_APPLE
#endif
lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.cpp
@@ -89,7 +89,7 @@ namespace __sanitizer {
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
- unsigned ucontext_t_sz = sizeof(ucontext_t);
+ unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_timespec_sz = sizeof(struct timespec);
#if SANITIZER_SOLARIS32
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
@@ -123,6 +123,7 @@ namespace __sanitizer {
unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
int glob_nomatch = GLOB_NOMATCH;
+ const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.h
@@ -43,7 +43,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_timespec_sz;
extern unsigned struct_rlimit_sz;
@@ -341,6 +341,7 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
lib/tsan/sanitizer_common/sanitizer_posix.cpp
@@ -41,6 +41,8 @@ uptr GetMmapGranularity() {
return GetPageSize();
}
+bool ErrorIsOOM(error_t err) { return err == ENOMEM; }
+
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
@@ -55,11 +57,9 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
- if (UNLIKELY(internal_iserror(res))) {
- Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- SanitizerToolName, size, size, addr);
- CHECK("unable to unmap" && 0);
- }
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno)))
+ ReportMunmapFailureAndDie(addr, size, reserrno);
DecreaseTotalMmap(size);
}
@@ -85,18 +85,26 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
+  // mmap maps entire pages and rounds up, so map_size needs to be an integral
+  // number of pages.
+ // We need to be aware of this size for calculating end and for unmapping
+ // fragments before and after the alignment region.
+ map_size = RoundUpTo(map_size, GetPageSizeCached());
uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
if (UNLIKELY(!map_res))
return nullptr;
- uptr map_end = map_res + map_size;
uptr res = map_res;
if (!IsAligned(res, alignment)) {
res = (map_res + alignment - 1) & ~(alignment - 1);
UnmapOrDie((void*)map_res, res - map_res);
}
+ uptr map_end = map_res + map_size;
uptr end = res + size;
- if (end != map_end)
+ end = RoundUpTo(end, GetPageSizeCached());
+ if (end != map_end) {
+ CHECK_LT(end, map_end);
UnmapOrDie((void*)end, map_end - end);
+ }
return (void*)res;
}
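A worked example of the rounding above, assuming 4 KiB pages: with size = 2 KiB and alignment = 4 KiB, map_size = 6 KiB is rounded up to 8 KiB, so map_end = map_res + 8 KiB. mmap returns a page-aligned map_res, so res == map_res and nothing is trimmed in front; end = res + 2 KiB is rounded up to map_res + 4 KiB, and the tail [map_res + 4 KiB, map_res + 8 KiB) is unmapped on a page boundary. Without the two RoundUpTo calls the old code computed map_end = map_res + 6 KiB and end = map_res + 2 KiB, handing UnmapOrDie a range that is not page-aligned.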
@@ -146,7 +154,11 @@ bool MprotectReadOnly(uptr addr, uptr size) {
return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}
-#if !SANITIZER_MAC
+bool MprotectReadWrite(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void *)addr, size, PROT_READ | PROT_WRITE);
+}
+
+#if !SANITIZER_APPLE
void MprotectMallocZones(void *addr, int prot) {}
#endif
@@ -239,7 +251,7 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
lib/tsan/sanitizer_common/sanitizer_posix.h
@@ -20,10 +20,7 @@
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
-#if !SANITIZER_POSIX
-// Make it hard to accidentally use any of functions declared in this file:
-#error This file should only be included on POSIX
-#endif
+#if SANITIZER_POSIX
namespace __sanitizer {
@@ -93,7 +90,7 @@ int real_pthread_join(void *th, void **ret);
} \
} // namespace __sanitizer
-int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size);
// A routine named real_sigaction() must be implemented by each sanitizer in
// order for internal_sigaction() to bypass interceptors.
@@ -123,7 +120,12 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags);
// alive at least as long as the mapping exists.
void DecorateMapping(uptr addr, uptr size, const char *name);
+# if !SANITIZER_FREEBSD
+# define __sanitizer_dirsiz(dp) ((dp)->d_reclen)
+# endif
} // namespace __sanitizer
+#endif // SANITIZER_POSIX
+
#endif // SANITIZER_POSIX_H
lib/tsan/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -151,6 +151,8 @@ int Atexit(void (*function)(void)) {
#endif
}
+bool CreateDir(const char *pathname) { return mkdir(pathname, 0755) == 0; }
+
bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
@@ -288,7 +290,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return result;
}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+void PlatformPrepareForSandboxing(void *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
// to read the file mappings from /proc/self/maps. Luckily, neither will the
// process be able to load additional libraries, so it's fine to use the
@@ -381,8 +383,8 @@ SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
-int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
-#if !SANITIZER_GO && !SANITIZER_MAC
+int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
+#if !SANITIZER_GO && !SANITIZER_APPLE
if (&real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
(size_t *)size);
@@ -395,7 +397,7 @@ void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
uptr stacksize = 0;
- my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+ internal_pthread_attr_getstack(attr, (void **)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
lib/tsan/sanitizer_common/sanitizer_printf.cpp
@@ -20,10 +20,6 @@
#include <stdio.h>
#include <stdarg.h>
-#if defined(__x86_64__)
-# include <emmintrin.h>
-#endif
-
#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
!defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
@@ -132,8 +128,8 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
- "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ "Supported Printf formats: %([0-9]*)?(z|l|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\nProvided format: ";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@@ -164,9 +160,11 @@ int VSNPrintf(char *buff, int buff_length,
}
bool have_z = (*cur == 'z');
cur += have_z;
- bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+ bool have_l = cur[0] == 'l' && cur[1] != 'l';
+ cur += have_l;
+ bool have_ll = cur[0] == 'l' && cur[1] == 'l';
cur += have_ll * 2;
- const bool have_length = have_z || have_ll;
+ const bool have_length = have_z || have_l || have_ll;
const bool have_flags = have_width || have_length;
// At the moment only %s supports precision and left-justification.
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
@@ -174,6 +172,7 @@ int VSNPrintf(char *buff, int buff_length,
case 'd': {
s64 dval = have_ll ? va_arg(args, s64)
: have_z ? va_arg(args, sptr)
+ : have_l ? va_arg(args, long)
: va_arg(args, int);
result += AppendSignedDecimal(&buff, buff_end, dval, width,
pad_with_zero);
@@ -184,26 +183,20 @@ int VSNPrintf(char *buff, int buff_length,
case 'X': {
u64 uval = have_ll ? va_arg(args, u64)
: have_z ? va_arg(args, uptr)
+ : have_l ? va_arg(args, unsigned long)
: va_arg(args, unsigned);
bool uppercase = (*cur == 'X');
result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
width, pad_with_zero, uppercase);
break;
}
- case 'V': {
- for (uptr i = 0; i < 16; i++) {
- unsigned x = va_arg(args, unsigned);
- result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
- }
- break;
- }
case 'p': {
- RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
break;
}
case 's': {
- RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_length, kPrintfFormatsHelp, format);
// Only left-justified width is supported.
CHECK(!have_width || left_justified);
result += AppendString(&buff, buff_end, left_justified ? -width : width,
@@ -211,17 +204,17 @@ int VSNPrintf(char *buff, int buff_length,
break;
}
case 'c': {
- RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendChar(&buff, buff_end, va_arg(args, int));
break;
}
case '%' : {
- RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendChar(&buff, buff_end, '%');
break;
}
default: {
- RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+ RAW_CHECK_VA(false, kPrintfFormatsHelp, format);
}
}
}
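With the new l length modifier (and the custom %V spec removed), callers can pass a plain long or unsigned long without casting; an illustrative call, not taken from the diff:

  long threads = 42;
  unsigned long heap_bytes = 1ul << 20;
  Printf("threads: %ld, heap: %lu bytes\n", threads, heap_bytes);
  // Before this change only %d/%u (int), %zd/%zu (uptr/sptr) and %lld/%llu
  // were accepted, and a bare %l... tripped the unsupported-format check.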
@@ -317,7 +310,6 @@ static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
format, args);
}
-FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -326,7 +318,6 @@ void Printf(const char *format, ...) {
}
// Like Printf, but prints the current PID before the output string.
-FORMAT(1, 2)
void Report(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -338,7 +329,6 @@ void Report(const char *format, ...) {
// Returns the number of symbols that should have been written to buffer
// (not including trailing '\0'). Thus, the string is truncated
// iff return value is not less than "length".
-FORMAT(3, 4)
int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
va_list args;
va_start(args, format);
@@ -347,7 +337,6 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
-FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
uptr prev_len = length();
lib/tsan/sanitizer_common/sanitizer_procmaps.h
@@ -16,7 +16,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_MAC || SANITIZER_SOLARIS || \
+ SANITIZER_APPLE || SANITIZER_SOLARIS || \
SANITIZER_FUCHSIA
#include "sanitizer_common.h"
@@ -65,13 +65,37 @@ class MemoryMappedSegment {
MemoryMappedSegmentData *data_;
};
-class MemoryMappingLayout {
+struct ImageHeader;
+
+class MemoryMappingLayoutBase {
+ public:
+ virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }
+ virtual bool Error() const { UNIMPLEMENTED(); };
+ virtual void Reset() { UNIMPLEMENTED(); }
+
+ protected:
+ ~MemoryMappingLayoutBase() {}
+};
+
+class MemoryMappingLayout : public MemoryMappingLayoutBase {
public:
explicit MemoryMappingLayout(bool cache_enabled);
+
+// This destructor cannot be virtual, as it would cause operator new() linking
+// failures in hwasan test cases. However, non-virtual destructors emit warnings
+// in the macOS build, hence those warnings are disabled here.
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
~MemoryMappingLayout();
- bool Next(MemoryMappedSegment *segment);
- bool Error() const;
- void Reset();
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+ virtual bool Next(MemoryMappedSegment *segment) override;
+ virtual bool Error() const override;
+ virtual void Reset() override;
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
@@ -80,10 +104,14 @@ class MemoryMappingLayout {
// Adds all mapped objects into a vector.
void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
+ protected:
+#if SANITIZER_APPLE
+ virtual const ImageHeader *CurrentImageHeader();
+#endif
+ MemoryMappingLayoutData data_;
+
private:
void LoadFromCache();
-
- MemoryMappingLayoutData data_;
};
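The split into MemoryMappingLayoutBase plus virtual Next/Error/Reset exists so another mapping source can be substituted, e.g. in tests; a hedged sketch of such an override (the class name is invented):

  class EmptyMemoryMappingLayout final : public MemoryMappingLayoutBase {
   public:
    bool Next(MemoryMappedSegment *segment) override { return false; }
    bool Error() const override { return false; }
    void Reset() override {}
  };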
// Returns code range for the specified module.
lib/tsan/sanitizer_common/sanitizer_procmaps_bsd.cpp
@@ -39,6 +39,22 @@
namespace __sanitizer {
+#if SANITIZER_FREEBSD
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
+ const int Mib[] = {
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_PID,
+ getpid()
+ };
+
+ struct kinfo_proc InfoProc;
+ uptr Len = sizeof(InfoProc);
+ CHECK_EQ(internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)&InfoProc, &Len, 0), 0);
+ cb(0, InfoProc.ki_rssize * GetPageSizeCached(), false, stats);
+}
+#endif
+
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
const int Mib[] = {
#if SANITIZER_FREEBSD
lib/tsan/sanitizer_common/sanitizer_procmaps_common.cpp
@@ -145,29 +145,47 @@ void MemoryMappingLayout::DumpListOfModules(
}
}
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+#if SANITIZER_LINUX || SANITIZER_ANDROID || SANITIZER_SOLARIS || SANITIZER_NETBSD
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
char *smaps = nullptr;
uptr smaps_cap = 0;
uptr smaps_len = 0;
if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len))
return;
+ ParseUnixMemoryProfile(cb, stats, smaps, smaps_len);
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
+ uptr smaps_len) {
uptr start = 0;
bool file = false;
const char *pos = smaps;
- while (pos < smaps + smaps_len) {
+ char *end = smaps + smaps_len;
+ if (smaps_len < 2)
+ return;
+  // The following parsing can crash on almost every line
+  // of malformed/truncated input.
+  // Fixing that is hard because e.g. ParseDecimal does not
+  // even accept the end of the buffer and assumes well-formed input.
+  // So instead we patch the end of the input a bit;
+  // this does not affect well-formed, complete inputs.
+ *--end = 0;
+ *--end = '\n';
+ while (pos < end) {
if (IsHex(pos[0])) {
start = ParseHex(&pos);
for (; *pos != '/' && *pos > '\n'; pos++) {}
file = *pos == '/';
} else if (internal_strncmp(pos, "Rss:", 4) == 0) {
- while (!IsDecimal(*pos)) pos++;
+ while (pos < end && !IsDecimal(*pos)) pos++;
uptr rss = ParseDecimal(&pos) * 1024;
- cb(start, rss, file, stats, stats_size);
+ cb(start, rss, file, stats);
}
while (*pos++ != '\n') {}
}
- UnmapOrDie(smaps, smaps_cap);
}
+#endif
} // namespace __sanitizer
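Splitting the parser out of GetMemoryProfile makes it exercisable against an in-memory buffer; a hedged usage sketch, assuming it sits inside namespace __sanitizer with the declaration from sanitizer_procmaps.h visible (the smaps fragment and callback are illustrative):

  static void AddRss(uptr start, uptr rss, bool file, uptr *stats) {
    stats[0] += rss;
  }

  void Example() {
    char smaps[] =
        "7f0000000000-7f0000001000 r-xp 00000000 08:01 1 /usr/lib/libc.so\n"
        "Rss:                 4 kB\n";
    uptr total = 0;
    ParseUnixMemoryProfile(AddRss, &total, smaps, sizeof(smaps) - 1);
    // total == 4096: one mapping with 4 kB resident.
  }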
lib/tsan/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
@@ -136,29 +136,34 @@ void MemoryMappingLayout::LoadFromCache() {
// No-op on Mac for now.
}
+static bool IsDyldHdr(const mach_header *hdr) {
+ return (hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
+ hdr->filetype == MH_DYLINKER;
+}
+
// _dyld_get_image_header() and related APIs don't report dyld itself.
// We work around this by manually recursing through the memory map
// until we hit a Mach header matching dyld instead. These recursive
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
-// so it will be hit after only a few iterations.
-static mach_header *get_dyld_image_header() {
- unsigned depth = 1;
- vm_size_t size = 0;
+// so it will be hit after only a few iterations. These assumptions don't hold
+// on macOS 13+ anymore (dyld itself has moved into the shared cache).
+static mach_header *GetDyldImageHeaderViaVMRegion() {
vm_address_t address = 0;
- kern_return_t err = KERN_SUCCESS;
- mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
while (true) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
struct vm_region_submap_info_64 info;
- err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
- (vm_region_info_t)&info, &count);
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ kern_return_t err =
+ vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
if (err != KERN_SUCCESS) return nullptr;
if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
mach_header *hdr = (mach_header *)address;
- if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
- hdr->filetype == MH_DYLINKER) {
+ if (IsDyldHdr(hdr)) {
return hdr;
}
}
@@ -166,8 +171,69 @@ static mach_header *get_dyld_image_header() {
}
}
+extern "C" {
+struct dyld_shared_cache_dylib_text_info {
+ uint64_t version; // current version 2
+ // following fields all exist in version 1
+ uint64_t loadAddressUnslid;
+ uint64_t textSegmentSize;
+ uuid_t dylibUuid;
+ const char *path; // pointer invalid at end of iterations
+ // following fields all exist in version 2
+ uint64_t textSegmentOffset; // offset from start of cache
+};
+typedef struct dyld_shared_cache_dylib_text_info
+ dyld_shared_cache_dylib_text_info;
+
+extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
+extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern int dyld_shared_cache_iterate_text(
+ const uuid_t cacheUuid,
+ void (^callback)(const dyld_shared_cache_dylib_text_info *info));
+} // extern "C"
+
+static mach_header *GetDyldImageHeaderViaSharedCache() {
+ uuid_t uuid;
+ bool hasCache = _dyld_get_shared_cache_uuid(uuid);
+ if (!hasCache)
+ return nullptr;
+
+ size_t cacheLength;
+ __block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+ CHECK(cacheStart && cacheLength);
+
+ __block mach_header *dyldHdr = nullptr;
+ int res = dyld_shared_cache_iterate_text(
+ uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
+ CHECK_GE(info->version, 2);
+ mach_header *hdr =
+ (mach_header *)(cacheStart + info->textSegmentOffset);
+ if (IsDyldHdr(hdr))
+ dyldHdr = hdr;
+ });
+ CHECK_EQ(res, 0);
+
+ return dyldHdr;
+}
+
const mach_header *get_dyld_hdr() {
- if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
+ if (!dyld_hdr) {
+ // On macOS 13+, dyld itself has moved into the shared cache. Looking it up
+ // via vm_region_recurse_64() causes spins/hangs/crashes.
+ if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
+ dyld_hdr = GetDyldImageHeaderViaSharedCache();
+ if (!dyld_hdr) {
+ VReport(1,
+ "Failed to lookup the dyld image header in the shared cache on "
+ "macOS 13+ (or no shared cache in use). Falling back to "
+ "lookup via vm_region_recurse_64().\n");
+ dyld_hdr = GetDyldImageHeaderViaVMRegion();
+ }
+ } else {
+ dyld_hdr = GetDyldImageHeaderViaVMRegion();
+ }
+ CHECK(dyld_hdr);
+ }
return dyld_hdr;
}
@@ -184,7 +250,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
MemoryMappedSegmentData *seg_data,
MemoryMappingLayoutData *layout_data) {
const char *lc = layout_data->current_load_cmd_addr;
+
layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
+ layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;
@@ -292,11 +360,16 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
return false;
}
+const ImageHeader *MemoryMappingLayout::CurrentImageHeader() {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
+ ? get_dyld_hdr()
+ : _dyld_get_image_header(data_.current_image);
+ return (const ImageHeader *)hdr;
+}
+
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
- const mach_header *hdr = (data_.current_image == kDyldImageIdx)
- ? get_dyld_hdr()
- : _dyld_get_image_header(data_.current_image);
+ const mach_header *hdr = (const mach_header *)CurrentImageHeader();
if (!hdr) continue;
if (data_.current_load_cmd_count < 0) {
// Set up for this image;
@@ -326,7 +399,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
(const load_command *)data_.current_load_cmd_addr);
}
- for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ while (data_.current_load_cmd_count > 0) {
switch (data_.current_magic) {
// data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
@@ -347,6 +420,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
+    data_.current_load_cmd_count = -1;  // This will trigger loading the next image.
}
return false;
}
@@ -376,4 +450,4 @@ void MemoryMappingLayout::DumpListOfModules(
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/sanitizer_common/sanitizer_procmaps_solaris.cpp
@@ -13,21 +13,30 @@
#undef _FILE_OFFSET_BITS
#include "sanitizer_platform.h"
#if SANITIZER_SOLARIS
-#include "sanitizer_common.h"
-#include "sanitizer_procmaps.h"
+# include <fcntl.h>
+# include <limits.h>
+# include <procfs.h>
-#include <procfs.h>
-#include <limits.h>
+# include "sanitizer_common.h"
+# include "sanitizer_procmaps.h"
namespace __sanitizer {
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
- if (!ReadFileToBuffer("/proc/self/xmap", &proc_maps->data,
- &proc_maps->mmaped_size, &proc_maps->len)) {
- proc_maps->data = nullptr;
- proc_maps->mmaped_size = 0;
- proc_maps->len = 0;
- }
+ uptr fd = internal_open("/proc/self/xmap", O_RDONLY);
+ CHECK_NE(fd, -1);
+ uptr Size = internal_filesize(fd);
+ CHECK_GT(Size, 0);
+
+  // Allow room for additional entries, e.g. ones created by the following mmap.
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = internal_read(fd, VmMap, MmapedSize);
+ CHECK_NE(Size, -1);
+ internal_close(fd);
+ proc_maps->data = (char *)VmMap;
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
@@ -49,13 +58,28 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
segment->protection |= kProtectionWrite;
if ((xmapentry->pr_mflags & MA_EXEC) != 0)
segment->protection |= kProtectionExecute;
+ if ((xmapentry->pr_mflags & MA_SHARED) != 0)
+ segment->protection |= kProtectionShared;
if (segment->filename != NULL && segment->filename_size > 0) {
char proc_path[PATH_MAX + 1];
- internal_snprintf(proc_path, sizeof(proc_path), "/proc/self/path/%s",
- xmapentry->pr_mapname);
- internal_readlink(proc_path, segment->filename, segment->filename_size);
+      // Avoid unnecessary readlink on unnamed entries.
+ if (xmapentry->pr_mapname[0] == '\0')
+ segment->filename[0] = '\0';
+ else {
+ internal_snprintf(proc_path, sizeof(proc_path), "/proc/self/path/%s",
+ xmapentry->pr_mapname);
+ ssize_t sz = internal_readlink(proc_path, segment->filename,
+ segment->filename_size - 1);
+
+ // If readlink failed, the map is anonymous.
+ if (sz == -1)
+ segment->filename[0] = '\0';
+ else if ((size_t)sz < segment->filename_size)
+ // readlink doesn't NUL-terminate.
+ segment->filename[sz] = '\0';
+ }
}
data_.current += sizeof(prxmap_t);
lib/tsan/sanitizer_common/sanitizer_quarantine.h
@@ -68,10 +68,6 @@ struct QuarantineBatch {
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
-// The callback interface is:
-// void Callback::Recycle(Node *ptr);
-// void *cb.Allocate(uptr size);
-// void cb.Deallocate(void *ptr);
template<typename Callback, typename Node>
class Quarantine {
public:
@@ -94,21 +90,20 @@ class Quarantine {
recycle_mutex_.Init();
}
- uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
- uptr GetCacheSize() const {
- return atomic_load_relaxed(&max_cache_size_);
- }
+ uptr GetMaxSize() const { return atomic_load_relaxed(&max_size_); }
+ uptr GetMaxCacheSize() const { return atomic_load_relaxed(&max_cache_size_); }
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
- uptr cache_size = GetCacheSize();
- if (cache_size) {
+ uptr max_cache_size = GetMaxCacheSize();
+ if (max_cache_size && size <= GetMaxSize()) {
+ cb.PreQuarantine(ptr);
c->Enqueue(cb, ptr, size);
} else {
- // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
- cb.Recycle(ptr);
+ // GetMaxCacheSize() == 0 only when GetMaxSize() == 0 (see Init).
+ cb.RecyclePassThrough(ptr);
}
// Check cache size anyway to accommodate for runtime cache_size change.
- if (c->Size() > cache_size)
+ if (c->Size() > max_cache_size)
Drain(c, cb);
}
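The comment that used to describe the callback interface was removed above; for reference, a hedged sketch of the shape the updated code expects from Callback (method names follow the calls made in Put and the recycle path; exact signatures and constness are assumptions):

  template <typename Node>
  struct ExampleQuarantineCallback {
    void PreQuarantine(Node *ptr) const;       // called before enqueueing
    void Recycle(Node *ptr) const;             // free a chunk drained from the cache
    void RecyclePassThrough(Node *ptr) const;  // free immediately: quarantine off
                                               // or chunk larger than the limit
    void *Allocate(uptr size) const;           // backing storage for batches
    void Deallocate(void *ptr) const;
  };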
@@ -117,7 +112,7 @@ class Quarantine {
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
- if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
+ if (cache_.Size() > GetMaxSize() && recycle_mutex_.TryLock())
Recycle(atomic_load_relaxed(&min_size_), cb);
}
@@ -133,7 +128,7 @@ class Quarantine {
void PrintStats() const {
// It assumes that the world is stopped, just as the allocator's PrintStats.
Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
- GetSize() >> 20, GetCacheSize() >> 10);
+ GetMaxSize() >> 20, GetMaxCacheSize() >> 10);
cache_.PrintStats();
}
@@ -149,8 +144,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
- void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
- RELEASE(recycle_mutex_) {
+ void NOINLINE Recycle(uptr min_size, Callback cb)
+ SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
lib/tsan/sanitizer_common/sanitizer_range.cpp
@@ -0,0 +1,62 @@
+//===-- sanitizer_range.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_range.h"
+
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __sanitizer {
+
+void Intersect(ArrayRef<Range> a, ArrayRef<Range> b,
+ InternalMmapVectorNoCtor<Range> &output) {
+ output.clear();
+
+ struct Event {
+ uptr val;
+ s8 diff1;
+ s8 diff2;
+ };
+
+ InternalMmapVector<Event> events;
+ for (const Range &r : a) {
+ CHECK_LE(r.begin, r.end);
+ events.push_back({r.begin, 1, 0});
+ events.push_back({r.end, -1, 0});
+ }
+
+ for (const Range &r : b) {
+ CHECK_LE(r.begin, r.end);
+ events.push_back({r.begin, 0, 1});
+ events.push_back({r.end, 0, -1});
+ }
+
+ Sort(events.data(), events.size(),
+ [](const Event &lh, const Event &rh) { return lh.val < rh.val; });
+
+ uptr start = 0;
+ sptr state1 = 0;
+ sptr state2 = 0;
+ for (const auto &e : events) {
+ if (e.val != start) {
+ DCHECK_GE(state1, 0);
+ DCHECK_GE(state2, 0);
+ if (state1 && state2) {
+ if (!output.empty() && start == output.back().end)
+ output.back().end = e.val;
+ else
+ output.push_back({start, e.val});
+ }
+ start = e.val;
+ }
+
+ state1 += e.diff1;
+ state2 += e.diff2;
+ }
+}
+
+} // namespace __sanitizer
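An illustrative use of Intersect(), assuming ArrayRef allows implicit construction from a C array as the LLVM-style ArrayRef does:

  void Example() {
    Range a[] = {{0, 10}, {20, 30}};
    Range b[] = {{5, 25}};
    InternalMmapVector<Range> out;
    Intersect(a, b, out);
    // out == {{5, 10}, {20, 25}}: a point survives only while both sweep
    // counters (state1 for 'a', state2 for 'b') are positive.
  }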
lib/tsan/sanitizer_common/sanitizer_range.h
@@ -0,0 +1,40 @@
+//===-- sanitizer_range.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains Range and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_RANGE_H
+#define SANITIZER_RANGE_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __sanitizer {
+
+struct Range {
+ uptr begin;
+ uptr end;
+};
+
+inline bool operator==(const Range &lhs, const Range &rhs) {
+ return lhs.begin == rhs.begin && lhs.end == rhs.end;
+}
+
+inline bool operator!=(const Range &lhs, const Range &rhs) {
+ return !(lhs == rhs);
+}
+
+// Calculates intersection of two sets of regions in O(N log N) time.
+void Intersect(ArrayRef<Range> a, ArrayRef<Range> b,
+ InternalMmapVectorNoCtor<Range> &output);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_RANGE_H
lib/tsan/sanitizer_common/sanitizer_redefine_builtins.h
@@ -0,0 +1,52 @@
+//===-- sanitizer_redefine_builtins.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Redefine builtin functions to use internal versions. This is needed where
+// compiler optimizations end up producing unwanted libcalls!
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+#ifndef SANITIZER_REDEFINE_BUILTINS_H
+#define SANITIZER_REDEFINE_BUILTINS_H
+
+// The asm hack only works with GCC and Clang.
+#if !defined(_WIN32)
+
+asm("memcpy = __sanitizer_internal_memcpy");
+asm("memmove = __sanitizer_internal_memmove");
+asm("memset = __sanitizer_internal_memset");
+
+// The builtins should not be redefined in source files that make use of C++
+// standard libraries, in particular where C++STL headers with inline functions
+// are used. The redefinition in such cases would lead to ODR violations.
+//
+// Try to break the build in common cases where builtins shouldn't be redefined.
+namespace std {
+class Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file {
+ Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file(
+ const Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file&) = delete;
+ Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file& operator=(
+ const Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file&) = delete;
+};
+using array = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using atomic = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using function = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using map = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using shared_ptr = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using string = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unique_ptr = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unordered_map = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unordered_set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using vector = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+} // namespace std
+
+#endif // !_WIN32
+
+#endif // SANITIZER_REDEFINE_BUILTINS_H
+#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
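The asm statements above install assembler-level aliases, so any memcpy/memmove/memset reference emitted while compiling a runtime source resolves to the internal implementation; a hedged illustration (the function is invented):

  #include "sanitizer_redefine_builtins.h"

  // Even if the compiler lowers this to a libcall, the emitted symbol is
  // "memcpy", which the alias redirects to __sanitizer_internal_memcpy, so the
  // runtime never re-enters its own memcpy interceptor.
  void CopyRegion(char *dst, const char *src, unsigned long n) {
    __builtin_memcpy(dst, src, n);
  }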
lib/tsan/sanitizer_common/sanitizer_ring_buffer.h
@@ -86,10 +86,13 @@ class CompactRingBuffer {
// Lower bytes store the address of the next buffer element.
static constexpr int kPageSizeBits = 12;
static constexpr int kSizeShift = 56;
+ static constexpr int kSizeBits = 64 - kSizeShift;
static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1;
uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; }
+ static uptr SignExtend(uptr x) { return ((sptr)x) << kSizeBits >> kSizeBits; }
+
void Init(void *storage, uptr size) {
CHECK_EQ(sizeof(CompactRingBuffer<T>), sizeof(void *));
CHECK(IsPowerOfTwo(size));
@@ -97,12 +100,14 @@ class CompactRingBuffer {
CHECK_LE(size, 128 << kPageSizeBits);
CHECK_EQ(size % 4096, 0);
CHECK_EQ(size % sizeof(T), 0);
- CHECK_EQ((uptr)storage % (size * 2), 0);
- long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift);
+ uptr st = (uptr)storage;
+ CHECK_EQ(st % (size * 2), 0);
+ CHECK_EQ(st, SignExtend(st & kNextMask));
+ long_ = (st & kNextMask) | ((size >> kPageSizeBits) << kSizeShift);
}
void SetNext(const T *next) {
- long_ = (long_ & ~kNextMask) | (uptr)next;
+ long_ = (long_ & ~kNextMask) | ((uptr)next & kNextMask);
}
public:
@@ -119,7 +124,7 @@ class CompactRingBuffer {
SetNext((const T *)storage + Idx);
}
- T *Next() const { return (T *)(long_ & kNextMask); }
+ T *Next() const { return (T *)(SignExtend(long_ & kNextMask)); }
void *StartOfStorage() const {
return (void *)((uptr)Next() & ~(GetStorageSize() - 1));
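A worked example of the sign-extension fix, with kSizeShift = 56 and kSizeBits = 8: for addr = 0xffffff80'00001000 (high bits set), stored = addr & kNextMask = 0x00ffff80'00001000, and restored = (sptr(stored) << 8) >> 8 = 0xffffff80'00001000 == addr. Previously Next() returned the masked value, i.e. a wrong pointer for any storage address with bits above bit 55 set.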
lib/tsan/sanitizer_common/sanitizer_signal_interceptors.inc
@@ -29,12 +29,21 @@ using namespace __sanitizer;
#endif
#ifndef SIGNAL_INTERCEPTOR_SIGACTION_IMPL
-#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
- { return REAL(sigaction_symname)(signum, act, oldact); }
+# define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
+ { \
+ if (!REAL(sigaction_symname)) { \
+ Printf( \
+ "Warning: REAL(sigaction_symname) == nullptr. This may happen " \
+ "if you link with ubsan statically. Sigaction will not work.\n"); \
+ return -1; \
+ } \
+ return REAL(sigaction_symname)(signum, act, oldact); \
+ }
#endif
#if SANITIZER_INTERCEPT_BSD_SIGNAL
INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);
}
@@ -45,6 +54,7 @@ INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
INTERCEPTOR(uptr, signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive)
return (uptr) nullptr;
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
@@ -53,6 +63,7 @@ INTERCEPTOR(uptr, signal, int signum, uptr handler) {
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
if (!oldact) return 0;
act = nullptr;
lib/tsan/sanitizer_common/sanitizer_solaris.cpp
@@ -225,28 +225,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}
-BlockingMutex::BlockingMutex() {
- CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
- internal_memset(this, 0, sizeof(*this));
- CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
-}
-
-void BlockingMutex::Lock() {
- CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
- CHECK_NE(owner_, (uptr)thr_self());
- CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
- CHECK(!owner_);
- owner_ = (uptr)thr_self();
-}
-
-void BlockingMutex::Unlock() {
- CHECK(owner_ == (uptr)thr_self());
- owner_ = 0;
- CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
-
} // namespace __sanitizer
#endif // SANITIZER_SOLARIS
lib/tsan/sanitizer_common/sanitizer_solaris.h
@@ -0,0 +1,56 @@
+//===-- sanitizer_solaris.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains Solaris-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SOLARIS_H
+#define SANITIZER_SOLARIS_H
+
+#include "sanitizer_internal_defs.h"
+
+#if SANITIZER_SOLARIS
+
+#include <link.h>
+
+namespace __sanitizer {
+
+// Beginning of declaration from OpenSolaris/Illumos
+// $SRC/cmd/sgs/include/rtld.h.
+struct Rt_map {
+ Link_map rt_public;
+ const char *rt_pathname;
+ ulong_t rt_padstart;
+ ulong_t rt_padimlen;
+ ulong_t rt_msize;
+ uint_t rt_flags;
+ uint_t rt_flags1;
+ ulong_t rt_tlsmodid;
+};
+
+// Structure matching the Solaris 11.4 struct dl_phdr_info used to determine
+// presence of dlpi_tls_modid field at runtime. Cf. Solaris 11.4
+// dl_iterate_phdr(3C), Example 2.
+struct dl_phdr_info_test {
+ ElfW(Addr) dlpi_addr;
+ const char *dlpi_name;
+ const ElfW(Phdr) * dlpi_phdr;
+ ElfW(Half) dlpi_phnum;
+ u_longlong_t dlpi_adds;
+ u_longlong_t dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SOLARIS
+
+#endif // SANITIZER_SOLARIS_H
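
The dl_phdr_info_test mirror above exists so the runtime can tell, at run time, whether the hosting Solaris release passes the TLS fields to dl_iterate_phdr() callbacks. A hedged sketch of that detection pattern, following the cited man-page example (the function names here are illustrative, not the exact code in this change):

    #include <link.h>
    #include <stddef.h>

    static int DetectTlsModidCb(struct dl_phdr_info *info, size_t size, void *res) {
      (void)info;
      // dl_iterate_phdr() reports the size of dl_phdr_info it was built with;
      // dlpi_tls_modid is only usable if that size reaches past the field.
      *static_cast<bool *>(res) =
          size >= offsetof(__sanitizer::dl_phdr_info_test, dlpi_tls_modid) +
                      sizeof(size_t);
      return 1;  // one object is enough, stop iterating
    }

    static bool HasDlpiTlsModid() {
      bool res = false;
      dl_iterate_phdr(DetectTlsModidCb, &res);
      return res;
    }
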
lib/tsan/sanitizer_common/sanitizer_stack_store.cpp
@@ -0,0 +1,379 @@
+//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stack_store.h"
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_leb128.h"
+#include "sanitizer_lzw.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+namespace {
+struct StackTraceHeader {
+ static constexpr u32 kStackSizeBits = 8;
+
+ u8 size;
+ u8 tag;
+ explicit StackTraceHeader(const StackTrace &trace)
+ : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
+ CHECK_EQ(trace.tag, static_cast<uptr>(tag));
+ }
+ explicit StackTraceHeader(uptr h)
+ : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}
+
+ uptr ToUptr() const {
+ return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
+ }
+};
+} // namespace
+
+StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
+ if (!trace.size && !trace.tag)
+ return 0;
+ StackTraceHeader h(trace);
+ uptr idx = 0;
+ *pack = 0;
+ uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
+ *stack_trace = h.ToUptr();
+ internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
+ *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
+ return OffsetToId(idx);
+}
+
+StackTrace StackStore::Load(Id id) {
+ if (!id)
+ return {};
+ uptr idx = IdToOffset(id);
+ uptr block_idx = GetBlockIdx(idx);
+ CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+ const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
+ if (!stack_trace)
+ return {};
+ stack_trace += GetInBlockIdx(idx);
+ StackTraceHeader h(*stack_trace);
+ return StackTrace(stack_trace + 1, h.size, h.tag);
+}
+
+uptr StackStore::Allocated() const {
+ return atomic_load_relaxed(&allocated_) + sizeof(*this);
+}
+
+uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
+ for (;;) {
+ // Optimistic lock-free allocation, essentially try to bump the
+ // total_frames_.
+ uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
+ uptr block_idx = GetBlockIdx(start);
+ uptr last_idx = GetBlockIdx(start + count - 1);
+ if (LIKELY(block_idx == last_idx)) {
+ // Fits into a single block.
+ CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+ *idx = start;
+ return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
+ }
+
+ // Retry. We can't use a range allocated across two different blocks.
+ CHECK_LE(count, kBlockSizeFrames);
+ uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
+ // Mark tail/head of these blocks as "stored" to avoid waiting before we can
+ // Pack().
+ *pack += blocks_[block_idx].Stored(in_first);
+ *pack += blocks_[last_idx].Stored(count - in_first);
+ }
+}
+
+void *StackStore::Map(uptr size, const char *mem_type) {
+ atomic_fetch_add(&allocated_, size, memory_order_relaxed);
+ return MmapNoReserveOrDie(size, mem_type);
+}
+
+void StackStore::Unmap(void *addr, uptr size) {
+ atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
+ UnmapOrDie(addr, size);
+}
+
+uptr StackStore::Pack(Compression type) {
+ uptr res = 0;
+ for (BlockInfo &b : blocks_) res += b.Pack(type, this);
+ return res;
+}
+
+void StackStore::LockAll() {
+ for (BlockInfo &b : blocks_) b.Lock();
+}
+
+void StackStore::UnlockAll() {
+ for (BlockInfo &b : blocks_) b.Unlock();
+}
+
+void StackStore::TestOnlyUnmap() {
+ for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
+ internal_memset(this, 0, sizeof(*this));
+}
+
+uptr *StackStore::BlockInfo::Get() const {
+ // Idiomatic double-checked locking uses memory_order_acquire here, but
+ // relaxed is fine for us; the justification is similar to
+ // TwoLevelMap::GetOrCreate.
+ return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
+}
+
+uptr *StackStore::BlockInfo::Create(StackStore *store) {
+ SpinMutexLock l(&mtx_);
+ uptr *ptr = Get();
+ if (!ptr) {
+ ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
+ atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
+ }
+ return ptr;
+}
+
+uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
+ uptr *ptr = Get();
+ if (LIKELY(ptr))
+ return ptr;
+ return Create(store);
+}
+
+class SLeb128Encoder {
+ public:
+ SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}
+
+ bool operator==(const SLeb128Encoder &other) const {
+ return begin == other.begin;
+ }
+
+ bool operator!=(const SLeb128Encoder &other) const {
+ return begin != other.begin;
+ }
+
+ SLeb128Encoder &operator=(uptr v) {
+ sptr diff = v - previous;
+ begin = EncodeSLEB128(diff, begin, end);
+ previous = v;
+ return *this;
+ }
+ SLeb128Encoder &operator*() { return *this; }
+ SLeb128Encoder &operator++() { return *this; }
+
+ u8 *base() const { return begin; }
+
+ private:
+ u8 *begin;
+ u8 *end;
+ uptr previous = 0;
+};
+
+class SLeb128Decoder {
+ public:
+ SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}
+
+ bool operator==(const SLeb128Decoder &other) const {
+ return begin == other.begin;
+ }
+
+ bool operator!=(const SLeb128Decoder &other) const {
+ return begin != other.begin;
+ }
+
+ uptr operator*() {
+ sptr diff;
+ begin = DecodeSLEB128(begin, end, &diff);
+ previous += diff;
+ return previous;
+ }
+ SLeb128Decoder &operator++() { return *this; }
+
+ SLeb128Decoder operator++(int) { return *this; }
+
+ private:
+ const u8 *begin;
+ const u8 *end;
+ uptr previous = 0;
+};
+
+static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
+ u8 *to_end) {
+ SLeb128Encoder encoder(to, to_end);
+ for (; from != from_end; ++from, ++encoder) *encoder = *from;
+ return encoder.base();
+}
+
+static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
+ uptr *to_end) {
+ SLeb128Decoder decoder(from, from_end);
+ SLeb128Decoder end(from_end, from_end);
+ for (; decoder != end; ++to, ++decoder) *to = *decoder;
+ CHECK_EQ(to, to_end);
+ return to;
+}
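
The two compressors below build on the same observation: consecutive frame addresses within one trace differ by small amounts, so storing signed deltas in LEB128 form shrinks them dramatically. A standalone sketch of that idea (a toy encoder under the usual SLEB128 algorithm, not the runtime's SLeb128Encoder; addresses are made up):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static void PutSLEB128(int64_t v, std::vector<uint8_t> &out) {
      for (bool more = true; more;) {
        uint8_t byte = v & 0x7f;
        v >>= 7;  // assumes arithmetic shift, so the sign survives for negative deltas
        more = !((v == 0 && !(byte & 0x40)) || (v == -1 && (byte & 0x40)));
        if (more) byte |= 0x80;
        out.push_back(byte);
      }
    }

    int main() {
      const uint64_t pcs[] = {0x7f3a10002410, 0x7f3a10002444, 0x7f3a100024a0};
      std::vector<uint8_t> packed;
      uint64_t prev = 0;
      for (uint64_t pc : pcs) {
        PutSLEB128(static_cast<int64_t>(pc - prev), packed);
        prev = pc;
      }
      // The first delta is the absolute address; the following ones take 1-2 bytes each.
      std::printf("%zu PCs -> %zu bytes\n", sizeof(pcs) / sizeof(pcs[0]), packed.size());
    }
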
+
+static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
+ u8 *to_end) {
+ SLeb128Encoder encoder(to, to_end);
+ encoder = LzwEncode<uptr>(from, from_end, encoder);
+ return encoder.base();
+}
+
+static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
+ uptr *to_end) {
+ SLeb128Decoder decoder(from, from_end);
+ SLeb128Decoder end(from_end, from_end);
+ to = LzwDecode<uptr>(decoder, end, to);
+ CHECK_EQ(to, to_end);
+ return to;
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(push)
+// Disable 'nonstandard extension used: zero-sized array in struct/union'.
+# pragma warning(disable : 4200)
+#endif
+namespace {
+struct PackedHeader {
+ uptr size;
+ StackStore::Compression type;
+ u8 data[];
+};
+} // namespace
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+#endif
+
+uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
+ SpinMutexLock l(&mtx_);
+ switch (state) {
+ case State::Storing:
+ state = State::Unpacked;
+ FALLTHROUGH;
+ case State::Unpacked:
+ return Get();
+ case State::Packed:
+ break;
+ }
+
+ u8 *ptr = reinterpret_cast<u8 *>(Get());
+ CHECK_NE(nullptr, ptr);
+ const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
+ CHECK_LE(header->size, kBlockSizeBytes);
+ CHECK_GE(header->size, sizeof(PackedHeader));
+
+ uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
+
+ uptr *unpacked =
+ reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));
+
+ uptr *unpacked_end;
+ switch (header->type) {
+ case Compression::Delta:
+ unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
+ unpacked + kBlockSizeFrames);
+ break;
+ case Compression::LZW:
+ unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
+ unpacked + kBlockSizeFrames);
+ break;
+ default:
+ UNREACHABLE("Unexpected type");
+ break;
+ }
+
+ CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);
+
+ MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
+ atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
+ store->Unmap(ptr, packed_size_aligned);
+
+ state = State::Unpacked;
+ return Get();
+}
+
+uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
+ if (type == Compression::None)
+ return 0;
+
+ SpinMutexLock l(&mtx_);
+ switch (state) {
+ case State::Unpacked:
+ case State::Packed:
+ return 0;
+ case State::Storing:
+ break;
+ }
+
+ uptr *ptr = Get();
+ if (!ptr || !Stored(0))
+ return 0;
+
+ u8 *packed =
+ reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
+ PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
+ u8 *alloc_end = packed + kBlockSizeBytes;
+
+ u8 *packed_end = nullptr;
+ switch (type) {
+ case Compression::Delta:
+ packed_end =
+ CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
+ break;
+ case Compression::LZW:
+ packed_end =
+ CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
+ break;
+ default:
+ UNREACHABLE("Unexpected type");
+ break;
+ }
+
+ header->type = type;
+ header->size = packed_end - packed;
+
+ VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
+ header->size >> 10);
+
+ if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
+ VPrintf(1, "Undo and keep block unpacked\n");
+ MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
+ store->Unmap(packed, kBlockSizeBytes);
+ state = State::Unpacked;
+ return 0;
+ }
+
+ uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
+ store->Unmap(packed + packed_size_aligned,
+ kBlockSizeBytes - packed_size_aligned);
+ MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);
+
+ atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
+ store->Unmap(ptr, kBlockSizeBytes);
+
+ state = State::Packed;
+ return kBlockSizeBytes - packed_size_aligned;
+}
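
For scale, a back-of-the-envelope reading of the constants declared in sanitizer_stack_store.h, assuming a 64-bit uptr:

    kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr) = 0x100000 * 8 = 8 MiB
    undo threshold  = kBlockSizeBytes / 8             = 1 MiB

so a freshly packed block is kept only when compression saves at least 1 MiB out of the 8 MiB block; otherwise the packed copy is discarded and the block stays unpacked. The bytes actually released are then rounded to whole pages before the unused tail is unmapped.
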
+
+void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
+ if (uptr *ptr = Get())
+ store->Unmap(ptr, kBlockSizeBytes);
+}
+
+bool StackStore::BlockInfo::Stored(uptr n) {
+ return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
+ kBlockSizeFrames;
+}
+
+bool StackStore::BlockInfo::IsPacked() const {
+ SpinMutexLock l(&mtx_);
+ return state == State::Packed;
+}
+
+} // namespace __sanitizer
lib/tsan/sanitizer_common/sanitizer_stack_store.h
@@ -0,0 +1,121 @@
+//===-- sanitizer_stack_store.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACK_STORE_H
+#define SANITIZER_STACK_STORE_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+class StackStore {
+ static constexpr uptr kBlockSizeFrames = 0x100000;
+ static constexpr uptr kBlockCount = 0x1000;
+ static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr);
+
+ public:
+ enum class Compression : u8 {
+ None = 0,
+ Delta,
+ LZW,
+ };
+
+ constexpr StackStore() = default;
+
+ using Id = u32; // Enough for 2^32 * sizeof(uptr) bytes of traces.
+ static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
+ "");
+
+ Id Store(const StackTrace &trace,
+ uptr *pack /* number of blocks completed by this call */);
+ StackTrace Load(Id id);
+ uptr Allocated() const;
+
+ // Packs all blocks which don't expect any more writes. A block is packed
+ // only once. As soon as a trace from that block is requested, the block is
+ // unpacked and stays unpacked after that.
+ // Returns the number of released bytes.
+ uptr Pack(Compression type);
+
+ void LockAll();
+ void UnlockAll();
+
+ void TestOnlyUnmap();
+
+ private:
+ friend class StackStoreTest;
+ static constexpr uptr GetBlockIdx(uptr frame_idx) {
+ return frame_idx / kBlockSizeFrames;
+ }
+
+ static constexpr uptr GetInBlockIdx(uptr frame_idx) {
+ return frame_idx % kBlockSizeFrames;
+ }
+
+ static constexpr uptr IdToOffset(Id id) {
+ CHECK_NE(id, 0);
+ return id - 1; // Avoid zero as id.
+ }
+
+ static constexpr uptr OffsetToId(Id id) {
+ // This maps UINT32_MAX to 0, which will be retrieved as an empty stack.
+ // But this is not a problem as we will not be able to store anything after
+ // that anyway.
+ return id + 1; // Avoid zero as id.
+ }
+
+ uptr *Alloc(uptr count, uptr *idx, uptr *pack);
+
+ void *Map(uptr size, const char *mem_type);
+ void Unmap(void *addr, uptr size);
+
+ // Total number of allocated frames.
+ atomic_uintptr_t total_frames_ = {};
+
+ // Tracks total allocated memory in bytes.
+ atomic_uintptr_t allocated_ = {};
+
+ // Each block holds a pointer to storage for exactly kBlockSizeFrames frames.
+ class BlockInfo {
+ atomic_uintptr_t data_;
+ // Counter to track store progress to know when we can Pack() the block.
+ atomic_uint32_t stored_;
+ // Protects alloc of new blocks.
+ mutable StaticSpinMutex mtx_;
+
+ enum class State : u8 {
+ Storing = 0,
+ Packed,
+ Unpacked,
+ };
+ State state SANITIZER_GUARDED_BY(mtx_);
+
+ uptr *Create(StackStore *store);
+
+ public:
+ uptr *Get() const;
+ uptr *GetOrCreate(StackStore *store);
+ uptr *GetOrUnpack(StackStore *store);
+ uptr Pack(Compression type, StackStore *store);
+ void TestOnlyUnmap(StackStore *store);
+ bool Stored(uptr n);
+ bool IsPacked() const;
+ void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
+ };
+
+ BlockInfo blocks_[kBlockCount] = {};
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACK_STORE_H
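
A minimal usage sketch of this class, as the new stack depot code later in the change drives it (a reading of the interface above, not code from the change; `trace` is a hypothetical StackTrace):

    static StackStore store;  // constexpr-constructible, so a plain global works

    uptr completed_blocks = 0;
    StackStore::Id id = store.Store(trace, &completed_blocks);
    if (completed_blocks)  // some block(s) will see no further writes
      store.Pack(StackStore::Compression::Delta);  // or LZW

    StackTrace loaded = store.Load(id);  // transparently unpacks the block
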
lib/tsan/sanitizer_common/sanitizer_stackdepot.cpp
@@ -12,95 +12,203 @@
#include "sanitizer_stackdepot.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
struct StackDepotNode {
- StackDepotNode *link;
- u32 id;
- atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
- u32 size;
- u32 tag;
- uptr stack[1]; // [size]
+ using hash_type = u64;
+ hash_type stack_hash;
+ u32 link;
+ StackStore::Id store_id;
static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
- // Lower kTabSizeLog bits are equal for all items in one bucket.
- // We use these bits to store the per-stack use counter.
- static const u32 kUseCountBits = kTabSizeLog;
- static const u32 kMaxUseCount = 1 << kUseCountBits;
- static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
- static const u32 kHashMask = ~kUseCountMask;
typedef StackTrace args_type;
- bool eq(u32 hash, const args_type &args) const {
- u32 hash_bits =
- atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
- if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
- return false;
- uptr i = 0;
- for (; i < size; i++) {
- if (stack[i] != args.trace[i]) return false;
- }
- return true;
- }
- static uptr storage_size(const args_type &args) {
- return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ bool eq(hash_type hash, const args_type &args) const {
+ return hash == stack_hash;
}
- static u32 hash(const args_type &args) {
- MurMur2HashBuilder H(args.size * sizeof(uptr));
+ static uptr allocated();
+ static hash_type hash(const args_type &args) {
+ MurMur2Hash64Builder H(args.size * sizeof(uptr));
for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
+ H.add(args.tag);
return H.get();
}
static bool is_valid(const args_type &args) {
return args.size > 0 && args.trace;
}
- void store(const args_type &args, u32 hash) {
- atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
- size = args.size;
- tag = args.tag;
- internal_memcpy(stack, args.trace, size * sizeof(uptr));
- }
- args_type load() const {
- return args_type(&stack[0], size, tag);
- }
- StackDepotHandle get_handle() { return StackDepotHandle(this); }
+ void store(u32 id, const args_type &args, hash_type hash);
+ args_type load(u32 id) const;
+ static StackDepotHandle get_handle(u32 id);
typedef StackDepotHandle handle_type;
};
-COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
-
-u32 StackDepotHandle::id() { return node_->id; }
-int StackDepotHandle::use_count() {
- return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
- StackDepotNode::kUseCountMask;
-}
-void StackDepotHandle::inc_use_count_unsafe() {
- u32 prev =
- atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
- StackDepotNode::kUseCountMask;
- CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
-}
+static StackStore stackStore;
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
StackDepot;
static StackDepot theDepot;
+// Keep mutable data out of frequently accessed nodes to improve caching
+// efficiency.
+static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
+ StackDepot::kNodesSize2>
+ useCounts;
+
+int StackDepotHandle::use_count() const {
+ return atomic_load_relaxed(&useCounts[id_]);
+}
+
+void StackDepotHandle::inc_use_count_unsafe() {
+ atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
+}
+
+uptr StackDepotNode::allocated() {
+ return stackStore.Allocated() + useCounts.MemoryUsage();
+}
+
+static void CompressStackStore() {
+ u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
+ uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
+ Abs(common_flags()->compress_stack_depot)));
+ if (!diff)
+ return;
+ if (Verbosity() >= 1) {
+ u64 finish = MonotonicNanoTime();
+ uptr total_before = theDepot.GetStats().allocated + diff;
+ VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
+ SanitizerToolName, diff >> 10, total_before >> 10,
+ (finish - start) / 1000000);
+ }
+}
+
+namespace {
+
+class CompressThread {
+ public:
+ constexpr CompressThread() = default;
+ void NewWorkNotify();
+ void Stop();
+ void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+
+ private:
+ enum class State {
+ NotStarted = 0,
+ Started,
+ Failed,
+ Stopped,
+ };
+
+ void Run();
+
+ bool WaitForWork() {
+ semaphore_.Wait();
+ return atomic_load(&run_, memory_order_acquire);
+ }
+
+ Semaphore semaphore_ = {};
+ StaticSpinMutex mutex_ = {};
+ State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
+ void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
+ atomic_uint8_t run_ = {};
+};
+
+static CompressThread compress_thread;
+
+void CompressThread::NewWorkNotify() {
+ int compress = common_flags()->compress_stack_depot;
+ if (!compress)
+ return;
+ if (compress > 0 /* for testing or debugging */) {
+ SpinMutexLock l(&mutex_);
+ if (state_ == State::NotStarted) {
+ atomic_store(&run_, 1, memory_order_release);
+ CHECK_EQ(nullptr, thread_);
+ thread_ = internal_start_thread(
+ [](void *arg) -> void * {
+ reinterpret_cast<CompressThread *>(arg)->Run();
+ return nullptr;
+ },
+ this);
+ state_ = thread_ ? State::Started : State::Failed;
+ }
+ if (state_ == State::Started) {
+ semaphore_.Post();
+ return;
+ }
+ }
+ CompressStackStore();
+}
+
+void CompressThread::Run() {
+ VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
+ while (WaitForWork()) CompressStackStore();
+ VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
+}
+
+void CompressThread::Stop() {
+ void *t = nullptr;
+ {
+ SpinMutexLock l(&mutex_);
+ if (state_ != State::Started)
+ return;
+ state_ = State::Stopped;
+ CHECK_NE(nullptr, thread_);
+ t = thread_;
+ thread_ = nullptr;
+ }
+ atomic_store(&run_, 0, memory_order_release);
+ semaphore_.Post();
+ internal_join_thread(t);
+}
+
+void CompressThread::LockAndStop() {
+ mutex_.Lock();
+ if (state_ != State::Started)
+ return;
+ CHECK_NE(nullptr, thread_);
+
+ atomic_store(&run_, 0, memory_order_release);
+ semaphore_.Post();
+ internal_join_thread(thread_);
+ // Allow restarting after Unlock() if needed.
+ state_ = State::NotStarted;
+ thread_ = nullptr;
+}
+
+void CompressThread::Unlock() { mutex_.Unlock(); }
+
+} // namespace
-StackDepotStats *StackDepotGetStats() {
- return theDepot.GetStats();
+void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
+ stack_hash = hash;
+ uptr pack = 0;
+ store_id = stackStore.Store(args, &pack);
+ if (LIKELY(!pack))
+ return;
+ compress_thread.NewWorkNotify();
}
-u32 StackDepotPut(StackTrace stack) {
- StackDepotHandle h = theDepot.Put(stack);
- return h.valid() ? h.id() : 0;
+StackDepotNode::args_type StackDepotNode::load(u32 id) const {
+ if (!store_id)
+ return {};
+ return stackStore.Load(store_id);
}
+StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
+
+u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }
+
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
- return theDepot.Put(stack);
+ return StackDepotNode::get_handle(theDepot.Put(stack));
}
StackTrace StackDepotGet(u32 id) {
@@ -109,9 +217,13 @@ StackTrace StackDepotGet(u32 id) {
void StackDepotLockAll() {
theDepot.LockAll();
+ compress_thread.LockAndStop();
+ stackStore.LockAll();
}
void StackDepotUnlockAll() {
+ stackStore.UnlockAll();
+ compress_thread.Unlock();
theDepot.UnlockAll();
}
@@ -121,34 +233,15 @@ void StackDepotPrintAll() {
#endif
}
-bool StackDepotReverseMap::IdDescPair::IdComparator(
- const StackDepotReverseMap::IdDescPair &a,
- const StackDepotReverseMap::IdDescPair &b) {
- return a.id < b.id;
-}
+void StackDepotStopBackgroundThread() { compress_thread.Stop(); }
-StackDepotReverseMap::StackDepotReverseMap() {
- map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
- for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
- atomic_uintptr_t *p = &theDepot.tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- StackDepotNode *s = (StackDepotNode*)(v & ~1);
- for (; s; s = s->link) {
- IdDescPair pair = {s->id, s};
- map_.push_back(pair);
- }
- }
- Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
+StackDepotHandle StackDepotNode::get_handle(u32 id) {
+ return StackDepotHandle(&theDepot.nodes[id], id);
}
-StackTrace StackDepotReverseMap::Get(u32 id) {
- if (!map_.size())
- return StackTrace();
- IdDescPair pair = {id, nullptr};
- uptr idx = InternalLowerBound(map_, pair, IdDescPair::IdComparator);
- if (idx > map_.size() || map_[idx].id != id)
- return StackTrace();
- return map_[idx].desc->load();
+void StackDepotTestOnlyUnmap() {
+ theDepot.TestOnlyUnmap();
+ stackStore.TestOnlyUnmap();
}
} // namespace __sanitizer
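
Read together, CompressStackStore() and CompressThread::NewWorkNotify() above give the new compress_stack_depot common flag the following behavior (a reading of this code; the values map onto the StackStore::Compression enumerators):

    //   compress_stack_depot =  0 : never pack
    //   compress_stack_depot =  1 : Delta, packed on the background thread
    //   compress_stack_depot =  2 : LZW, packed on the background thread
    //   compress_stack_depot = -1 : Delta, packed synchronously inside
    //                               NewWorkNotify() (the testing/debugging path)
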
lib/tsan/sanitizer_common/sanitizer_stackdepot.h
@@ -22,18 +22,18 @@ namespace __sanitizer {
// StackDepot efficiently stores huge amounts of stack traces.
struct StackDepotNode;
struct StackDepotHandle {
- StackDepotNode *node_;
- StackDepotHandle() : node_(nullptr) {}
- explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
- bool valid() { return node_; }
- u32 id();
- int use_count();
+ StackDepotNode *node_ = nullptr;
+ u32 id_ = 0;
+ StackDepotHandle(StackDepotNode *node, u32 id) : node_(node), id_(id) {}
+ bool valid() const { return node_; }
+ u32 id() const { return id_; }
+ int use_count() const;
void inc_use_count_unsafe();
};
const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);
-StackDepotStats *StackDepotGetStats();
+StackDepotStats StackDepotGetStats();
u32 StackDepotPut(StackTrace stack);
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
// Retrieves a stored stack trace by the id.
@@ -42,30 +42,9 @@ StackTrace StackDepotGet(u32 id);
void StackDepotLockAll();
void StackDepotUnlockAll();
void StackDepotPrintAll();
+void StackDepotStopBackgroundThread();
-// Instantiating this class creates a snapshot of StackDepot which can be
-// efficiently queried with StackDepotGet(). You can use it concurrently with
-// StackDepot, but the snapshot is only guaranteed to contain those stack traces
-// which were stored before it was instantiated.
-class StackDepotReverseMap {
- public:
- StackDepotReverseMap();
- StackTrace Get(u32 id);
-
- private:
- struct IdDescPair {
- u32 id;
- StackDepotNode *desc;
-
- static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
- };
-
- InternalMmapVector<IdDescPair> map_;
-
- // Disallow evil constructors.
- StackDepotReverseMap(const StackDepotReverseMap&);
- void operator=(const StackDepotReverseMap&);
-};
+void StackDepotTestOnlyUnmap();
} // namespace __sanitizer
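
A hedged sketch of how runtime code typically uses the interface above (caller and variable names are illustrative):

    // Record a stack once, keep only the 32-bit id, resolve it when reporting.
    u32 id = StackDepotPut(trace);       // deduplicates identical traces
    ...
    StackTrace rec = StackDepotGet(id);  // empty trace if id == 0
    rec.Print();
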
lib/tsan/sanitizer_common/sanitizer_stackdepotbase.h
@@ -16,71 +16,87 @@
#include <stdio.h>
#include "sanitizer_atomic.h"
+#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
-#include "sanitizer_persistent_allocator.h"
namespace __sanitizer {
template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
+ static constexpr u32 kIdSizeLog =
+ sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. */);
+ static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;
+ static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
+ static constexpr int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;
+ static constexpr u32 kLockMask = ~kUnlockMask;
+
public:
typedef typename Node::args_type args_type;
typedef typename Node::handle_type handle_type;
+ typedef typename Node::hash_type hash_type;
+
+ static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;
+ static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;
+
// Maps stack trace to an unique id.
- handle_type Put(args_type args, bool *inserted = nullptr);
+ u32 Put(args_type args, bool *inserted = nullptr);
// Retrieves a stored stack trace by the id.
args_type Get(u32 id);
- StackDepotStats *GetStats() { return &stats; }
+ StackDepotStats GetStats() const {
+ return {
+ atomic_load_relaxed(&n_uniq_ids),
+ nodes.MemoryUsage() + Node::allocated(),
+ };
+ }
void LockAll();
void UnlockAll();
void PrintAll();
- private:
- static Node *find(Node *s, args_type args, u32 hash);
- static Node *lock(atomic_uintptr_t *p);
- static void unlock(atomic_uintptr_t *p, Node *s);
+ void TestOnlyUnmap() {
+ nodes.TestOnlyUnmap();
+ internal_memset(this, 0, sizeof(*this));
+ }
- static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
- static const int kPartBits = 8;
- static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
- static const int kPartCount =
- 1 << kPartBits; // Number of subparts in the table.
- static const int kPartSize = kTabSize / kPartCount;
- static const int kMaxId = 1 << kPartShift;
+ private:
+ friend Node;
+ u32 find(u32 s, args_type args, hash_type hash) const;
+ static u32 lock(atomic_uint32_t *p);
+ static void unlock(atomic_uint32_t *p, u32 s);
+ atomic_uint32_t tab[kTabSize]; // Hash table of Node's.
- atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
- atomic_uint32_t seq[kPartCount]; // Unique id generators.
+ atomic_uint32_t n_uniq_ids;
- StackDepotStats stats;
+ TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;
friend class StackDepotReverseMap;
};
template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
- args_type args,
- u32 hash) {
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(
+ u32 s, args_type args, hash_type hash) const {
// Searches linked list s for the stack, returns its id.
- for (; s; s = s->link) {
- if (s->eq(hash, args)) {
+ for (; s;) {
+ const Node &node = nodes[s];
+ if (node.eq(hash, args))
return s;
- }
+ s = node.link;
}
- return nullptr;
+ return 0;
}
template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
- atomic_uintptr_t *p) {
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(atomic_uint32_t *p) {
// Uses the pointer lsb as mutex.
for (int i = 0;; i++) {
- uptr cmp = atomic_load(p, memory_order_relaxed);
- if ((cmp & 1) == 0 &&
- atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
- return (Node *)cmp;
+ u32 cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & kLockMask) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,
+ memory_order_acquire))
+ return cmp;
if (i < 10)
proc_yield(10);
else
@@ -90,73 +106,57 @@ Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
- atomic_uintptr_t *p, Node *s) {
- DCHECK_EQ((uptr)s & 1, 0);
- atomic_store(p, (uptr)s, memory_order_release);
+ atomic_uint32_t *p, u32 s) {
+ DCHECK_EQ(s & kLockMask, 0);
+ atomic_store(p, s, memory_order_release);
}
template <class Node, int kReservedBits, int kTabSizeLog>
-typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
-StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
- bool *inserted) {
- if (inserted) *inserted = false;
- if (!Node::is_valid(args)) return handle_type();
- uptr h = Node::hash(args);
- atomic_uintptr_t *p = &tab[h % kTabSize];
- uptr v = atomic_load(p, memory_order_consume);
- Node *s = (Node *)(v & ~1);
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted)
+ *inserted = false;
+ if (!LIKELY(Node::is_valid(args)))
+ return 0;
+ hash_type h = Node::hash(args);
+ atomic_uint32_t *p = &tab[h % kTabSize];
+ u32 v = atomic_load(p, memory_order_consume);
+ u32 s = v & kUnlockMask;
// First, try to find the existing stack.
- Node *node = find(s, args, h);
- if (node) return node->get_handle();
+ u32 node = find(s, args, h);
+ if (LIKELY(node))
+ return node;
+
// If failed, lock, retry and insert new.
- Node *s2 = lock(p);
+ u32 s2 = lock(p);
if (s2 != s) {
node = find(s2, args, h);
if (node) {
unlock(p, s2);
- return node->get_handle();
+ return node;
}
}
- uptr part = (h % kTabSize) / kPartSize;
- u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
- stats.n_uniq_ids++;
- CHECK_LT(id, kMaxId);
- id |= part << kPartShift;
- CHECK_NE(id, 0);
- CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
- uptr memsz = Node::storage_size(args);
- s = (Node *)PersistentAlloc(memsz);
- stats.allocated += memsz;
- s->id = id;
- s->store(args, h);
- s->link = s2;
+ s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
+ CHECK_EQ(s & kUnlockMask, s);
+ CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
+ Node &new_node = nodes[s];
+ new_node.store(s, args, h);
+ new_node.link = s2;
unlock(p, s);
if (inserted) *inserted = true;
- return s->get_handle();
+ return s;
}
template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
- if (id == 0) {
+ if (id == 0)
return args_type();
- }
CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
- // High kPartBits contain part id, so we need to scan at most kPartSize lists.
- uptr part = id >> kPartShift;
- for (int i = 0; i != kPartSize; i++) {
- uptr idx = part * kPartSize + i;
- CHECK_LT(idx, kTabSize);
- atomic_uintptr_t *p = &tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- Node *s = (Node *)(v & ~1);
- for (; s; s = s->link) {
- if (s->id == id) {
- return s->load();
- }
- }
- }
- return args_type();
+ if (!nodes.contains(id))
+ return args_type();
+ const Node &node = nodes[id];
+ return node.load(id);
}
template <class Node, int kReservedBits, int kTabSizeLog>
@@ -169,24 +169,23 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
for (int i = 0; i < kTabSize; ++i) {
- atomic_uintptr_t *p = &tab[i];
+ atomic_uint32_t *p = &tab[i];
uptr s = atomic_load(p, memory_order_relaxed);
- unlock(p, (Node *)(s & ~1UL));
+ unlock(p, s & kUnlockMask);
}
}
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
for (int i = 0; i < kTabSize; ++i) {
- atomic_uintptr_t *p = &tab[i];
- lock(p);
- uptr v = atomic_load(p, memory_order_relaxed);
- Node *s = (Node *)(v & ~1UL);
- for (; s; s = s->link) {
- Printf("Stack for id %u:\n", s->id);
- s->load().Print();
+ atomic_uint32_t *p = &tab[i];
+ u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;
+ for (; s;) {
+ const Node &node = nodes[s];
+ Printf("Stack for id %u:\n", s);
+ node.load(s).Print();
+ s = node.link;
}
- unlock(p, s);
}
}
lib/tsan/sanitizer_common/sanitizer_stacktrace.cpp
@@ -20,10 +20,10 @@
namespace __sanitizer {
uptr StackTrace::GetNextInstructionPc(uptr pc) {
-#if defined(__sparc__) || defined(__mips__)
+#if defined(__aarch64__)
+ return STRIP_PAC_PC((void *)pc) + 4;
+#elif defined(__sparc__) || defined(__mips__)
return pc + 8;
-#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
- return pc + 4;
#elif SANITIZER_RISCV64
// Current check order is 4 -> 2 -> 6 -> 8
u8 InsnByte = *(u8 *)(pc);
@@ -46,8 +46,10 @@ uptr StackTrace::GetNextInstructionPc(uptr pc) {
}
// bail-out if could not figure out the instruction size
return 0;
-#else
+#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
return pc + 1;
+#else
+ return pc + 4;
#endif
}
@@ -64,7 +66,7 @@ void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
top_frame_bp = 0;
}
-// Sparc implemention is in its own file.
+// Sparc implementation is in its own file.
#if !defined(__sparc__)
// In GCC on ARM bp points to saved lr, not fp, so we should check the next
@@ -119,7 +121,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
uhwptr pc1 = caller_frame[2];
#elif defined(__s390__)
uhwptr pc1 = frame[14];
-#elif defined(__riscv)
+#elif defined(__loongarch__) || defined(__riscv)
// frame[-1] contains the return address
uhwptr pc1 = frame[-1];
#else
@@ -134,7 +136,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
trace_buffer[size++] = (uptr) pc1;
}
bottom = (uptr)frame;
-#if defined(__riscv)
+#if defined(__loongarch__) || defined(__riscv)
// frame[-2] contain fp of the previous frame
uptr new_bp = (uptr)frame[-2];
#else
lib/tsan/sanitizer_common/sanitizer_stacktrace.h
@@ -20,7 +20,7 @@ namespace __sanitizer {
struct BufferedStackTrace;
-static const u32 kStackTraceMax = 256;
+static const u32 kStackTraceMax = 255;
#if SANITIZER_LINUX && defined(__mips__)
# define SANITIZER_CAN_FAST_UNWIND 0
@@ -33,7 +33,7 @@ static const u32 kStackTraceMax = 256;
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
@@ -88,21 +88,20 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
// so we return (pc-2) in that case in order to be safe.
// For A32 mode we return (pc-4) because all instructions are 32 bit long.
return (pc - 3) & (~1);
-#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
- // PCs are always 4 byte aligned.
- return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
#elif SANITIZER_RISCV64
- // RV-64 has variable instruciton length...
+ // RV-64 has variable instruction length...
// C extentions gives us 2-byte instructoins
// RV-64 has 4-byte instructions
- // + RISCV architecture allows instructions up to 8 bytes
+ // + RISC-V architecture allows instructions up to 8 bytes
// It seems difficult to figure out the exact instruction length -
// pc - 2 seems like a safe option for the purposes of stack tracing
return pc - 2;
-#else
+#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
return pc - 1;
+#else
+ return pc - 4;
#endif
}
@@ -209,11 +208,11 @@ static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
// StackTrace::GetCurrentPc() faster.
#if defined(__x86_64__)
# define GET_CURRENT_PC() \
- ({ \
+ (__extension__({ \
uptr pc; \
asm("lea 0(%%rip), %0" : "=r"(pc)); \
pc; \
- })
+ }))
#else
# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
#endif
lib/tsan/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
@@ -64,7 +64,7 @@ class StackTraceTextPrinter {
if (dedup_token_->length())
dedup_token_->append("--");
if (stack->info.function != nullptr)
- dedup_token_->append(stack->info.function);
+ dedup_token_->append("%s", stack->info.function);
}
}
@@ -166,8 +166,8 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
}
-static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
- uptr module_name_len, uptr *pc_offset) {
+int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
+ uptr *pc_offset) {
const char *found_module_name = nullptr;
bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
pc, &found_module_name, pc_offset);
@@ -216,10 +216,11 @@ void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
}
SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_get_module_and_offset_for_pc(uptr pc, char *module_name,
+int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_name,
uptr module_name_len,
- uptr *pc_offset) {
- return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
- pc_offset);
+ void **pc_offset) {
+ return __sanitizer::GetModuleAndOffsetForPc(
+ reinterpret_cast<uptr>(pc), module_name, module_name_len,
+ reinterpret_cast<uptr *>(pc_offset));
}
} // extern "C"
lib/tsan/sanitizer_common/sanitizer_stacktrace_printer.cpp
@@ -11,25 +11,47 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace_printer.h"
+
#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
#include "sanitizer_fuchsia.h"
namespace __sanitizer {
-// sanitizer_symbolizer_markup.cpp implements these differently.
-#if !SANITIZER_SYMBOLIZER_MARKUP
-
-static const char *StripFunctionName(const char *function, const char *prefix) {
- if (!function) return nullptr;
- if (!prefix) return function;
- uptr prefix_len = internal_strlen(prefix);
- if (0 == internal_strncmp(function, prefix, prefix_len))
- return function + prefix_len;
+const char *StripFunctionName(const char *function) {
+ if (!common_flags()->demangle)
+ return function;
+ if (!function)
+ return nullptr;
+ auto try_strip = [function](const char *prefix) -> const char * {
+ const uptr prefix_len = internal_strlen(prefix);
+ if (!internal_strncmp(function, prefix, prefix_len))
+ return function + prefix_len;
+ return nullptr;
+ };
+ if (SANITIZER_APPLE) {
+ if (const char *s = try_strip("wrap_"))
+ return s;
+ } else if (SANITIZER_WINDOWS) {
+ if (const char *s = try_strip("__asan_wrap_"))
+ return s;
+ } else {
+ if (const char *s = try_strip("___interceptor_"))
+ return s;
+ if (const char *s = try_strip("__interceptor_"))
+ return s;
+ }
return function;
}
+// sanitizer_symbolizer_markup.cpp implements these differently.
+#if !SANITIZER_SYMBOLIZER_MARKUP
+
static const char *DemangleFunctionName(const char *function) {
- if (!function) return nullptr;
+ if (!common_flags()->demangle)
+ return function;
+ if (!function)
+ return nullptr;
// NetBSD uses indirection for old threading functions for historical reasons
// The mangled names are internal implementation detail and should not be
@@ -104,11 +126,24 @@ static const char *DemangleFunctionName(const char *function) {
return function;
}
+static void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace,
+ InternalScopedString *buffer) {
+ if (info.uuid_size) {
+ if (PrefixSpace)
+ buffer->append(" ");
+ buffer->append("(BuildId: ");
+ for (uptr i = 0; i < info.uuid_size; ++i) {
+ buffer->append("%02x", info.uuid[i]);
+ }
+ buffer->append(")");
+ }
+}
+
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix, const char *strip_func_prefix) {
+ const char *strip_path_prefix) {
// info will be null in the case where symbolization is not needed for the
// given format. This ensures that the code below will get a hard failure
// rather than print incorrect information in case RenderNeedsSymbolization
@@ -129,7 +164,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
break;
// Frame number and all fields of AddressInfo structure.
case 'n':
- buffer->append("%zu", frame_no);
+ buffer->append("%u", frame_no);
break;
case 'p':
buffer->append("0x%zx", address);
@@ -140,9 +175,12 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
case 'o':
buffer->append("0x%zx", info->module_offset);
break;
+ case 'b':
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);
+ break;
case 'f':
- buffer->append("%s", DemangleFunctionName(StripFunctionName(
- info->function, strip_func_prefix)));
+ buffer->append("%s",
+ DemangleFunctionName(StripFunctionName(info->function)));
break;
case 'q':
buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
@@ -162,8 +200,8 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
case 'F':
// Function name and offset, if file is unknown.
if (info->function) {
- buffer->append("in %s", DemangleFunctionName(StripFunctionName(
- info->function, strip_func_prefix)));
+ buffer->append("in %s",
+ DemangleFunctionName(StripFunctionName(info->function)));
if (!info->file && info->function_offset != AddressInfo::kUnknown)
buffer->append("+0x%zx", info->function_offset);
}
@@ -181,6 +219,10 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
} else if (info->module) {
RenderModuleLocation(buffer, info->module, info->module_offset,
info->module_arch, strip_path_prefix);
+
+#if !SANITIZER_APPLE
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
+#endif
} else {
buffer->append("(<unknown module>)");
}
@@ -193,13 +235,16 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
// Always strip the module name for %M.
RenderModuleLocation(buffer, StripModuleName(info->module),
info->module_offset, info->module_arch, "");
+#if !SANITIZER_APPLE
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
+#endif
} else {
buffer->append("(%p)", (void *)address);
}
break;
default:
- Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
- *p);
+ Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
+ (void *)p);
Die();
}
}
@@ -244,14 +289,14 @@ void RenderData(InternalScopedString *buffer, const char *format,
buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%d", DI->line);
+ buffer->append("%zu", DI->line);
break;
case 'g':
buffer->append("%s", DI->name);
break;
default:
- Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
- *p);
+ Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
+ (void *)p);
Die();
}
}
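
As a concrete reading of MaybeBuildIdToBuffer() above: for a module whose recorded build id bytes are {0xde, 0xad, 0xbe, 0xef}, the new %b specifier emits

    (BuildId: deadbeef)

and the module-location paths (%L falling back to the module, and %M, on non-Apple targets) append the same token with a leading space after the usual module+offset text.
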
lib/tsan/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -17,6 +17,9 @@
namespace __sanitizer {
+// Strip interceptor prefixes from function name.
+const char *StripFunctionName(const char *function);
+
// Render the contents of "info" structure, which represents the contents of
// stack frame "frame_no" and appends it to the "buffer". "format" is a
// string with placeholders, which is copied to the output with
@@ -26,8 +29,7 @@ namespace __sanitizer {
// will be turned into
// " frame 10: function foo::bar() at my/file.cc:10"
// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
-// source files and modules, and "strip_func_prefix" to strip prefixes of
-// function names.
+// source files and modules.
// Here's the full list of available placeholders:
// %% - represents a '%' character;
// %n - frame number (copy of frame_no);
@@ -48,8 +50,7 @@ namespace __sanitizer {
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix = "",
- const char *strip_func_prefix = "");
+ const char *strip_path_prefix = "");
bool RenderNeedsSymbolization(const char *format);
lib/tsan/sanitizer_common/sanitizer_stacktrace_sparc.cpp
@@ -9,7 +9,7 @@
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//
-// Implemention of fast stack unwinding for Sparc.
+// Implementation of fast stack unwinding for Sparc.
//===----------------------------------------------------------------------===//
#if defined(__sparc__)
@@ -30,13 +30,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
// TODO(yln): add arg sanity check for stack_top/stack_bottom
CHECK_GE(max_depth, 2);
const uptr kPageSize = GetPageSizeCached();
-#if defined(__GNUC__)
- // __builtin_return_address returns the address of the call instruction
- // on the SPARC and not the return address, so we need to compensate.
- trace_buffer[0] = GetNextInstructionPc(pc);
-#else
trace_buffer[0] = pc;
-#endif
size = 1;
if (stack_top < 4096) return; // Sanity check for stack top.
// Flush register windows to memory
lib/tsan/sanitizer_common/sanitizer_persistent_allocator.cpp → lib/tsan/sanitizer_common/sanitizer_stoptheworld_fuchsia.h
@@ -1,18 +1,20 @@
-//===-- sanitizer_persistent_allocator.cpp ----------------------*- C++ -*-===//
+//===-- sanitizer_stoptheworld_fuchsia.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
-//===----------------------------------------------------------------------===//
-#include "sanitizer_persistent_allocator.h"
+
+#ifndef SANITIZER_STOPTHEWORLD_FUCHSIA_H
+#define SANITIZER_STOPTHEWORLD_FUCHSIA_H
+
+#include "sanitizer_stoptheworld.h"
namespace __sanitizer {
-PersistentAllocator thePersistentAllocator;
+class SuspendedThreadsListFuchsia final : public SuspendedThreadsList {};
} // namespace __sanitizer
+
+#endif // SANITIZER_STOPTHEWORLD_FUCHSIA_H
lib/tsan/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
@@ -16,7 +16,7 @@
#if SANITIZER_LINUX && \
(defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
- defined(__arm__) || SANITIZER_RISCV64)
+ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
#include "sanitizer_stoptheworld.h"
@@ -31,7 +31,8 @@
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
-#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
+#if (defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \
+ !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
@@ -108,7 +109,7 @@ struct TracerThreadArgument {
void *callback_argument;
// The tracer thread waits on this mutex while the parent finishes its
// preparations.
- BlockingMutex mutex;
+ Mutex mutex;
// Tracer thread signals its completion by setting done.
atomic_uintptr_t done;
uptr parent_pid;
@@ -514,6 +515,12 @@ typedef struct user_pt_regs regs_struct;
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
+#elif defined(__loongarch__)
+typedef struct user_pt_regs regs_struct;
+#define REG_SP regs[3]
+static constexpr uptr kExtraRegs[] = {0};
+#define ARCH_IOVEC_FOR_GETREGSET
+
#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
@@ -621,3 +628,4 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
// || defined(__aarch64__) || defined(__powerpc64__)
// || defined(__s390__) || defined(__i386__) || defined(__arm__)
+ // || SANITIZER_LOONGARCH64
lib/tsan/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -12,7 +12,7 @@
#include "sanitizer_platform.h"
-#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \
+#if SANITIZER_APPLE && (defined(__x86_64__) || defined(__aarch64__) || \
defined(__i386))
#include <mach/mach.h>
@@ -29,7 +29,7 @@ typedef struct {
class SuspendedThreadsListMac final : public SuspendedThreadsList {
public:
- SuspendedThreadsListMac() : threads_(1024) {}
+ SuspendedThreadsListMac() = default;
tid_t GetThreadID(uptr index) const override;
thread_t GetThread(uptr index) const;
@@ -87,11 +87,13 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
#if defined(__x86_64__)
typedef x86_thread_state64_t regs_struct;
+#define regs_flavor x86_THREAD_STATE64
#define SP_REG __rsp
#elif defined(__aarch64__)
typedef arm_thread_state64_t regs_struct;
+#define regs_flavor ARM_THREAD_STATE64
# if __DARWIN_UNIX03
# define SP_REG __sp
@@ -101,6 +103,7 @@ typedef arm_thread_state64_t regs_struct;
#elif defined(__i386)
typedef x86_thread_state32_t regs_struct;
+#define regs_flavor x86_THREAD_STATE32
#define SP_REG __esp
@@ -146,17 +149,15 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
thread_t thread = GetThread(index);
regs_struct regs;
int err;
- mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
- err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+ mach_msg_type_number_t reg_count = sizeof(regs) / sizeof(natural_t);
+ err = thread_get_state(thread, regs_flavor, (thread_state_t)&regs,
&reg_count);
if (err != KERN_SUCCESS) {
VReport(1, "Error - unable to get registers for a thread\n");
- // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,
- // or the thread does not exist. The other possible error case,
// MIG_ARRAY_TOO_LARGE, means that the state is too large, but it's
// still safe to proceed.
- return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL
- : REGISTERS_UNAVAILABLE;
+ return err == MIG_ARRAY_TOO_LARGE ? REGISTERS_UNAVAILABLE
+ : REGISTERS_UNAVAILABLE_FATAL;
}
buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
@@ -176,5 +177,5 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
} // namespace __sanitizer
-#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||
+#endif // SANITIZER_APPLE && (defined(__x86_64__) || defined(__aarch64__)) ||
// defined(__i386))
lib/tsan/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
@@ -68,7 +68,7 @@ class SuspendedThreadsListNetBSD final : public SuspendedThreadsList {
struct TracerThreadArgument {
StopTheWorldCallback callback;
void *callback_argument;
- BlockingMutex mutex;
+ Mutex mutex;
atomic_uintptr_t done;
uptr parent_pid;
};
lib/tsan/sanitizer_common/sanitizer_stoptheworld_win.cpp
@@ -0,0 +1,175 @@
+//===-- sanitizer_stoptheworld_win.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_WINDOWS
+
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+// windows.h needs to be included before tlhelp32.h
+# include <tlhelp32.h>
+
+# include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+
+namespace {
+
+struct SuspendedThreadsListWindows final : public SuspendedThreadsList {
+ InternalMmapVector<HANDLE> threadHandles;
+ InternalMmapVector<DWORD> threadIds;
+
+ SuspendedThreadsListWindows() {
+ threadIds.reserve(1024);
+ threadHandles.reserve(1024);
+ }
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
+
+ tid_t GetThreadID(uptr index) const override;
+ uptr ThreadCount() const override;
+};
+
+// Stack Pointer register names on different architectures
+# if SANITIZER_X64
+# define SP_REG Rsp
+# elif SANITIZER_I386
+# define SP_REG Esp
+# elif SANITIZER_ARM | SANITIZER_ARM64
+# define SP_REG Sp
+# else
+# error Architecture not supported!
+# endif
+
+PtraceRegistersStatus SuspendedThreadsListWindows::GetRegistersAndSP(
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
+ CHECK_LT(index, threadHandles.size());
+
+ buffer->resize(RoundUpTo(sizeof(CONTEXT), sizeof(uptr)) / sizeof(uptr));
+ CONTEXT *thread_context = reinterpret_cast<CONTEXT *>(buffer->data());
+ thread_context->ContextFlags = CONTEXT_ALL;
+ CHECK(GetThreadContext(threadHandles[index], thread_context));
+ *sp = thread_context->SP_REG;
+
+ return REGISTERS_AVAILABLE;
+}
+
+tid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const {
+ CHECK_LT(index, threadIds.size());
+ return threadIds[index];
+}
+
+uptr SuspendedThreadsListWindows::ThreadCount() const {
+ return threadIds.size();
+}
+
+struct RunThreadArgs {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+DWORD WINAPI RunThread(void *argument) {
+ RunThreadArgs *run_args = (RunThreadArgs *)argument;
+
+ const DWORD this_thread = GetCurrentThreadId();
+ const DWORD this_process = GetCurrentProcessId();
+
+ SuspendedThreadsListWindows suspended_threads_list;
+ bool new_thread_found;
+
+ do {
+ // Take a snapshot of all Threads
+ const HANDLE threads = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+ CHECK(threads != INVALID_HANDLE_VALUE);
+
+ THREADENTRY32 thread_entry;
+ thread_entry.dwSize = sizeof(thread_entry);
+ new_thread_found = false;
+
+ if (!Thread32First(threads, &thread_entry))
+ break;
+
+ do {
+ if (thread_entry.th32ThreadID == this_thread ||
+ thread_entry.th32OwnerProcessID != this_process)
+ continue;
+
+ bool suspended_thread = false;
+ for (const auto thread_id : suspended_threads_list.threadIds) {
+ if (thread_id == thread_entry.th32ThreadID) {
+ suspended_thread = true;
+ break;
+ }
+ }
+
+ // Skip the Thread if it was already suspended
+ if (suspended_thread)
+ continue;
+
+ const HANDLE thread =
+ OpenThread(THREAD_ALL_ACCESS, FALSE, thread_entry.th32ThreadID);
+ CHECK(thread);
+
+ if (SuspendThread(thread) == (DWORD)-1) {
+ DWORD last_error = GetLastError();
+
+ VPrintf(1, "Could not suspend thread %lu (error %lu)",
+ thread_entry.th32ThreadID, last_error);
+ continue;
+ }
+
+ suspended_threads_list.threadIds.push_back(thread_entry.th32ThreadID);
+ suspended_threads_list.threadHandles.push_back(thread);
+ new_thread_found = true;
+ } while (Thread32Next(threads, &thread_entry));
+
+ CloseHandle(threads);
+
+ // Between the call to `CreateToolhelp32Snapshot` and suspending the
+ // relevant Threads, new Threads could have been created. So continue to
+ // find and suspend new Threads until none are found.
+ } while (new_thread_found);
+
+ // Now all Threads of this Process except this one should be suspended.
+ // Execute the callback function.
+ run_args->callback(suspended_threads_list, run_args->argument);
+
+ // Resume all Threads
+ for (const auto suspended_thread_handle :
+ suspended_threads_list.threadHandles) {
+ CHECK_NE(ResumeThread(suspended_thread_handle), -1);
+ CloseHandle(suspended_thread_handle);
+ }
+
+ return 0;
+}
+
+} // namespace
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct RunThreadArgs arg = {callback, argument};
+ DWORD trace_thread_id;
+
+ auto trace_thread =
+ CreateThread(nullptr, 0, RunThread, &arg, 0, &trace_thread_id);
+ CHECK(trace_thread);
+
+ WaitForSingleObject(trace_thread, INFINITE);
+ CloseHandle(trace_thread);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
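
For orientation, a minimal sketch (not part of the commit) of how a tool might drive this Windows StopTheWorld implementation; the callback signature and the SuspendedThreadsList accessors are the cross-platform ones from sanitizer_stoptheworld.h, and the Printf reporting is purely illustrative:

  #include "sanitizer_common/sanitizer_stoptheworld.h"

  using namespace __sanitizer;

  // Runs on the tracer thread while every other thread of the process is
  // suspended; it may inspect thread ids, stack pointers and register state.
  static void ScanSuspendedThreads(const SuspendedThreadsList &threads,
                                   void *arg) {
    InternalMmapVector<uptr> registers;
    for (uptr i = 0; i < threads.ThreadCount(); i++) {
      uptr sp = 0;
      if (threads.GetRegistersAndSP(i, &registers, &sp) == REGISTERS_AVAILABLE)
        Printf("thread %llu: sp = %p\n", (u64)threads.GetThreadID(i),
               (void *)sp);
    }
    (void)arg;
  }

  void SuspendAndScan() {
    // Spawns the tracer thread, suspends the world, runs the callback, then
    // resumes all suspended threads and joins the tracer.
    StopTheWorld(ScanSuspendedThreads, /*argument=*/nullptr);
  }
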
lib/tsan/sanitizer_common/sanitizer_suppressions.cpp
@@ -86,6 +86,7 @@ void SuppressionContext::ParseFromFile(const char *filename) {
}
Parse(file_contents);
+ UnmapOrDie(file_contents, contents_size);
}
bool SuppressionContext::Match(const char *str, const char *type,
lib/tsan/sanitizer_common/sanitizer_symbolizer.cpp
@@ -11,10 +11,11 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
-#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -30,6 +31,7 @@ void AddressInfo::Clear() {
InternalFree(file);
internal_memset(this, 0, sizeof(AddressInfo));
function_offset = kUnknown;
+ uuid_size = 0;
}
void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
@@ -37,6 +39,16 @@ void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
module = internal_strdup(mod_name);
module_offset = mod_offset;
module_arch = mod_arch;
+ uuid_size = 0;
+}
+
+void AddressInfo::FillModuleInfo(const LoadedModule &mod) {
+ module = internal_strdup(mod.full_name());
+ module_offset = address - mod.base_address();
+ module_arch = mod.arch();
+ if (mod.uuid_size())
+ internal_memcpy(uuid, mod.uuid(), mod.uuid_size());
+ uuid_size = mod.uuid_size();
}
SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
@@ -126,10 +138,4 @@ Symbolizer::SymbolizerScope::~SymbolizerScope() {
sym_->end_hook_();
}
-void Symbolizer::LateInitializeTools() {
- for (auto &tool : tools_) {
- tool.LateInitialize();
- }
-}
-
} // namespace __sanitizer
lib/tsan/sanitizer_common/sanitizer_symbolizer.h
@@ -32,6 +32,8 @@ struct AddressInfo {
char *module;
uptr module_offset;
ModuleArch module_arch;
+ u8 uuid[kModuleUUIDSize];
+ uptr uuid_size;
static const uptr kUnknown = ~(uptr)0;
char *function;
@@ -45,6 +47,8 @@ struct AddressInfo {
// Deletes all strings and resets all fields.
void Clear();
void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
+ void FillModuleInfo(const LoadedModule &mod);
+ uptr module_base() const { return address - module_offset; }
};
// Linked list of symbolized frames (each frame is described by AddressInfo).
@@ -158,7 +162,7 @@ class Symbolizer final {
// its method should be protected by |mu_|.
class ModuleNameOwner {
public:
- explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+ explicit ModuleNameOwner(Mutex *synchronized_by)
: last_match_(nullptr), mu_(synchronized_by) {
storage_.reserve(kInitialCapacity);
}
@@ -169,7 +173,7 @@ class Symbolizer final {
InternalMmapVector<const char*> storage_;
const char *last_match_;
- BlockingMutex *mu_;
+ Mutex *mu_;
} module_names_;
/// Platform-specific function for creating a Symbolizer object.
@@ -192,7 +196,7 @@ class Symbolizer final {
// Mutex locked from public methods of |Symbolizer|, so that the internals
// (including individual symbolizer tools and platform-specific methods) are
// always synchronized.
- BlockingMutex mu_;
+ Mutex mu_;
IntrusiveList<SymbolizerTool> tools_;
@@ -209,9 +213,6 @@ class Symbolizer final {
private:
const Symbolizer *sym_;
};
-
- // Calls `LateInitialize()` on all items in `tools_`.
- void LateInitializeTools();
};
#ifdef SANITIZER_WINDOWS
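
A small illustration (not part of the commit) of what the extended AddressInfo carries after symbolization; `pc` is a hypothetical code address:

  // Symbolizer::SymbolizePC() fills module/module_offset/module_arch and now
  // also the module UUID via FillModuleInfo(const LoadedModule &).
  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
  if (frame->info.module) {
    // module_base() recovers the module load address from the frame itself.
    uptr base = frame->info.module_base();  // == pc - frame->info.module_offset
    if (frame->info.uuid_size)
      Printf("%s loaded at %p (uuid: %zu bytes)\n", frame->info.module,
             (void *)base, frame->info.uuid_size);
  }
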
lib/tsan/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -13,15 +13,15 @@
#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
#define SANITIZER_SYMBOLIZER_INTERNAL_H
-#include "sanitizer_symbolizer.h"
#include "sanitizer_file.h"
+#include "sanitizer_symbolizer.h"
#include "sanitizer_vector.h"
namespace __sanitizer {
// Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr
// is extracted. When extracting a string, a newly allocated (using
-// InternalAlloc) and null-terminataed buffer is returned. They return a pointer
+// InternalAlloc) and null-terminated buffer is returned. They return a pointer
// to the next character after the found delimiter.
const char *ExtractToken(const char *str, const char *delims, char **result);
const char *ExtractInt(const char *str, const char *delims, int *result);
@@ -70,11 +70,6 @@ class SymbolizerTool {
return nullptr;
}
- // Called during the LateInitialize phase of Sanitizer initialization.
- // Usually this is a safe place to call code that might need to use user
- // memory allocators.
- virtual void LateInitialize() {}
-
protected:
~SymbolizerTool() {}
};
@@ -91,13 +86,14 @@ class SymbolizerProcess {
~SymbolizerProcess() {}
/// The maximum number of arguments required to invoke a tool process.
- static const unsigned kArgVMax = 6;
+ static const unsigned kArgVMax = 16;
// Customizable by subclasses.
virtual bool StartSymbolizerSubprocess();
- virtual bool ReadFromSymbolizer(char *buffer, uptr max_length);
+ virtual bool ReadFromSymbolizer();
// Return the environment to run the symbolizer in.
virtual char **GetEnvP() { return GetEnviron(); }
+ InternalMmapVector<char> &GetBuff() { return buffer_; }
private:
virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
@@ -118,8 +114,7 @@ class SymbolizerProcess {
fd_t input_fd_;
fd_t output_fd_;
- static const uptr kBufferSize = 16 * 1024;
- char buffer_[kBufferSize];
+ InternalMmapVector<char> buffer_;
static const uptr kMaxTimesRestarted = 5;
static const int kSymbolizerStartupTimeMillis = 10;
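
Since LateInitialize() is gone from SymbolizerTool, any per-tool setup now has to happen in the tool's constructor or along the PlatformInit()/ChooseSymbolizerTools path. A hedged sketch of a minimal tool against the trimmed interface (the tool itself is invented):

  class NoopSymbolizerTool final : public SymbolizerTool {
   public:
    // All setup happens here; there is no late-initialization hook anymore.
    NoopSymbolizerTool() {}

    bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
      // Keep only the module/offset data already filled in by the Symbolizer.
      return true;
    }
    bool SymbolizeData(uptr addr, DataInfo *info) override { return false; }
  };
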
lib/tsan/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
@@ -11,11 +11,11 @@
// Libbacktrace implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
+#include "sanitizer_symbolizer_libbacktrace.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer.h"
-#include "sanitizer_symbolizer_libbacktrace.h"
#if SANITIZER_LIBBACKTRACE
# include "backtrace-supported.h"
lib/tsan/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -83,16 +83,13 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
}
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
- BlockingMutexLock l(&mu_);
- const char *module_name = nullptr;
- uptr module_offset;
- ModuleArch arch;
+ Lock l(&mu_);
SymbolizedStack *res = SymbolizedStack::New(addr);
- if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
- &arch))
+ auto *mod = FindModuleForAddress(addr);
+ if (!mod)
return res;
// Always fill data about module name and offset.
- res->info.FillModuleInfo(module_name, module_offset, arch);
+ res->info.FillModuleInfo(*mod);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (tool.SymbolizePC(addr, res)) {
@@ -103,7 +100,7 @@ SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
}
bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
const char *module_name = nullptr;
uptr module_offset;
ModuleArch arch;
@@ -124,7 +121,7 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
}
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
const char *module_name = nullptr;
if (!FindModuleNameAndOffsetForAddress(
addr, &module_name, &info->module_offset, &info->module_arch))
@@ -141,7 +138,7 @@ bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
uptr *module_address) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
const char *internal_module_name = nullptr;
ModuleArch arch;
if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
@@ -154,7 +151,7 @@ bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
}
void Symbolizer::Flush() {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
tool.Flush();
@@ -162,7 +159,7 @@ void Symbolizer::Flush() {
}
const char *Symbolizer::Demangle(const char *name) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (const char *demangled = tool.Demangle(name))
@@ -240,7 +237,7 @@ const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
class LLVMSymbolizerProcess final : public SymbolizerProcess {
public:
explicit LLVMSymbolizerProcess(const char *path)
- : SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_MAC) {}
+ : SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_APPLE) {}
private:
bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
@@ -259,6 +256,8 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386";
+#elif SANITIZER_LOONGARCH64
+ const char *const kSymbolizerArch = "--default-arch=loongarch64";
#elif SANITIZER_RISCV64
const char *const kSymbolizerArch = "--default-arch=riscv64";
#elif defined(__aarch64__)
@@ -277,14 +276,17 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=unknown";
#endif
- const char *const inline_flag = common_flags()->symbolize_inline_frames
- ? "--inlines"
- : "--no-inlines";
+ const char *const demangle_flag =
+ common_flags()->demangle ? "--demangle" : "--no-demangle";
+ const char *const inline_flag =
+ common_flags()->symbolize_inline_frames ? "--inlines" : "--no-inlines";
int i = 0;
argv[i++] = path_to_binary;
+ argv[i++] = demangle_flag;
argv[i++] = inline_flag;
argv[i++] = kSymbolizerArch;
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
};
@@ -363,14 +365,21 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
}
}
-// Parses a two-line string in the following format:
+// Parses a two- or three-line string in the following format:
// <symbol_name>
// <start_address> <size>
-// Used by LLVMSymbolizer and InternalSymbolizer.
+// <filename>:<column>
+// Used by LLVMSymbolizer and InternalSymbolizer. LLVMSymbolizer added support
+// for symbolizing the third line in D123538, but we support the older two-line
+// information as well.
void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
str = ExtractToken(str, "\n", &info->name);
str = ExtractUptr(str, " ", &info->start);
str = ExtractUptr(str, "\n", &info->size);
+ // Note: If the third line isn't present, these calls will set info.{file,
+ // line} to empty strings.
+ str = ExtractToken(str, ":", &info->file);
+ str = ExtractUptr(str, "\n", &info->line);
}
static void ParseSymbolizeFrameOutput(const char *str,
@@ -500,9 +509,9 @@ const char *SymbolizerProcess::SendCommandImpl(const char *command) {
return nullptr;
if (!WriteToSymbolizer(command, internal_strlen(command)))
return nullptr;
- if (!ReadFromSymbolizer(buffer_, kBufferSize))
- return nullptr;
- return buffer_;
+ if (!ReadFromSymbolizer())
+ return nullptr;
+ return buffer_.data();
}
bool SymbolizerProcess::Restart() {
@@ -513,31 +522,33 @@ bool SymbolizerProcess::Restart() {
return StartSymbolizerSubprocess();
}
-bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
- if (max_length == 0)
- return true;
- uptr read_len = 0;
- while (true) {
+bool SymbolizerProcess::ReadFromSymbolizer() {
+ buffer_.clear();
+ constexpr uptr max_length = 1024;
+ bool ret = true;
+ do {
uptr just_read = 0;
- bool success = ReadFromFile(input_fd_, buffer + read_len,
- max_length - read_len - 1, &just_read);
+ uptr size_before = buffer_.size();
+ buffer_.resize(size_before + max_length);
+ buffer_.resize(buffer_.capacity());
+ bool ret = ReadFromFile(input_fd_, &buffer_[size_before],
+ buffer_.size() - size_before, &just_read);
+
+ if (!ret)
+ just_read = 0;
+
+ buffer_.resize(size_before + just_read);
+
// We can't read 0 bytes, as we don't expect external symbolizer to close
// its stdout.
- if (!success || just_read == 0) {
+ if (just_read == 0) {
Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
- return false;
- }
- read_len += just_read;
- if (ReachedEndOfOutput(buffer, read_len))
- break;
- if (read_len + 1 == max_length) {
- Report("WARNING: Symbolizer buffer too small\n");
- read_len = 0;
+ ret = false;
break;
}
- }
- buffer[read_len] = '\0';
- return true;
+ } while (!ReachedEndOfOutput(buffer_.data(), buffer_.size()));
+ buffer_.push_back('\0');
+ return ret;
}
bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {
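
To make the two-or-three-line DATA format concrete, a hedged, self-contained illustration of ParseSymbolizeDataOutput (the symbol name, numbers and path are invented):

  DataInfo info;
  info.Clear();
  const char *output =
      "g_counter\n"
      "6295592 8\n"
      "/src/app/main.cpp:12\n";
  ParseSymbolizeDataOutput(output, &info);
  // info.name == "g_counter", info.start == 6295592, info.size == 8,
  // info.file == "/src/app/main.cpp", info.line == 12.
  // With the older two-line output the last two fields stay empty/zero.
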
lib/tsan/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -12,19 +12,18 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_mac.h"
-#include "sanitizer_symbolizer_mac.h"
+# include <dlfcn.h>
+# include <errno.h>
+# include <stdlib.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <util.h>
-#include <dlfcn.h>
-#include <errno.h>
-#include <mach/mach.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <util.h>
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_mac.h"
+# include "sanitizer_symbolizer_mac.h"
namespace __sanitizer {
@@ -58,13 +57,6 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
return true;
}
-#define K_ATOS_ENV_VAR "__check_mach_ports_lookup"
-
-// This cannot live in `AtosSymbolizerProcess` because instances of that object
-// are allocated by the internal allocator which under ASan is poisoned with
-// kAsanInternalHeapMagic.
-static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000";
-
class AtosSymbolizerProcess final : public SymbolizerProcess {
public:
explicit AtosSymbolizerProcess(const char *path)
@@ -72,51 +64,13 @@ class AtosSymbolizerProcess final : public SymbolizerProcess {
pid_str_[0] = '\0';
}
- void LateInitialize() {
- if (SANITIZER_IOSSIM) {
- // `putenv()` may call malloc/realloc so it is only safe to do this
- // during LateInitialize() or later (i.e. we can't do this in the
- // constructor). We also can't do this in `StartSymbolizerSubprocess()`
- // because in TSan we switch allocators when we're symbolizing.
- // We use `putenv()` rather than `setenv()` so that we can later directly
- // write into the storage without LibC getting involved to change what the
- // variable is set to
- int result = putenv(kAtosMachPortEnvEntry);
- CHECK_EQ(result, 0);
- }
- }
-
private:
bool StartSymbolizerSubprocess() override {
- // Configure sandbox before starting atos process.
-
// Put the string command line argument in the object so that it outlives
// the call to GetArgV.
- internal_snprintf(pid_str_, sizeof(pid_str_), "%d", internal_getpid());
-
- if (SANITIZER_IOSSIM) {
- // `atos` in the simulator is restricted in its ability to retrieve the
- // task port for the target process (us) so we need to do extra work
- // to pass our task port to it.
- mach_port_t ports[]{mach_task_self()};
- kern_return_t ret =
- mach_ports_register(mach_task_self(), ports, /*count=*/1);
- CHECK_EQ(ret, KERN_SUCCESS);
-
- // Set environment variable that signals to `atos` that it should look
- // for our task port. We can't call `setenv()` here because it might call
- // malloc/realloc. To avoid that we instead update the
- // `mach_port_env_var_entry_` variable with our current PID.
- uptr count = internal_snprintf(kAtosMachPortEnvEntry,
- sizeof(kAtosMachPortEnvEntry),
- K_ATOS_ENV_VAR "=%s", pid_str_);
- CHECK_GE(count, sizeof(K_ATOS_ENV_VAR) + internal_strlen(pid_str_));
- // Document our assumption but without calling `getenv()` in normal
- // builds.
- DCHECK(getenv(K_ATOS_ENV_VAR));
- DCHECK_EQ(internal_strcmp(getenv(K_ATOS_ENV_VAR), pid_str_), 0);
- }
+ internal_snprintf(pid_str_, sizeof(pid_str_), "%d", (int)internal_getpid());
+ // Configure sandbox before starting atos process.
return SymbolizerProcess::StartSymbolizerSubprocess();
}
@@ -137,13 +91,10 @@ class AtosSymbolizerProcess final : public SymbolizerProcess {
argv[i++] = "-d";
}
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
char pid_str_[16];
- // Space for `\0` in `K_ATOS_ENV_VAR` is reused for `=`.
- static_assert(sizeof(kAtosMachPortEnvEntry) ==
- (sizeof(K_ATOS_ENV_VAR) + sizeof(pid_str_)),
- "sizes should match");
};
#undef K_ATOS_ENV_VAR
@@ -212,7 +163,7 @@ bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
uptr start_address = AddressInfo::kUnknown;
if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
&stack->info.file, &line, &start_address)) {
- process_ = nullptr;
+ Report("WARNING: atos failed to symbolize address \"0x%zx\"\n", addr);
return false;
}
stack->info.line = (int)line;
@@ -249,8 +200,6 @@ bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return true;
}
-void AtosSymbolizer::LateInitialize() { process_->LateInitialize(); }
-
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/sanitizer_common/sanitizer_symbolizer_mac.h
@@ -15,7 +15,7 @@
#define SANITIZER_SYMBOLIZER_MAC_H
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_symbolizer_internal.h"
@@ -35,7 +35,6 @@ class AtosSymbolizer final : public SymbolizerTool {
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
bool SymbolizeData(uptr addr, DataInfo *info) override;
- void LateInitialize() override;
private:
AtosSymbolizerProcess *process_;
@@ -43,6 +42,6 @@ class AtosSymbolizer final : public SymbolizerTool {
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
#endif // SANITIZER_SYMBOLIZER_MAC_H
lib/tsan/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -91,7 +91,7 @@ bool RenderNeedsSymbolization(const char *format) { return false; }
// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix, const char *strip_func_prefix) {
+ const char *strip_path_prefix) {
CHECK(!RenderNeedsSymbolization(format));
buffer->append(kFormatFrame, frame_no, address);
}
@@ -100,9 +100,7 @@ Symbolizer *Symbolizer::PlatformInit() {
return new (symbolizer_allocator_) Symbolizer({});
}
-void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
-}
+void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
void StartReportDeadlySignal() {}
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
lib/tsan/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -13,25 +13,25 @@
#include "sanitizer_platform.h"
#if SANITIZER_POSIX
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_posix.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_symbolizer_internal.h"
-#include "sanitizer_symbolizer_libbacktrace.h"
-#include "sanitizer_symbolizer_mac.h"
-
-#include <dlfcn.h> // for dlsym()
-#include <errno.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-#include <unistd.h>
+# include <dlfcn.h> // for dlsym()
+# include <errno.h>
+# include <stdint.h>
+# include <stdlib.h>
+# include <sys/wait.h>
+# include <unistd.h>
+
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_posix.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_symbolizer_internal.h"
+# include "sanitizer_symbolizer_libbacktrace.h"
+# include "sanitizer_symbolizer_mac.h"
// C++ demangling function, as required by Itanium C++ ABI. This is weak,
// because we do not require a C++ ABI library to be linked to a program
@@ -72,7 +72,6 @@ static swift_demangle_ft swift_demangle_f;
// symbolication.
static void InitializeSwiftDemangler() {
swift_demangle_f = (swift_demangle_ft)dlsym(RTLD_DEFAULT, "swift_demangle");
- (void)dlerror(); // Cleanup error message in case of failure
}
// Attempts to demangle a Swift name. The demangler will return nullptr if a
@@ -155,7 +154,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
}
if (use_posix_spawn_) {
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
fd_t fd = internal_spawn(argv, const_cast<const char **>(GetEnvP()), &pid);
if (fd == kInvalidFd) {
Report("WARNING: failed to spawn external symbolizer (errno: %d)\n",
@@ -165,9 +164,9 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
input_fd_ = fd;
output_fd_ = fd;
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
UNIMPLEMENTED();
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
} else {
fd_t infd[2] = {}, outfd[2] = {};
if (!CreateTwoHighNumberedPipes(infd, outfd)) {
@@ -213,31 +212,36 @@ class Addr2LineProcess final : public SymbolizerProcess {
const char *(&argv)[kArgVMax]) const override {
int i = 0;
argv[i++] = path_to_binary;
- argv[i++] = "-iCfe";
+ if (common_flags()->demangle)
+ argv[i++] = "-C";
+ if (common_flags()->symbolize_inline_frames)
+ argv[i++] = "-i";
+ argv[i++] = "-fe";
argv[i++] = module_name_;
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
bool ReachedEndOfOutput(const char *buffer, uptr length) const override;
- bool ReadFromSymbolizer(char *buffer, uptr max_length) override {
- if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))
+ bool ReadFromSymbolizer() override {
+ if (!SymbolizerProcess::ReadFromSymbolizer())
return false;
- // The returned buffer is empty when output is valid, but exceeds
- // max_length.
- if (*buffer == '\0')
- return true;
+ auto &buff = GetBuff();
// We should cut out output_terminator_ at the end of given buffer,
// appended by addr2line to mark the end of its meaningful output.
// We cannot scan the buffer from its beginning, because it is legal for it
// to start with output_terminator_ in case given offset is invalid. So,
// scanning from second character.
- char *garbage = internal_strstr(buffer + 1, output_terminator_);
+ char *garbage = internal_strstr(buff.data() + 1, output_terminator_);
// This should never be NULL since buffer must end up with
// output_terminator_.
CHECK(garbage);
+
// Trim the buffer.
- garbage[0] = '\0';
+ uintptr_t new_size = garbage - buff.data();
+ GetBuff().resize(new_size);
+ GetBuff().push_back('\0');
return true;
}
@@ -312,37 +316,42 @@ class Addr2LinePool final : public SymbolizerTool {
FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
};
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
+# if SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength,
- bool SymbolizeInlineFrames);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_symbolize_flush();
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
- int MaxLength);
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_symbolize_flush();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
+__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_set_demangle(bool Demangle);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_set_inline_frames(bool InlineFrames);
} // extern "C"
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
- if (__sanitizer_symbolize_code != 0 &&
- __sanitizer_symbolize_data != 0) {
- return new(*alloc) InternalSymbolizer();
- }
+ if (__sanitizer_symbolize_set_demangle)
+ CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));
+ if (__sanitizer_symbolize_set_inline_frames)
+ CHECK(__sanitizer_symbolize_set_inline_frames(
+ common_flags()->symbolize_inline_frames));
+ if (__sanitizer_symbolize_code && __sanitizer_symbolize_data)
+ return new (*alloc) InternalSymbolizer();
return 0;
}
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
- stack->info.module, stack->info.module_offset, buffer_, kBufferSize,
- common_flags()->symbolize_inline_frames);
- if (result) ParseSymbolizePCOutput(buffer_, stack);
+ stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
+ if (result)
+ ParseSymbolizePCOutput(buffer_, stack);
return result;
}
@@ -365,7 +374,7 @@ class InternalSymbolizer final : public SymbolizerTool {
if (__sanitizer_symbolize_demangle) {
for (uptr res_length = 1024;
res_length <= InternalSizeClassMap::kMaxSize;) {
- char *res_buff = static_cast<char*>(InternalAlloc(res_length));
+ char *res_buff = static_cast<char *>(InternalAlloc(res_length));
uptr req_length =
__sanitizer_symbolize_demangle(name, res_buff, res_length);
if (req_length > res_length) {
@@ -380,19 +389,19 @@ class InternalSymbolizer final : public SymbolizerTool {
}
private:
- InternalSymbolizer() { }
+ InternalSymbolizer() {}
static const int kBufferSize = 16 * 1024;
char buffer_[kBufferSize];
};
-#else // SANITIZER_SUPPORTS_WEAK_HOOKS
+# else // SANITIZER_SUPPORTS_WEAK_HOOKS
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
};
-#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+# endif // SANITIZER_SUPPORTS_WEAK_HOOKS
const char *Symbolizer::PlatformDemangle(const char *name) {
return DemangleSwiftAndCXX(name);
@@ -417,13 +426,13 @@ static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
return new(*allocator) LLVMSymbolizer(path, allocator);
} else if (!internal_strcmp(binary_name, "atos")) {
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
VReport(2, "Using atos at user-specified path: %s\n", path);
return new(*allocator) AtosSymbolizer(path, allocator);
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
Report("ERROR: Using `atos` is only supported on Darwin.\n");
Die();
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
} else if (!internal_strcmp(binary_name, "addr2line")) {
VReport(2, "Using addr2line at user-specified path: %s\n", path);
return new(*allocator) Addr2LinePool(path, allocator);
@@ -436,12 +445,12 @@ static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
// Otherwise symbolizer program is unknown, let's search $PATH
CHECK(path == nullptr);
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
if (const char *found_path = FindPathToBinary("atos")) {
VReport(2, "Using atos found at: %s\n", found_path);
return new(*allocator) AtosSymbolizer(found_path, allocator);
}
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
return new(*allocator) LLVMSymbolizer(found_path, allocator);
@@ -478,10 +487,10 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
list->push_back(tool);
}
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
VReport(2, "Using dladdr symbolizer.\n");
list->push_back(new(*allocator) DlAddrSymbolizer());
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
}
Symbolizer *Symbolizer::PlatformInit() {
@@ -492,7 +501,7 @@ Symbolizer *Symbolizer::PlatformInit() {
}
void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ Symbolizer::GetOrInit();
InitializeSwiftDemangler();
}
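
A worked example of the effect of the flag-dependent argv above (the binary name and module path are illustrative):

  // Running a sanitized binary with, e.g.
  //   ASAN_OPTIONS=demangle=0:symbolize_inline_frames=0
  // now makes the runtime spawn
  //   addr2line -fe libfoo.so
  // instead of the previous unconditional
  //   addr2line -iCfe libfoo.so
  // while the default flags (demangle=1, symbolize_inline_frames=1) give
  //   addr2line -C -i -fe libfoo.so
  // llvm-symbolizer likewise gains --demangle / --no-demangle next to the
  // existing --inlines / --no-inlines and --default-arch=<arch> arguments.
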
lib/tsan/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -88,11 +88,17 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack,
#endif
}
-void ReportMmapWriteExec(int prot) {
+void ReportMmapWriteExec(int prot, int flags) {
#if SANITIZER_POSIX && (!SANITIZER_GO && !SANITIZER_ANDROID)
- if ((prot & (PROT_WRITE | PROT_EXEC)) != (PROT_WRITE | PROT_EXEC))
+ int pflags = (PROT_WRITE | PROT_EXEC);
+ if ((prot & pflags) != pflags)
return;
+# if SANITIZER_APPLE && defined(MAP_JIT)
+ if ((flags & MAP_JIT) == MAP_JIT)
+ return;
+# endif
+
ScopedErrorReportLock l;
SanitizerCommonDecorator d;
@@ -101,8 +107,7 @@ void ReportMmapWriteExec(int prot) {
stack->Reset();
uptr top = 0;
uptr bottom = 0;
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
bool fast = common_flags()->fast_unwind_on_fatal;
if (StackTrace::WillUseFastUnwind(fast)) {
GetThreadStackTopAndBottom(false, &top, &bottom);
@@ -205,9 +210,9 @@ static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
Report("Hint: pc points to the zero page.\n");
if (sig.is_memory_access) {
const char *access_type =
- sig.write_flag == SignalContext::WRITE
+ sig.write_flag == SignalContext::Write
? "WRITE"
- : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ : (sig.write_flag == SignalContext::Read ? "READ" : "UNKNOWN");
Report("The signal is caused by a %s memory access.\n", access_type);
if (!sig.is_true_faulting_addr)
Report("Hint: this fault was caused by a dereference of a high value "
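
Callers of ReportMmapWriteExec now pass the mmap flags as well, which lets the W+X report be suppressed for legitimate MAP_JIT mappings on Apple platforms. A hedged sketch of the resulting behavior (the direct calls stand in for what an mmap interceptor would do, and MAP_JIT exists only on Apple):

  ReportMmapWriteExec(PROT_WRITE | PROT_EXEC, 0);        // reported (W+X)
  ReportMmapWriteExec(PROT_READ | PROT_WRITE, 0);        // ignored (not W+X)
  #if defined(MAP_JIT)
  ReportMmapWriteExec(PROT_WRITE | PROT_EXEC, MAP_JIT);  // ignored on Apple
  #endif
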
lib/tsan/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -14,8 +14,8 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#include "sanitizer_dbghelp.h"
-#include "sanitizer_symbolizer_internal.h"
+# include "sanitizer_dbghelp.h"
+# include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -231,8 +231,6 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
// Check that tool command lines are simple and that complete escaping is
// unnecessary.
CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
- CHECK(!internal_strstr(arg, "\\\\") &&
- "double backslashes in args unsupported");
CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
"args ending in backslash and empty args unsupported");
command_line.append("\"%s\" ", arg);
@@ -294,15 +292,15 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
const char *path =
user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
if (path) {
- VReport(2, "Using llvm-symbolizer at %spath: %s\n",
- user_path ? "user-specified " : "", path);
- list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));
- } else {
if (user_path && user_path[0] == '\0') {
VReport(2, "External symbolizer is explicitly disabled.\n");
} else {
- VReport(2, "External symbolizer is not present.\n");
+ VReport(2, "Using llvm-symbolizer at %spath: %s\n",
+ user_path ? "user-specified " : "", path);
+ list->push_back(new (*allocator) LLVMSymbolizer(path, allocator));
}
+ } else {
+ VReport(2, "External symbolizer is not present.\n");
}
// Add the dbghelp based symbolizer.
@@ -318,7 +316,7 @@ Symbolizer *Symbolizer::PlatformInit() {
}
void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ Symbolizer::GetOrInit();
}
} // namespace __sanitizer
lib/tsan/sanitizer_common/sanitizer_syscall_generic.inc
@@ -13,13 +13,14 @@
// NetBSD uses libc calls directly
#if !SANITIZER_NETBSD
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_SOLARIS
# define SYSCALL(name) SYS_ ## name
#else
# define SYSCALL(name) __NR_ ## name
#endif
-#if defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_MAC)
+#if (defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_APPLE)) || \
+ (defined(__aarch64__) && SANITIZER_FREEBSD)
# define internal_syscall __syscall
# else
# define internal_syscall syscall
lib/tsan/sanitizer_common/sanitizer_syscalls_netbsd.inc
@@ -2255,13 +2255,13 @@ PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ }
POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ }
PRE_SYSCALL(setcontext)(void *ucp_) {
if (ucp_) {
- PRE_READ(ucp_, ucontext_t_sz);
+ PRE_READ(ucp_, ucontext_t_sz(ucp_));
}
}
POST_SYSCALL(setcontext)(long long res, void *ucp_) {}
PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) {
if (ucp_) {
- PRE_READ(ucp_, ucontext_t_sz);
+ PRE_READ(ucp_, ucontext_t_sz(ucp_));
}
}
POST_SYSCALL(_lwp_create)
lib/tsan/sanitizer_common/sanitizer_thread_arg_retval.cpp
@@ -0,0 +1,94 @@
+//===-- sanitizer_thread_arg_retval.cpp -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Tracks thread arguments and return value for leak checking.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_thread_arg_retval.h"
+
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+void ThreadArgRetval::CreateLocked(uptr thread, bool detached,
+ const Args& args) {
+ CheckLocked();
+ Data& t = data_[thread];
+ t = {};
+ t.gen = gen_++;
+ t.detached = detached;
+ t.args = args;
+}
+
+ThreadArgRetval::Args ThreadArgRetval::GetArgs(uptr thread) const {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ CHECK(t);
+ if (t->second.done)
+ return {};
+ return t->second.args;
+}
+
+void ThreadArgRetval::Finish(uptr thread, void* retval) {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (!t)
+ return;
+ if (t->second.detached) {
+    // Retval of a detached thread cannot be retrieved.
+ data_.erase(t);
+ return;
+ }
+ t->second.done = true;
+ t->second.args.arg_retval = retval;
+}
+
+u32 ThreadArgRetval::BeforeJoin(uptr thread) const {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ CHECK(t);
+ CHECK(!t->second.detached);
+ return t->second.gen;
+}
+
+void ThreadArgRetval::AfterJoin(uptr thread, u32 gen) {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (!t || gen != t->second.gen) {
+    // Thread was reused and erased by some other event.
+ return;
+ }
+ CHECK(!t->second.detached);
+ data_.erase(t);
+}
+
+void ThreadArgRetval::DetachLocked(uptr thread) {
+ CheckLocked();
+ auto t = data_.find(thread);
+ CHECK(t);
+ CHECK(!t->second.detached);
+ if (t->second.done) {
+    // We can't retrieve the retval after a detached thread has finished.
+ data_.erase(t);
+ return;
+ }
+ t->second.detached = true;
+}
+
+void ThreadArgRetval::GetAllPtrsLocked(InternalMmapVector<uptr>* ptrs) {
+ CheckLocked();
+ CHECK(ptrs);
+ data_.forEach([&](DenseMap<uptr, Data>::value_type& kv) -> bool {
+ ptrs->push_back((uptr)kv.second.args.arg_retval);
+ return true;
+ });
+}
+
+} // namespace __sanitizer
lib/tsan/sanitizer_common/sanitizer_thread_arg_retval.h
@@ -0,0 +1,116 @@
+//===-- sanitizer_thread_arg_retval.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Tracks thread arguments and return value for leak checking.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_ARG_RETVAL_H
+#define SANITIZER_THREAD_ARG_RETVAL_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_dense_map.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// The primary goal of this class is to keep the arg and retval pointers alive
+// for leak checking. However, it can also be used to pass those pointers into
+// wrappers used by interceptors. The difference from ThreadRegistry/ThreadList
+// is that this class keeps data up to the detach or join, as an exited thread
+// can still be joined to retrieve its retval. ThreadRegistry/ThreadList can
+// discard exited threads immediately.
+class SANITIZER_MUTEX ThreadArgRetval {
+ public:
+ struct Args {
+ void* (*routine)(void*);
+ void* arg_retval; // Either arg or retval.
+ };
+ void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
+
+ // Wraps pthread_create or similar. We need to keep object locked, to
+ // prevent child thread from proceeding without thread handle.
+ template <typename CreateFn /* returns thread id on success, or 0 */>
+ void Create(bool detached, const Args& args, const CreateFn& fn) {
+    // No need to track detached threads with no args, but we do so anyway as
+    // it's not expensive and avoids edge cases.
+ __sanitizer::Lock lock(&mtx_);
+ if (uptr thread = fn())
+ CreateLocked(thread, detached, args);
+ }
+
+ // Returns thread arg and routine.
+ Args GetArgs(uptr thread) const;
+
+ // Mark thread as done and stores retval or remove if detached. Should be
+ // called by the thread.
+ void Finish(uptr thread, void* retval);
+
+ // Mark thread as detached or remove if done.
+ template <typename DetachFn /* returns true on success */>
+ void Detach(uptr thread, const DetachFn& fn) {
+ // Lock to prevent re-use of the thread between fn() and DetachLocked()
+ // calls.
+ __sanitizer::Lock lock(&mtx_);
+ if (fn())
+ DetachLocked(thread);
+ }
+
+ // Joins the thread.
+ template <typename JoinFn /* returns true on success */>
+ void Join(uptr thread, const JoinFn& fn) {
+    // Remember the internal id of the thread to prevent re-use of the thread
+    // between the fn() and AfterJoin() calls. Holding the lock around fn(), as
+    // Detach() does, could cause a deadlock because joining may block.
+ auto gen = BeforeJoin(thread);
+ if (fn())
+ AfterJoin(thread, gen);
+ }
+
+ // Returns all arg and retval which are considered alive.
+ void GetAllPtrsLocked(InternalMmapVector<uptr>* ptrs);
+
+ uptr size() const {
+ __sanitizer::Lock lock(&mtx_);
+ return data_.size();
+ }
+
+  // FIXME: Add fork support. Expected users of the class are sloppy with forks
+  // anyway. We should likely lock/unlock the object to avoid deadlocks, and
+  // erase all but the current thread, so we can detect a leaked arg or retval
+  // in the child process.
+
+  // FIXME: Add cancellation support. Currently, if a thread is canceled, the
+  // class will keep its pointers alive forever, missing leaks caused by
+  // cancellation.
+
+ private:
+ struct Data {
+ Args args;
+ u32 gen; // Avoid collision if thread id re-used.
+ bool detached;
+ bool done;
+ };
+
+ void CreateLocked(uptr thread, bool detached, const Args& args);
+ u32 BeforeJoin(uptr thread) const;
+ void AfterJoin(uptr thread, u32 gen);
+ void DetachLocked(uptr thread);
+
+ mutable Mutex mtx_;
+
+ DenseMap<uptr, Data> data_;
+ u32 gen_ = 0;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_THREAD_ARG_RETVAL_H
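
A hedged sketch of how a tool's pthread interceptors might drive ThreadArgRetval; the global g_thread_arg_retval object and the direct pthread_* calls stand in for the tool's real interceptor plumbing:

  #include <pthread.h>

  #include "sanitizer_common/sanitizer_thread_arg_retval.h"

  using namespace __sanitizer;

  static ThreadArgRetval g_thread_arg_retval;  // assumed tool-global instance

  int WrapPthreadCreate(pthread_t *th, const pthread_attr_t *attr,
                        void *(*routine)(void *), void *arg) {
    int res = 0;
    bool detached = false;  // a real tool would derive this from attr
    // Create() holds the lock across the callback so the child thread cannot
    // race ahead of the bookkeeping; returning 0 means "record nothing".
    g_thread_arg_retval.Create(detached, {routine, arg}, [&]() -> uptr {
      res = pthread_create(th, attr, routine, arg);
      return res == 0 ? (uptr)*th : 0;
    });
    return res;
  }

  int WrapPthreadJoin(pthread_t th, void **ret) {
    int res = 0;
    g_thread_arg_retval.Join((uptr)th, [&]() {
      res = pthread_join(th, ret);
      return res == 0;
    });
    return res;
  }
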
lib/tsan/sanitizer_common/sanitizer_thread_registry.cpp
@@ -13,6 +13,8 @@
#include "sanitizer_thread_registry.h"
+#include "sanitizer_placement_new.h"
+
namespace __sanitizer {
ThreadContextBase::ThreadContextBase(u32 tid)
@@ -108,7 +110,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
max_threads_(max_threads),
thread_quarantine_size_(thread_quarantine_size),
max_reuse_(max_reuse),
- mtx_(),
+ mtx_(MutexThreadRegistry),
total_threads_(0),
alive_threads_(0),
max_alive_threads_(0),
@@ -119,7 +121,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
uptr *alive) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
if (total)
*total = threads_.size();
if (running) *running = running_threads_;
@@ -127,13 +129,13 @@ void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
}
uptr ThreadRegistry::GetMaxAliveThreads() {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
return max_alive_threads_;
}
u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
u32 tid = kInvalidTid;
ThreadContextBase *tctx = QuarantinePop();
if (tctx) {
@@ -162,6 +164,12 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
max_alive_threads_++;
CHECK_EQ(alive_threads_, max_alive_threads_);
}
+ if (user_id) {
+ // Ensure that user_id is unique. If it's not the case we are screwed.
+ // Ignoring this situation may lead to very hard to debug false
+ // positives later (e.g. if we join a wrong thread).
+ CHECK(live_.try_emplace(user_id, tid).second);
+ }
tctx->SetCreated(user_id, total_threads_++, detached,
parent_tid, arg);
return tid;
@@ -179,7 +187,7 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
}
u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && cb(tctx, arg))
@@ -211,7 +219,7 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
}
void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -220,19 +228,13 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
}
void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
- BlockingMutexLock l(&mtx_);
- for (u32 tid = 0; tid < threads_.size(); tid++) {
- ThreadContextBase *tctx = threads_[tid];
- if (tctx != 0 && tctx->user_id == user_id &&
- tctx->status != ThreadStatusInvalid) {
- tctx->SetName(name);
- return;
- }
- }
+ ThreadRegistryLock l(this);
+ if (const auto *tid = live_.find(user_id))
+ threads_[tid->second]->SetName(name);
}
void ThreadRegistry::DetachThread(u32 tid, void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
if (tctx->status == ThreadStatusInvalid) {
@@ -241,6 +243,8 @@ void ThreadRegistry::DetachThread(u32 tid, void *arg) {
}
tctx->OnDetached(arg);
if (tctx->status == ThreadStatusFinished) {
+ if (tctx->user_id)
+ live_.erase(tctx->user_id);
tctx->SetDead();
QuarantinePush(tctx);
} else {
@@ -252,7 +256,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
bool destroyed = false;
do {
{
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
if (tctx->status == ThreadStatusInvalid) {
@@ -260,6 +264,8 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
return;
}
if ((destroyed = tctx->GetDestroyed())) {
+ if (tctx->user_id)
+ live_.erase(tctx->user_id);
tctx->SetJoined(arg);
QuarantinePush(tctx);
}
@@ -275,7 +281,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
// thread before trying to create it, and then failed to actually
// create it, and so never called StartThread.
ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
CHECK_GT(alive_threads_, 0);
alive_threads_--;
ThreadContextBase *tctx = threads_[tid];
@@ -292,6 +298,8 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
}
tctx->SetFinished();
if (dead) {
+ if (tctx->user_id)
+ live_.erase(tctx->user_id);
tctx->SetDead();
QuarantinePush(tctx);
}
@@ -301,7 +309,7 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
running_threads_++;
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
@@ -327,20 +335,50 @@ void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
ThreadContextBase *ThreadRegistry::QuarantinePop() {
if (invalid_threads_.size() == 0)
- return 0;
+ return nullptr;
ThreadContextBase *tctx = invalid_threads_.front();
invalid_threads_.pop_front();
return tctx;
}
+u32 ThreadRegistry::ConsumeThreadUserId(uptr user_id) {
+ ThreadRegistryLock l(this);
+ u32 tid;
+ auto *t = live_.find(user_id);
+ CHECK(t);
+ tid = t->second;
+ live_.erase(t);
+ auto *tctx = threads_[tid];
+ CHECK_EQ(tctx->user_id, user_id);
+ tctx->user_id = 0;
+ return tid;
+}
+
void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_NE(tctx->status, ThreadStatusInvalid);
CHECK_NE(tctx->status, ThreadStatusDead);
CHECK_EQ(tctx->user_id, 0);
tctx->user_id = user_id;
+ CHECK(live_.try_emplace(user_id, tctx->tid).second);
+}
+
+u32 ThreadRegistry::OnFork(u32 tid) {
+ ThreadRegistryLock l(this);
+  // We only purge the user_id (pthread_t) of live threads because
+  // they cause CHECK failures if new threads with matching pthread_t
+  // are created after fork.
+ // Potentially we could purge more info (ThreadContextBase themselves),
+ // but it's hard to test and easy to introduce new issues by doing this.
+ for (auto *tctx : threads_) {
+ if (tctx->tid == tid || !tctx->user_id)
+ continue;
+ CHECK(live_.erase(tctx->user_id));
+ tctx->user_id = 0;
+ }
+ return alive_threads_;
}
} // namespace __sanitizer
lib/tsan/sanitizer_common/sanitizer_thread_registry.h
@@ -15,6 +15,7 @@
#define SANITIZER_THREAD_REGISTRY_H
#include "sanitizer_common.h"
+#include "sanitizer_dense_map.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
@@ -85,7 +86,7 @@ class ThreadContextBase {
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
-class MUTEX ThreadRegistry {
+class SANITIZER_MUTEX ThreadRegistry {
public:
ThreadRegistry(ThreadContextFactory factory);
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -94,15 +95,17 @@ class MUTEX ThreadRegistry {
uptr *alive = nullptr);
uptr GetMaxAliveThreads();
- void Lock() ACQUIRE() { mtx_.Lock(); }
- void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
- void Unlock() RELEASE() { mtx_.Unlock(); }
+ void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
// Should be guarded by ThreadRegistryLock.
ThreadContextBase *GetThreadLocked(u32 tid) {
return threads_.empty() ? nullptr : threads_[tid];
}
+ u32 NumThreadsLocked() const { return threads_.size(); }
+
u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);
@@ -127,15 +130,21 @@ class MUTEX ThreadRegistry {
// Finishes thread and returns previous status.
ThreadStatus FinishThread(u32 tid);
void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
+ u32 ConsumeThreadUserId(uptr user_id);
void SetThreadUserId(u32 tid, uptr user_id);
+ // OnFork must be called in the child process after fork to purge old
+ // threads that don't exist anymore (except for the current thread tid).
+ // Returns number of alive threads before fork.
+ u32 OnFork(u32 tid);
+
private:
const ThreadContextFactory context_factory_;
const u32 max_threads_;
const u32 thread_quarantine_size_;
const u32 max_reuse_;
- BlockingMutex mtx_;
+ Mutex mtx_;
u64 total_threads_; // Total number of created threads. May be greater than
// max_threads_ if contexts were reused.
@@ -146,6 +155,7 @@ class MUTEX ThreadRegistry {
InternalMmapVector<ThreadContextBase *> threads_;
IntrusiveList<ThreadContextBase> dead_threads_;
IntrusiveList<ThreadContextBase> invalid_threads_;
+ DenseMap<uptr, Tid> live_;
void QuarantinePush(ThreadContextBase *tctx);
ThreadContextBase *QuarantinePop();
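
A hedged sketch of the intended use of the new registry hooks in a tool's fork handling; the surrounding function name is invented:

  // In the child after fork(): only the calling thread survives, so drop the
  // stale pthread_t -> tid mappings to avoid CHECK failures when new threads
  // later reuse the same pthread_t values.
  void AfterForkInChild(ThreadRegistry *registry, u32 current_tid) {
    u32 alive_before_fork = registry->OnFork(current_tid);
    VReport(1, "fork(): %u threads were alive in the parent\n",
            alive_before_fork);
  }

  // ConsumeThreadUserId() might be used where an interceptor fully consumes a
  // thread handle (e.g. a join identified only by its pthread_t); it returns
  // the tid and removes the user_id mapping so the handle can be reused:
  //   u32 tid = registry->ConsumeThreadUserId((uptr)pthread_id);
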
lib/tsan/sanitizer_common/sanitizer_thread_safety.h
@@ -16,27 +16,34 @@
#define SANITIZER_THREAD_SAFETY_H
#if defined(__clang__)
-# define THREAD_ANNOTATION(x) __attribute__((x))
+# define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
#else
-# define THREAD_ANNOTATION(x)
+# define SANITIZER_THREAD_ANNOTATION(x)
#endif
-#define MUTEX THREAD_ANNOTATION(capability("mutex"))
-#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
-#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
-#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
-#define REQUIRES_SHARED(...) \
- THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
-#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
-#define ACQUIRE_SHARED(...) \
- THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
-#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
-#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
-#define RELEASE_SHARED(...) \
- THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
-#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
-#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
-#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex"))
+#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)
+#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))
+#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))
+#define SANITIZER_REQUIRES(...) \
+ SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define SANITIZER_REQUIRES_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE(...) \
+ SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define SANITIZER_TRY_ACQUIRE(...) \
+ SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE(...) \
+ SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define SANITIZER_EXCLUDES(...) \
+ SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define SANITIZER_CHECK_LOCKED(...) \
+ SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
+ SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)
#endif
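
For illustration, the renamed annotations in use on a small invented class, in the same way ThreadArgRetval and ThreadRegistry above apply them:

  class Counter {
   public:
    void Inc() SANITIZER_EXCLUDES(mtx_) {
      Lock l(&mtx_);  // GenericScopedLock<Mutex> from sanitizer_mutex.h
      value_++;
    }
    u64 Get() const SANITIZER_EXCLUDES(mtx_) {
      Lock l(&mtx_);
      return value_;
    }

   private:
    mutable Mutex mtx_;
    u64 value_ SANITIZER_GUARDED_BY(mtx_) = 0;
  };
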
lib/tsan/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -12,6 +12,7 @@
#include "sanitizer_tls_get_addr.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_interceptors.h"
@@ -26,13 +27,6 @@ struct TlsGetAddrParam {
uptr offset;
};
-// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,
-// which has such header.
-struct Glibc_2_19_tls_header {
- uptr size;
- uptr start;
-};
-
// This must be static TLS
__attribute__((tls_model("initial-exec")))
static __thread DTLS dtls;
@@ -44,7 +38,7 @@ static atomic_uintptr_t number_of_live_dtls;
static const uptr kDestroyedThread = -1;
static void DTLS_Deallocate(DTLS::DTVBlock *block) {
- VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", block);
+ VReport(2, "__tls_get_addr: DTLS_Deallocate %p\n", (void *)block);
UnmapOrDie(block, sizeof(DTLS::DTVBlock));
atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
}
@@ -66,12 +60,13 @@ static DTLS::DTVBlock *DTLS_NextBlock(atomic_uintptr_t *cur) {
}
uptr num_live_dtls =
atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
- VReport(2, "__tls_get_addr: DTLS_NextBlock %p %zd\n", &dtls, num_live_dtls);
+ VReport(2, "__tls_get_addr: DTLS_NextBlock %p %zd\n", (void *)&dtls,
+ num_live_dtls);
return new_dtv;
}
static DTLS::DTV *DTLS_Find(uptr id) {
- VReport(2, "__tls_get_addr: DTLS_Find %p %zd\n", &dtls, id);
+ VReport(2, "__tls_get_addr: DTLS_Find %p %zd\n", (void *)&dtls, id);
static constexpr uptr kPerBlock = ARRAY_SIZE(DTLS::DTVBlock::dtvs);
DTLS::DTVBlock *cur = DTLS_NextBlock(&dtls.dtv_block);
if (!cur)
@@ -82,7 +77,7 @@ static DTLS::DTV *DTLS_Find(uptr id) {
void DTLS_Destroy() {
if (!common_flags()->intercept_tls_get_addr) return;
- VReport(2, "__tls_get_addr: DTLS_Destroy %p\n", &dtls);
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p\n", (void *)&dtls);
DTLS::DTVBlock *block = (DTLS::DTVBlock *)atomic_exchange(
&dtls.dtv_block, kDestroyedThread, memory_order_release);
while (block) {
@@ -107,6 +102,14 @@ static const uptr kDtvOffset = 0x800;
static const uptr kDtvOffset = 0;
#endif
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p);
+
+SANITIZER_WEAK_ATTRIBUTE
+const void *__sanitizer_get_allocated_begin(const void *p);
+}
+
DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
uptr static_tls_begin, uptr static_tls_end) {
if (!common_flags()->intercept_tls_get_addr) return 0;
@@ -117,26 +120,26 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
return 0;
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
- VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
- "num_live_dtls %zd\n",
- arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ VReport(2,
+ "__tls_get_addr: %p {0x%zx,0x%zx} => %p; tls_beg: 0x%zx; sp: %p "
+ "num_live_dtls %zd\n",
+ (void *)arg, arg->dso_id, arg->offset, res, tls_beg, (void *)&tls_beg,
atomic_load(&number_of_live_dtls, memory_order_relaxed));
if (dtls.last_memalign_ptr == tls_beg) {
tls_size = dtls.last_memalign_size;
- VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
- tls_beg, tls_size);
+ VReport(2, "__tls_get_addr: glibc <=2.24 suspected; tls={0x%zx,0x%zx}\n",
+ tls_beg, tls_size);
} else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
// This is the static TLS block which was initialized / unpoisoned at thread
// creation.
- VReport(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+ VReport(2, "__tls_get_addr: static tls: 0x%zx\n", tls_beg);
tls_size = 0;
- } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
- // We may want to check gnu_get_libc_version().
- Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
- tls_size = header->size;
- tls_beg = header->start;
- VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
- tls_beg, tls_size);
+ } else if (const void *start =
+ __sanitizer_get_allocated_begin((void *)tls_beg)) {
+ tls_beg = (uptr)start;
+ tls_size = __sanitizer_get_allocated_size(start);
+ VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={0x%zx,0x%zx}\n",
+ tls_beg, tls_size);
} else {
VReport(2, "__tls_get_addr: Can't guess glibc version\n");
// This may happen inside the DTOR of main thread, so just ignore it.
@@ -149,7 +152,7 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
void DTLS_on_libc_memalign(void *ptr, uptr size) {
if (!common_flags()->intercept_tls_get_addr) return;
- VReport(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ VReport(2, "DTLS_on_libc_memalign: %p 0x%zx\n", ptr, size);
dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
dtls.last_memalign_size = size;
}
lib/tsan/sanitizer_common/sanitizer_tls_get_addr.h
@@ -12,16 +12,24 @@
// the lack of interface that would tell us about the Dynamic TLS (DTLS).
// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
//
-// The matters get worse because the glibc implementation changed between
-// 2.18 and 2.19:
-// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
-//
-// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// Before 2.25: every DTLS chunk is allocated with __libc_memalign,
// which we intercept and thus know where the DTLS is.
-// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
-// which is an internal function that wraps a mmap call, neither of which
-// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
-// header which we can use.
+//
+// Since 2.25: DTLS chunks are allocated with malloc. We could co-opt
+// the malloc interceptor to keep track of the last allocation, similar
+// to how we handle __libc_memalign; however, this adds some overhead
+// (since malloc, unlike __libc_memalign, is commonly called), and
+// requires care to avoid false negatives for LeakSanitizer.
+// Instead, we rely on our internal allocators - which keep track of all
+// their allocations - to determine if an address points to a malloc
+// allocation.
+//
+// There exists a since-deprecated version of Google's internal glibc fork
+// that used __signal_safe_memalign. DTLS_on_tls_get_addr relied on a
+// heuristic check (is the allocation 16 bytes from the start of a page
+// boundary?), which was sometimes erroneous:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=1275223#c15
+// Since that check has no practical use anymore, we have removed it.
//
//===----------------------------------------------------------------------===//
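
For illustration, a minimal sketch of the detection path this change enables (not taken from the patch): the allocator is queried through the weak symbols declared in sanitizer_tls_get_addr.cpp above, so the runtime can ask whether a candidate address lies inside one of its malloc'ed chunks and recover the chunk bounds. GuessMallocedDtls is a hypothetical helper; only __sanitizer_get_allocated_begin/__sanitizer_get_allocated_size and the sanitizer_common types (uptr, SANITIZER_WEAK_ATTRIBUTE) are assumed from the sources above.

    // Sketch: classify a candidate DTLS start address via the allocator
    // interface; mirrors the glibc >=2.25 branch of DTLS_on_tls_get_addr.
    extern "C" {
    SANITIZER_WEAK_ATTRIBUTE uptr __sanitizer_get_allocated_size(const void *p);
    SANITIZER_WEAK_ATTRIBUTE const void *__sanitizer_get_allocated_begin(const void *p);
    }

    static bool GuessMallocedDtls(uptr candidate, uptr *beg, uptr *size) {
      // The weak symbols are absent if no sanitizer allocator is linked in.
      if (!&__sanitizer_get_allocated_begin || !&__sanitizer_get_allocated_size)
        return false;
      const void *start = __sanitizer_get_allocated_begin((void *)candidate);
      if (!start)
        return false;  // Not a malloc'ed chunk, so not a glibc >=2.25 DTLS block.
      *beg = (uptr)start;
      *size = __sanitizer_get_allocated_size(start);
      return true;
    }
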
lib/tsan/sanitizer_common/sanitizer_type_traits.h
@@ -13,6 +13,8 @@
#ifndef SANITIZER_TYPE_TRAITS_H
#define SANITIZER_TYPE_TRAITS_H
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
namespace __sanitizer {
struct true_type {
@@ -57,6 +59,83 @@ struct conditional<false, T, F> {
using type = F;
};
+template <class T>
+struct remove_reference {
+ using type = T;
+};
+template <class T>
+struct remove_reference<T&> {
+ using type = T;
+};
+template <class T>
+struct remove_reference<T&&> {
+ using type = T;
+};
+
+template <class T>
+WARN_UNUSED_RESULT inline typename remove_reference<T>::type&& move(T&& t) {
+ return static_cast<typename remove_reference<T>::type&&>(t);
+}
+
+template <class T>
+WARN_UNUSED_RESULT inline constexpr T&& forward(
+ typename remove_reference<T>::type& t) {
+ return static_cast<T&&>(t);
+}
+
+template <class T>
+WARN_UNUSED_RESULT inline constexpr T&& forward(
+ typename remove_reference<T>::type&& t) {
+ return static_cast<T&&>(t);
+}
+
+template <class T, T v>
+struct integral_constant {
+ static constexpr const T value = v;
+ typedef T value_type;
+ typedef integral_constant type;
+ constexpr operator value_type() const { return value; }
+ constexpr value_type operator()() const { return value; }
+};
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if __has_builtin(__is_trivially_destructible)
+
+template <class T>
+struct is_trivially_destructible
+ : public integral_constant<bool, __is_trivially_destructible(T)> {};
+
+#elif __has_builtin(__has_trivial_destructor)
+
+template <class T>
+struct is_trivially_destructible
+ : public integral_constant<bool, __has_trivial_destructor(T)> {};
+
+#else
+
+template <class T>
+struct is_trivially_destructible
+ : public integral_constant<bool, /* less efficient fallback */ false> {};
+
+#endif
+
+#if __has_builtin(__is_trivially_copyable)
+
+template <class T>
+struct is_trivially_copyable
+ : public integral_constant<bool, __is_trivially_copyable(T)> {};
+
+#else
+
+template <class T>
+struct is_trivially_copyable
+ : public integral_constant<bool, /* less efficient fallback */ false> {};
+
+#endif
+
} // namespace __sanitizer
#endif
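
The traits above mirror their <type_traits>/<utility> counterparts so runtime code can avoid the standard headers. A short usage sketch under that assumption; ConstructAt and DestroyArray are hypothetical helpers, not part of the patch, and sanitizer_placement_new.h is assumed for the placement-new declaration.

    #include "sanitizer_common/sanitizer_placement_new.h"
    #include "sanitizer_common/sanitizer_type_traits.h"

    namespace __sanitizer {

    // Perfect forwarding into placement-new, the typical use of move/forward
    // in the runtime's internal containers.
    template <class T, class... Args>
    void ConstructAt(void *mem, Args &&...args) {
      new (mem) T(forward<Args>(args)...);
    }

    // is_trivially_destructible lets a container skip the destructor loop when
    // the compiler builtin reports the element type as trivially destructible
    // (the fallback conservatively reports false and keeps the loop).
    template <class T>
    void DestroyArray(T *p, uptr n) {
      if (!is_trivially_destructible<T>::value)
        for (uptr i = 0; i < n; i++) p[i].~T();
    }

    }  // namespace __sanitizer
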
lib/tsan/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
@@ -58,7 +58,7 @@ unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
#endif
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#if defined(__arm__) && !SANITIZER_MAC
+#if defined(__arm__) && !SANITIZER_APPLE
uptr val;
_Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
15 /* r15 = PC */, _UVRSD_UINT32, &val);
@@ -139,13 +139,7 @@ void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
if (to_pop == 0 && size > 1)
to_pop = 1;
PopStackFrames(to_pop);
-#if defined(__GNUC__) && defined(__sparc__)
- // __builtin_return_address returns the address of the call instruction
- // on the SPARC and not the return address, so we need to compensate.
- trace_buffer[0] = GetNextInstructionPc(pc);
-#else
trace_buffer[0] = pc;
-#endif
}
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
lib/tsan/sanitizer_common/sanitizer_unwind_win.cpp
@@ -57,30 +57,37 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
InitializeDbgHelpIfNeeded();
size = 0;
-#if defined(_WIN64)
+# if SANITIZER_WINDOWS64
+# if SANITIZER_ARM64
+ int machine_type = IMAGE_FILE_MACHINE_ARM64;
+ stack_frame.AddrPC.Offset = ctx.Pc;
+ stack_frame.AddrFrame.Offset = ctx.Fp;
+ stack_frame.AddrStack.Offset = ctx.Sp;
+# else
int machine_type = IMAGE_FILE_MACHINE_AMD64;
stack_frame.AddrPC.Offset = ctx.Rip;
stack_frame.AddrFrame.Offset = ctx.Rbp;
stack_frame.AddrStack.Offset = ctx.Rsp;
-#else
+# endif
+# else
int machine_type = IMAGE_FILE_MACHINE_I386;
stack_frame.AddrPC.Offset = ctx.Eip;
stack_frame.AddrFrame.Offset = ctx.Ebp;
stack_frame.AddrStack.Offset = ctx.Esp;
-#endif
+# endif
stack_frame.AddrPC.Mode = AddrModeFlat;
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
- SymGetModuleBase64, NULL) &&
- size < Min(max_depth, kStackTraceMax)) {
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
}
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-#endif // #if !SANITIZER_GO
+# ifdef __clang__
+# pragma clang diagnostic pop
+# endif
+# endif // #if !SANITIZER_GO
#endif // SANITIZER_WINDOWS
lib/tsan/sanitizer_common/sanitizer_vector.h
@@ -83,8 +83,8 @@ class Vector {
}
EnsureSize(size);
if (old_size < size) {
- for (uptr i = old_size; i < size; i++)
- internal_memset(&begin_[i], 0, sizeof(begin_[i]));
+ internal_memset(&begin_[old_size], 0,
+ sizeof(begin_[old_size]) * (size - old_size));
}
}
lib/tsan/sanitizer_common/sanitizer_win.cpp
@@ -93,6 +93,11 @@ bool FileExists(const char *filename) {
return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}
+bool DirExists(const char *path) {
+ auto attr = ::GetFileAttributesA(path);
+ return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY);
+}
+
uptr internal_getpid() {
return GetProcessId(GetCurrentProcess());
}
@@ -126,6 +131,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
}
#endif // #if !SANITIZER_GO
+bool ErrorIsOOM(error_t err) {
+ // TODO: This should check which `err`s correspond to OOM.
+ return false;
+}
+
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0)
@@ -224,6 +234,17 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
return (void *)mapped_addr;
}
+// ZeroMmapFixedRegion zeroes out a region of memory previously returned from a
+// call to one of the MmapFixed* helpers. On non-Windows systems this would be
+// done with another mmap, but on Windows remapping is not an option.
+// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
+// memory, but we can't do this atomically, so instead we fall back to using
+// internal_memset.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
+ internal_memset((void*) fixed_addr, 0, size);
+ return true;
+}
+
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
@@ -336,6 +357,16 @@ bool MprotectNoAccess(uptr addr, uptr size) {
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
+bool MprotectReadOnly(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);
+}
+
+bool MprotectReadWrite(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection);
+}
+
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
end_aligned = RoundDownTo(end, GetPageSizeCached());
@@ -512,7 +543,7 @@ void ReExec() {
UNIMPLEMENTED();
}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void PlatformPrepareForSandboxing(void *args) {}
bool StackSizeIsUnlimited() {
UNIMPLEMENTED();
@@ -565,6 +596,10 @@ void Abort() {
internal__exit(3);
}
+bool CreateDir(const char *pathname) {
+ return CreateDirectoryA(pathname, nullptr) != 0;
+}
+
#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
@@ -688,13 +723,24 @@ void ListOfModules::fallbackInit() { clear(); }
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
-int Atexit(void (*function)(void)) {
+static int queueAtexit(void (*function)(void)) {
atexit_functions.push_back(function);
return 0;
}
+// If Atexit() is being called after RunAtexit() has already been run, it needs
+// to be able to call atexit() directly. Here we use a function pointer to
+// switch out its behaviour.
+// An example of where this is needed is the asan_dynamic runtime on MinGW-w64.
+// On this environment, __asan_init is called during global constructor phase,
+// way after calling the .CRT$XID initializer.
+static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit;
+
+int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); }
+
static int RunAtexit() {
TraceLoggingUnregister(g_asan_provider);
+ queueOrCallAtExit = &atexit;
int ret = 0;
for (uptr i = 0; i < atexit_functions.size(); ++i) {
ret |= atexit(atexit_functions[i]);
@@ -827,27 +873,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
WakeByAddressAll(p);
}
-// ---------------------- BlockingMutex ---------------- {{{1
-
-BlockingMutex::BlockingMutex() {
- CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
- CHECK_EQ(owner_, 0);
- owner_ = GetThreadSelf();
-}
-
-void BlockingMutex::Unlock() {
- CheckLocked();
- owner_ = 0;
- ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
-
uptr GetTlsSize() {
return 0;
}
@@ -962,13 +987,18 @@ void SignalContext::InitPcSpBp() {
CONTEXT *context_record = (CONTEXT *)context;
pc = (uptr)exception_record->ExceptionAddress;
-#ifdef _WIN64
+# if SANITIZER_WINDOWS64
+# if SANITIZER_ARM64
+ bp = (uptr)context_record->Fp;
+ sp = (uptr)context_record->Sp;
+# else
bp = (uptr)context_record->Rbp;
sp = (uptr)context_record->Rsp;
-#else
+# endif
+# else
bp = (uptr)context_record->Ebp;
sp = (uptr)context_record->Esp;
-#endif
+# endif
}
uptr SignalContext::GetAddress() const {
@@ -990,7 +1020,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
// The write flag is only available for access violation exceptions.
if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
// The contents of this array are documented at
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
@@ -998,13 +1028,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
// second element is the faulting address.
switch (exception_record->ExceptionInformation[0]) {
case 0:
- return SignalContext::READ;
+ return SignalContext::Read;
case 1:
- return SignalContext::WRITE;
+ return SignalContext::Write;
case 8:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
void SignalContext::DumpAllRegisters(void *context) {
@@ -1091,10 +1121,6 @@ void InitializePlatformEarly() {
// Do nothing.
}
-void MaybeReexec() {
- // No need to re-exec on Windows.
-}
-
void CheckASLR() {
// Do nothing
}
@@ -1131,7 +1157,7 @@ bool IsProcessRunning(pid_t pid) {
int WaitForProcess(pid_t pid) { return -1; }
// FIXME implement on this platform.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
void CheckNoDeepBind(const char *filename, int flag) {
// Do nothing.
lib/tsan/sanitizer_common/sanitizer_win_dll_thunk.cpp
@@ -0,0 +1,101 @@
+//===-- sanitizer_win_dll_thunk.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_defs.h"
+#include "sanitizer_win_dll_thunk.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name) {
+ uptr ret =
+ __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+int dllThunkIntercept(const char* main_function, uptr dll_function) {
+ uptr wrapper = dllThunkGetRealAddrOrDie(main_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function) {
+ uptr wrapper = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), main_function);
+ if (!wrapper)
+ wrapper = dllThunkGetRealAddrOrDie(default_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".DLLTH$A", read)
+#pragma section(".DLLTH$Z", read)
+
+typedef void (*DllThunkCB)();
+extern "C" {
+__declspec(allocate(".DLLTH$A")) DllThunkCB __start_dll_thunk;
+__declspec(allocate(".DLLTH$Z")) DllThunkCB __stop_dll_thunk;
+}
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+extern "C" int __dll_thunk_init() {
+ static bool flag = false;
+ // __dll_thunk_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (DllThunkCB *it = &__start_dll_thunk; it < &__stop_dll_thunk; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+// We want to call dll_thunk_init before C/C++ initializers / constructors are
+// executed, otherwise functions like memset might be invoked.
+#pragma section(".CRT$XIB", long, read)
+__declspec(allocate(".CRT$XIB")) int (*__dll_thunk_preinit)() =
+ __dll_thunk_init;
+
+static void WINAPI dll_thunk_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) __dll_thunk_init();
+}
+
+#pragma section(".CRT$XLAB", long, read)
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__dll_thunk_tls_init)(void *,
+ unsigned long, void *) = dll_thunk_thread_init;
+
+#endif // SANITIZER_DLL_THUNK
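
The .DLLTH$A/.DLLTH$Z pragmas above bracket a linker section range that is filled with registration callbacks; __dll_thunk_init() simply walks it. A hedged sketch of registering one more callback with the same pattern follows; ".DLLTH$M" and InterceptMyFunction are assumed names for illustration, and the INTERCEPT_* macros in sanitizer_win_dll_thunk.h appear to build on this mechanism.

    // Sketch: add one callback to the list walked by __dll_thunk_init().
    // Section names that sort between .DLLTH$A and .DLLTH$Z are collected by
    // the linker into the [__start_dll_thunk, __stop_dll_thunk) range.
    #pragma section(".DLLTH$M", read)  // ".DLLTH$M" is an assumed middle name.

    static void InterceptMyFunction() {
      // e.g. dllThunkIntercept("__my_wrapper", (__sanitizer::uptr)&my_function);
    }

    __declspec(allocate(".DLLTH$M")) static void (*dll_thunk_my_function)() =
        InterceptMyFunction;
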
lib/tsan/sanitizer_common/sanitizer_win_dll_thunk.h
@@ -84,7 +84,7 @@ extern "C" int __dll_thunk_init();
// which isn't a big deal.
#define INTERCEPT_LIBRARY_FUNCTION(name) \
extern "C" void name(); \
- INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
+ INTERCEPT_OR_DIE(STRINGIFY(WRAP(name)), name)
// Use these macros for functions that could be called before __dll_thunk_init()
// is executed and don't lead to errors if defined (free, malloc, etc).
lib/tsan/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cpp
@@ -0,0 +1,26 @@
+//===-- sanitizer_win_dynamic_runtime_thunk.cpp ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Common, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from sanitizer common.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_common_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
+
+namespace __sanitizer {
+// Add one, otherwise unused, external symbol to this object file so that the
+// Visual C++ linker includes it and reads the .drective section.
+void ForceWholeArchiveIncludeForSanitizerCommon() {}
+}
lib/tsan/sanitizer_common/sanitizer_win_weak_interception.cpp
@@ -0,0 +1,94 @@
+//===-- sanitizer_win_weak_interception.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in the sanitizer when it is implemented as a
+// shared library on Windows (dll), in order to delegate the calls of weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS && SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_win_defs.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+// Try to get a pointer to real_function in the main module and override
+// dll_function with that pointer. If the function isn't found, nothing changes.
+int interceptWhenPossible(uptr dll_function, const char *real_function) {
+ uptr real = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), real_function);
+ if (real && !__interception::OverrideFunction((uptr)dll_function, real, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Declare weak hooks.
+extern "C" {
+void __sanitizer_on_print(const char *str);
+void __sanitizer_weak_hook_memcmp(uptr called_pc, const void *s1,
+ const void *s2, uptr n, int result);
+void __sanitizer_weak_hook_strcmp(uptr called_pc, const char *s1,
+ const char *s2, int result);
+void __sanitizer_weak_hook_strncmp(uptr called_pc, const char *s1,
+ const char *s2, uptr n, int result);
+void __sanitizer_weak_hook_strstr(uptr called_pc, const char *s1,
+ const char *s2, char *result);
+}
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".WEAK$A", read)
+#pragma section(".WEAK$Z", read)
+
+typedef void (*InterceptCB)();
+extern "C" {
+__declspec(allocate(".WEAK$A")) InterceptCB __start_weak_list;
+__declspec(allocate(".WEAK$Z")) InterceptCB __stop_weak_list;
+}
+
+static int weak_intercept_init() {
+ static bool flag = false;
+  // weak_intercept_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (InterceptCB *it = &__start_weak_list; it < &__stop_weak_list; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+#pragma section(".CRT$XIB", long, read)
+__declspec(allocate(".CRT$XIB")) int (*__weak_intercept_preinit)() =
+ weak_intercept_init;
+
+static void WINAPI weak_intercept_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) weak_intercept_init();
+}
+
+#pragma section(".CRT$XLAB", long, read)
+__declspec(allocate(".CRT$XLAB")) void(WINAPI *__weak_intercept_tls_init)(
+ void *, unsigned long, void *) = weak_intercept_thread_init;
+
+#endif // SANITIZER_WINDOWS && SANITIZER_DYNAMIC
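
The user-facing side of this module is a strong definition in the main executable: if the executable exports one of the hooks declared above, weak_intercept_init() redirects the DLL's weak default to it at startup via InternalGetProcAddress/OverrideFunction. A minimal sketch, assuming the symbol is exported from the executable (the dllexport below is illustrative, not from the patch):

    // In the main executable: a strong definition of a weak sanitizer hook.
    // InternalGetProcAddress(GetModuleHandleA(0), ...) finds it in the export
    // table, and the DLL-side default is patched to call it.
    extern "C" __declspec(dllexport) void __sanitizer_on_print(const char *str) {
      (void)str;
      // Forward each line of sanitizer output to a custom sink (illustrative).
    }
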
lib/tsan/tsan_clock.cpp
@@ -1,625 +0,0 @@
-//===-- tsan_clock.cpp ----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "tsan_clock.h"
-#include "tsan_rtl.h"
-#include "sanitizer_common/sanitizer_placement_new.h"
-
-// SyncClock and ThreadClock implement vector clocks for sync variables
-// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
-// ThreadClock contains fixed-size vector clock for maximum number of threads.
-// SyncClock contains growable vector clock for currently necessary number of
-// threads.
-// Together they implement very simple model of operations, namely:
-//
-// void ThreadClock::acquire(const SyncClock *src) {
-// for (int i = 0; i < kMaxThreads; i++)
-// clock[i] = max(clock[i], src->clock[i]);
-// }
-//
-// void ThreadClock::release(SyncClock *dst) const {
-// for (int i = 0; i < kMaxThreads; i++)
-// dst->clock[i] = max(dst->clock[i], clock[i]);
-// }
-//
-// void ThreadClock::releaseStoreAcquire(SyncClock *sc) const {
-// for (int i = 0; i < kMaxThreads; i++) {
-// tmp = clock[i];
-// clock[i] = max(clock[i], sc->clock[i]);
-// sc->clock[i] = tmp;
-// }
-// }
-//
-// void ThreadClock::ReleaseStore(SyncClock *dst) const {
-// for (int i = 0; i < kMaxThreads; i++)
-// dst->clock[i] = clock[i];
-// }
-//
-// void ThreadClock::acq_rel(SyncClock *dst) {
-// acquire(dst);
-// release(dst);
-// }
-//
-// Conformance to this model is extensively verified in tsan_clock_test.cpp.
-// However, the implementation is significantly more complex. The complexity
-// allows to implement important classes of use cases in O(1) instead of O(N).
-//
-// The use cases are:
-// 1. Singleton/once atomic that has a single release-store operation followed
-// by zillions of acquire-loads (the acquire-load is O(1)).
-// 2. Thread-local mutex (both lock and unlock can be O(1)).
-// 3. Leaf mutex (unlock is O(1)).
-// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
-// 5. An atomic with a single writer (writes can be O(1)).
-// The implementation dynamically adopts to workload. So if an atomic is in
-// read-only phase, these reads will be O(1); if it later switches to read/write
-// phase, the implementation will correctly handle that by switching to O(N).
-//
-// Thread-safety note: all const operations on SyncClock's are conducted under
-// a shared lock; all non-const operations on SyncClock's are conducted under
-// an exclusive lock; ThreadClock's are private to respective threads and so
-// do not need any protection.
-//
-// Description of SyncClock state:
-// clk_ - variable size vector clock, low kClkBits hold timestamp,
-// the remaining bits hold "acquired" flag (the actual value is thread's
-// reused counter);
-// if acquried == thr->reused_, then the respective thread has already
-// acquired this clock (except possibly for dirty elements).
-// dirty_ - holds up to two indeces in the vector clock that other threads
-// need to acquire regardless of "acquired" flag value;
-// release_store_tid_ - denotes that the clock state is a result of
-// release-store operation by the thread with release_store_tid_ index.
-// release_store_reused_ - reuse count of release_store_tid_.
-
-namespace __tsan {
-
-static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
- return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
-}
-
-// Drop reference to the first level block idx.
-static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- atomic_uint32_t *ref = ref_ptr(cb);
- u32 v = atomic_load(ref, memory_order_acquire);
- for (;;) {
- CHECK_GT(v, 0);
- if (v == 1)
- break;
- if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
- return;
- }
- // First level block owns second level blocks, so them as well.
- for (uptr i = 0; i < blocks; i++)
- ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
- ctx->clock_alloc.Free(c, idx);
-}
-
-ThreadClock::ThreadClock(unsigned tid, unsigned reused)
- : tid_(tid)
- , reused_(reused + 1) // 0 has special meaning
- , last_acquire_()
- , global_acquire_()
- , cached_idx_()
- , cached_size_()
- , cached_blocks_() {
- CHECK_LT(tid, kMaxTidInClock);
- CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
- nclk_ = tid_ + 1;
- internal_memset(clk_, 0, sizeof(clk_));
-}
-
-void ThreadClock::ResetCached(ClockCache *c) {
- if (cached_idx_) {
- UnrefClockBlock(c, cached_idx_, cached_blocks_);
- cached_idx_ = 0;
- cached_size_ = 0;
- cached_blocks_ = 0;
- }
-}
-
-void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(src->size_, kMaxTid);
-
- // Check if it's empty -> no need to do anything.
- const uptr nclk = src->size_;
- if (nclk == 0)
- return;
-
- bool acquired = false;
- for (unsigned i = 0; i < kDirtyTids; i++) {
- SyncClock::Dirty dirty = src->dirty_[i];
- unsigned tid = dirty.tid();
- if (tid != kInvalidTid) {
- if (clk_[tid] < dirty.epoch) {
- clk_[tid] = dirty.epoch;
- acquired = true;
- }
- }
- }
-
- // Check if we've already acquired src after the last release operation on src
- if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
- // O(N) acquire.
- nclk_ = max(nclk_, nclk);
- u64 *dst_pos = &clk_[0];
- for (ClockElem &src_elem : *src) {
- u64 epoch = src_elem.epoch;
- if (*dst_pos < epoch) {
- *dst_pos = epoch;
- acquired = true;
- }
- dst_pos++;
- }
-
- // Remember that this thread has acquired this clock.
- if (nclk > tid_)
- src->elem(tid_).reused = reused_;
- }
-
- if (acquired) {
- last_acquire_ = clk_[tid_];
- ResetCached(c);
- }
-}
-
-void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(sc->size_, kMaxTid);
-
- if (sc->size_ == 0) {
- // ReleaseStore will correctly set release_store_tid_,
- // which can be important for future operations.
- ReleaseStore(c, sc);
- return;
- }
-
- nclk_ = max(nclk_, (uptr) sc->size_);
-
- // Check if we need to resize sc.
- if (sc->size_ < nclk_)
- sc->Resize(c, nclk_);
-
- bool acquired = false;
-
- sc->Unshare(c);
- // Update sc->clk_.
- sc->FlushDirty();
- uptr i = 0;
- for (ClockElem &ce : *sc) {
- u64 tmp = clk_[i];
- if (clk_[i] < ce.epoch) {
- clk_[i] = ce.epoch;
- acquired = true;
- }
- ce.epoch = tmp;
- ce.reused = 0;
- i++;
- }
- sc->release_store_tid_ = kInvalidTid;
- sc->release_store_reused_ = 0;
-
- if (acquired) {
- last_acquire_ = clk_[tid_];
- ResetCached(c);
- }
-}
-
-void ThreadClock::release(ClockCache *c, SyncClock *dst) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(dst->size_, kMaxTid);
-
- if (dst->size_ == 0) {
- // ReleaseStore will correctly set release_store_tid_,
- // which can be important for future operations.
- ReleaseStore(c, dst);
- return;
- }
-
- // Check if we need to resize dst.
- if (dst->size_ < nclk_)
- dst->Resize(c, nclk_);
-
- // Check if we had not acquired anything from other threads
- // since the last release on dst. If so, we need to update
- // only dst->elem(tid_).
- if (!HasAcquiredAfterRelease(dst)) {
- UpdateCurrentThread(c, dst);
- if (dst->release_store_tid_ != tid_ ||
- dst->release_store_reused_ != reused_)
- dst->release_store_tid_ = kInvalidTid;
- return;
- }
-
- // O(N) release.
- dst->Unshare(c);
- // First, remember whether we've acquired dst.
- bool acquired = IsAlreadyAcquired(dst);
- // Update dst->clk_.
- dst->FlushDirty();
- uptr i = 0;
- for (ClockElem &ce : *dst) {
- ce.epoch = max(ce.epoch, clk_[i]);
- ce.reused = 0;
- i++;
- }
- // Clear 'acquired' flag in the remaining elements.
- dst->release_store_tid_ = kInvalidTid;
- dst->release_store_reused_ = 0;
- // If we've acquired dst, remember this fact,
- // so that we don't need to acquire it on next acquire.
- if (acquired)
- dst->elem(tid_).reused = reused_;
-}
-
-void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(dst->size_, kMaxTid);
-
- if (dst->size_ == 0 && cached_idx_ != 0) {
- // Reuse the cached clock.
- // Note: we could reuse/cache the cached clock in more cases:
- // we could update the existing clock and cache it, or replace it with the
- // currently cached clock and release the old one. And for a shared
- // existing clock, we could replace it with the currently cached;
- // or unshare, update and cache. But, for simplicity, we currnetly reuse
- // cached clock only when the target clock is empty.
- dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
- dst->tab_idx_ = cached_idx_;
- dst->size_ = cached_size_;
- dst->blocks_ = cached_blocks_;
- CHECK_EQ(dst->dirty_[0].tid(), kInvalidTid);
- // The cached clock is shared (immutable),
- // so this is where we store the current clock.
- dst->dirty_[0].set_tid(tid_);
- dst->dirty_[0].epoch = clk_[tid_];
- dst->release_store_tid_ = tid_;
- dst->release_store_reused_ = reused_;
- // Rememeber that we don't need to acquire it in future.
- dst->elem(tid_).reused = reused_;
- // Grab a reference.
- atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
- return;
- }
-
- // Check if we need to resize dst.
- if (dst->size_ < nclk_)
- dst->Resize(c, nclk_);
-
- if (dst->release_store_tid_ == tid_ &&
- dst->release_store_reused_ == reused_ &&
- !HasAcquiredAfterRelease(dst)) {
- UpdateCurrentThread(c, dst);
- return;
- }
-
- // O(N) release-store.
- dst->Unshare(c);
- // Note: dst can be larger than this ThreadClock.
- // This is fine since clk_ beyond size is all zeros.
- uptr i = 0;
- for (ClockElem &ce : *dst) {
- ce.epoch = clk_[i];
- ce.reused = 0;
- i++;
- }
- for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid);
- dst->release_store_tid_ = tid_;
- dst->release_store_reused_ = reused_;
- // Rememeber that we don't need to acquire it in future.
- dst->elem(tid_).reused = reused_;
-
- // If the resulting clock is cachable, cache it for future release operations.
- // The clock is always cachable if we released to an empty sync object.
- if (cached_idx_ == 0 && dst->Cachable()) {
- // Grab a reference to the ClockBlock.
- atomic_uint32_t *ref = ref_ptr(dst->tab_);
- if (atomic_load(ref, memory_order_acquire) == 1)
- atomic_store_relaxed(ref, 2);
- else
- atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
- cached_idx_ = dst->tab_idx_;
- cached_size_ = dst->size_;
- cached_blocks_ = dst->blocks_;
- }
-}
-
-void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
- acquire(c, dst);
- ReleaseStore(c, dst);
-}
-
-// Updates only single element related to the current thread in dst->clk_.
-void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
- // Update the threads time, but preserve 'acquired' flag.
- for (unsigned i = 0; i < kDirtyTids; i++) {
- SyncClock::Dirty *dirty = &dst->dirty_[i];
- const unsigned tid = dirty->tid();
- if (tid == tid_ || tid == kInvalidTid) {
- dirty->set_tid(tid_);
- dirty->epoch = clk_[tid_];
- return;
- }
- }
- // Reset all 'acquired' flags, O(N).
- // We are going to touch dst elements, so we need to unshare it.
- dst->Unshare(c);
- dst->elem(tid_).epoch = clk_[tid_];
- for (uptr i = 0; i < dst->size_; i++)
- dst->elem(i).reused = 0;
- dst->FlushDirty();
-}
-
-// Checks whether the current thread has already acquired src.
-bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
- if (src->elem(tid_).reused != reused_)
- return false;
- for (unsigned i = 0; i < kDirtyTids; i++) {
- SyncClock::Dirty dirty = src->dirty_[i];
- if (dirty.tid() != kInvalidTid) {
- if (clk_[dirty.tid()] < dirty.epoch)
- return false;
- }
- }
- return true;
-}
-
-// Checks whether the current thread has acquired anything
-// from other clocks after releasing to dst (directly or indirectly).
-bool ThreadClock::HasAcquiredAfterRelease(const SyncClock *dst) const {
- const u64 my_epoch = dst->elem(tid_).epoch;
- return my_epoch <= last_acquire_ ||
- my_epoch <= atomic_load_relaxed(&global_acquire_);
-}
-
-// Sets a single element in the vector clock.
-// This function is called only from weird places like AcquireGlobal.
-void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid]);
- clk_[tid] = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
- last_acquire_ = clk_[tid_];
- ResetCached(c);
-}
-
-void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
- printf("clock=[");
- for (uptr i = 0; i < nclk_; i++)
- printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
- printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
-}
-
-SyncClock::SyncClock() {
- ResetImpl();
-}
-
-SyncClock::~SyncClock() {
- // Reset must be called before dtor.
- CHECK_EQ(size_, 0);
- CHECK_EQ(blocks_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
-}
-
-void SyncClock::Reset(ClockCache *c) {
- if (size_)
- UnrefClockBlock(c, tab_idx_, blocks_);
- ResetImpl();
-}
-
-void SyncClock::ResetImpl() {
- tab_ = 0;
- tab_idx_ = 0;
- size_ = 0;
- blocks_ = 0;
- release_store_tid_ = kInvalidTid;
- release_store_reused_ = 0;
- for (uptr i = 0; i < kDirtyTids; i++) dirty_[i].set_tid(kInvalidTid);
-}
-
-void SyncClock::Resize(ClockCache *c, uptr nclk) {
- Unshare(c);
- if (nclk <= capacity()) {
- // Memory is already allocated, just increase the size.
- size_ = nclk;
- return;
- }
- if (size_ == 0) {
- // Grow from 0 to one-level table.
- CHECK_EQ(size_, 0);
- CHECK_EQ(blocks_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- atomic_store_relaxed(ref_ptr(tab_), 1);
- size_ = 1;
- } else if (size_ > blocks_ * ClockBlock::kClockCount) {
- u32 idx = ctx->clock_alloc.Alloc(c);
- ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
- uptr top = size_ - blocks_ * ClockBlock::kClockCount;
- CHECK_LT(top, ClockBlock::kClockCount);
- const uptr move = top * sizeof(tab_->clock[0]);
- internal_memcpy(&new_cb->clock[0], tab_->clock, move);
- internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
- internal_memset(tab_->clock, 0, move);
- append_block(idx);
- }
- // At this point we have first level table allocated and all clock elements
- // are evacuated from it to a second level block.
- // Add second level tables as necessary.
- while (nclk > capacity()) {
- u32 idx = ctx->clock_alloc.Alloc(c);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- internal_memset(cb, 0, sizeof(*cb));
- append_block(idx);
- }
- size_ = nclk;
-}
-
-// Flushes all dirty elements into the main clock array.
-void SyncClock::FlushDirty() {
- for (unsigned i = 0; i < kDirtyTids; i++) {
- Dirty *dirty = &dirty_[i];
- if (dirty->tid() != kInvalidTid) {
- CHECK_LT(dirty->tid(), size_);
- elem(dirty->tid()).epoch = dirty->epoch;
- dirty->set_tid(kInvalidTid);
- }
- }
-}
-
-bool SyncClock::IsShared() const {
- if (size_ == 0)
- return false;
- atomic_uint32_t *ref = ref_ptr(tab_);
- u32 v = atomic_load(ref, memory_order_acquire);
- CHECK_GT(v, 0);
- return v > 1;
-}
-
-// Unshares the current clock if it's shared.
-// Shared clocks are immutable, so they need to be unshared before any updates.
-// Note: this does not apply to dirty entries as they are not shared.
-void SyncClock::Unshare(ClockCache *c) {
- if (!IsShared())
- return;
- // First, copy current state into old.
- SyncClock old;
- old.tab_ = tab_;
- old.tab_idx_ = tab_idx_;
- old.size_ = size_;
- old.blocks_ = blocks_;
- old.release_store_tid_ = release_store_tid_;
- old.release_store_reused_ = release_store_reused_;
- for (unsigned i = 0; i < kDirtyTids; i++)
- old.dirty_[i] = dirty_[i];
- // Then, clear current object.
- ResetImpl();
- // Allocate brand new clock in the current object.
- Resize(c, old.size_);
- // Now copy state back into this object.
- Iter old_iter(&old);
- for (ClockElem &ce : *this) {
- ce = *old_iter;
- ++old_iter;
- }
- release_store_tid_ = old.release_store_tid_;
- release_store_reused_ = old.release_store_reused_;
- for (unsigned i = 0; i < kDirtyTids; i++)
- dirty_[i] = old.dirty_[i];
- // Drop reference to old and delete if necessary.
- old.Reset(c);
-}
-
-// Can we cache this clock for future release operations?
-ALWAYS_INLINE bool SyncClock::Cachable() const {
- if (size_ == 0)
- return false;
- for (unsigned i = 0; i < kDirtyTids; i++) {
- if (dirty_[i].tid() != kInvalidTid)
- return false;
- }
- return atomic_load_relaxed(ref_ptr(tab_)) == 1;
-}
-
-// elem linearizes the two-level structure into linear array.
-// Note: this is used only for one time accesses, vector operations use
-// the iterator as it is much faster.
-ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
- DCHECK_LT(tid, size_);
- const uptr block = tid / ClockBlock::kClockCount;
- DCHECK_LE(block, blocks_);
- tid %= ClockBlock::kClockCount;
- if (block == blocks_)
- return tab_->clock[tid];
- u32 idx = get_block(block);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- return cb->clock[tid];
-}
-
-ALWAYS_INLINE uptr SyncClock::capacity() const {
- if (size_ == 0)
- return 0;
- uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
- // How many clock elements we can fit into the first level block.
- // +1 for ref counter.
- uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
- return blocks_ * ClockBlock::kClockCount + top;
-}
-
-ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
- DCHECK(size_);
- DCHECK_LT(bi, blocks_);
- return tab_->table[ClockBlock::kBlockIdx - bi];
-}
-
-ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
- uptr bi = blocks_++;
- CHECK_EQ(get_block(bi), 0);
- tab_->table[ClockBlock::kBlockIdx - bi] = idx;
-}
-
-// Used only by tests.
-u64 SyncClock::get(unsigned tid) const {
- for (unsigned i = 0; i < kDirtyTids; i++) {
- Dirty dirty = dirty_[i];
- if (dirty.tid() == tid)
- return dirty.epoch;
- }
- return elem(tid).epoch;
-}
-
-// Used only by Iter test.
-u64 SyncClock::get_clean(unsigned tid) const {
- return elem(tid).epoch;
-}
-
-void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
- printf("clock=[");
- for (uptr i = 0; i < size_; i++)
- printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
- printf("] reused=[");
- for (uptr i = 0; i < size_; i++)
- printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
- printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
- release_store_tid_, release_store_reused_, dirty_[0].tid(),
- dirty_[0].epoch, dirty_[1].tid(), dirty_[1].epoch);
-}
-
-void SyncClock::Iter::Next() {
- // Finished with the current block, move on to the next one.
- block_++;
- if (block_ < parent_->blocks_) {
- // Iterate over the next second level block.
- u32 idx = parent_->get_block(block_);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- pos_ = &cb->clock[0];
- end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
- ClockBlock::kClockCount);
- return;
- }
- if (block_ == parent_->blocks_ &&
- parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
- // Iterate over elements in the first level block.
- pos_ = &parent_->tab_->clock[0];
- end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
- ClockBlock::kClockCount);
- return;
- }
- parent_ = nullptr; // denotes end
-}
-} // namespace __tsan
lib/tsan/tsan_clock.h
@@ -1,293 +0,0 @@
-//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#ifndef TSAN_CLOCK_H
-#define TSAN_CLOCK_H
-
-#include "tsan_defs.h"
-#include "tsan_dense_alloc.h"
-
-namespace __tsan {
-
-typedef DenseSlabAlloc<ClockBlock, 1 << 22, 1 << 10> ClockAlloc;
-typedef DenseSlabAllocCache ClockCache;
-
-// The clock that lives in sync variables (mutexes, atomics, etc).
-class SyncClock {
- public:
- SyncClock();
- ~SyncClock();
-
- uptr size() const;
-
- // These are used only in tests.
- u64 get(unsigned tid) const;
- u64 get_clean(unsigned tid) const;
-
- void Resize(ClockCache *c, uptr nclk);
- void Reset(ClockCache *c);
-
- void DebugDump(int(*printf)(const char *s, ...));
-
- // Clock element iterator.
- // Note: it iterates only over the table without regard to dirty entries.
- class Iter {
- public:
- explicit Iter(SyncClock* parent);
- Iter& operator++();
- bool operator!=(const Iter& other);
- ClockElem &operator*();
-
- private:
- SyncClock *parent_;
- // [pos_, end_) is the current continuous range of clock elements.
- ClockElem *pos_;
- ClockElem *end_;
- int block_; // Current number of second level block.
-
- NOINLINE void Next();
- };
-
- Iter begin();
- Iter end();
-
- private:
- friend class ThreadClock;
- friend class Iter;
- static const uptr kDirtyTids = 2;
-
- struct Dirty {
- u32 tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
- void set_tid(u32 tid) {
- tid_ = tid == kInvalidTid ? kShortInvalidTid : tid;
- }
- u64 epoch : kClkBits;
-
- private:
- // Full kInvalidTid won't fit into Dirty::tid.
- static const u64 kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;
- u64 tid_ : 64 - kClkBits; // kInvalidId if not active
- };
-
- static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");
-
- unsigned release_store_tid_;
- unsigned release_store_reused_;
- Dirty dirty_[kDirtyTids];
- // If size_ is 0, tab_ is nullptr.
- // If size <= 64 (kClockCount), tab_ contains pointer to an array with
- // 64 ClockElem's (ClockBlock::clock).
- // Otherwise, tab_ points to an array with up to 127 u32 elements,
- // each pointing to the second-level 512b block with 64 ClockElem's.
- // Unused space in the first level ClockBlock is used to store additional
- // clock elements.
- // The last u32 element in the first level ClockBlock is always used as
- // reference counter.
- //
- // See the following scheme for details.
- // All memory blocks are 512 bytes (allocated from ClockAlloc).
- // Clock (clk) elements are 64 bits.
- // Idx and ref are 32 bits.
- //
- // tab_
- // |
- // \/
- // +----------------------------------------------------+
- // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
- // +----------------------------------------------------+
- // | |
- // | \/
- // | +----------------+
- // | | clk0 ... clk63 |
- // | +----------------+
- // \/
- // +------------------+
- // | clk64 ... clk127 |
- // +------------------+
- //
- // Note: dirty entries, if active, always override what's stored in the clock.
- ClockBlock *tab_;
- u32 tab_idx_;
- u16 size_;
- u16 blocks_; // Number of second level blocks.
-
- void Unshare(ClockCache *c);
- bool IsShared() const;
- bool Cachable() const;
- void ResetImpl();
- void FlushDirty();
- uptr capacity() const;
- u32 get_block(uptr bi) const;
- void append_block(u32 idx);
- ClockElem &elem(unsigned tid) const;
-};
-
-// The clock that lives in threads.
-class ThreadClock {
- public:
- typedef DenseSlabAllocCache Cache;
-
- explicit ThreadClock(unsigned tid, unsigned reused = 0);
-
- u64 get(unsigned tid) const;
- void set(ClockCache *c, unsigned tid, u64 v);
- void set(u64 v);
- void tick();
- uptr size() const;
-
- void acquire(ClockCache *c, SyncClock *src);
- void releaseStoreAcquire(ClockCache *c, SyncClock *src);
- void release(ClockCache *c, SyncClock *dst);
- void acq_rel(ClockCache *c, SyncClock *dst);
- void ReleaseStore(ClockCache *c, SyncClock *dst);
- void ResetCached(ClockCache *c);
- void NoteGlobalAcquire(u64 v);
-
- void DebugReset();
- void DebugDump(int(*printf)(const char *s, ...));
-
- private:
- static const uptr kDirtyTids = SyncClock::kDirtyTids;
- // Index of the thread associated with he clock ("current thread").
- const unsigned tid_;
- const unsigned reused_; // tid_ reuse count.
- // Current thread time when it acquired something from other threads.
- u64 last_acquire_;
-
- // Last time another thread has done a global acquire of this thread's clock.
- // It helps to avoid problem described in:
- // https://github.com/golang/go/issues/39186
- // See test/tsan/java_finalizer2.cpp for a regression test.
- // Note the failuire is _extremely_ hard to hit, so if you are trying
- // to reproduce it, you may want to run something like:
- // $ go get golang.org/x/tools/cmd/stress
- // $ stress -p=64 ./a.out
- //
- // The crux of the problem is roughly as follows.
- // A number of O(1) optimizations in the clocks algorithm assume proper
- // transitive cumulative propagation of clock values. The AcquireGlobal
- // operation may produce an inconsistent non-linearazable view of
- // thread clocks. Namely, it may acquire a later value from a thread
- // with a higher ID, but fail to acquire an earlier value from a thread
- // with a lower ID. If a thread that executed AcquireGlobal then releases
- // to a sync clock, it will spoil the sync clock with the inconsistent
- // values. If another thread later releases to the sync clock, the optimized
- // algorithm may break.
- //
- // The exact sequence of events that leads to the failure.
- // - thread 1 executes AcquireGlobal
- // - thread 1 acquires value 1 for thread 2
- // - thread 2 increments clock to 2
- // - thread 2 releases to sync object 1
- // - thread 3 at time 1
- // - thread 3 acquires from sync object 1
- // - thread 3 increments clock to 2
- // - thread 1 acquires value 2 for thread 3
- // - thread 1 releases to sync object 2
- // - sync object 2 clock has 1 for thread 2 and 2 for thread 3
- // - thread 3 releases to sync object 2
- // - thread 3 sees value 2 in the clock for itself
- // and decides that it has already released to the clock
- // and did not acquire anything from other threads after that
- // (the last_acquire_ check in release operation)
- // - thread 3 does not update the value for thread 2 in the clock from 1 to 2
- // - thread 4 acquires from sync object 2
- // - thread 4 detects a false race with thread 2
- // as it should have been synchronized with thread 2 up to time 2,
- // but because of the broken clock it is now synchronized only up to time 1
- //
- // The global_acquire_ value helps to prevent this scenario.
- // Namely, thread 3 will not trust any own clock values up to global_acquire_
- // for the purposes of the last_acquire_ optimization.
- atomic_uint64_t global_acquire_;
-
- // Cached SyncClock (without dirty entries and release_store_tid_).
- // We reuse it for subsequent store-release operations without intervening
- // acquire operations. Since it is shared (and thus constant), clock value
- // for the current thread is then stored in dirty entries in the SyncClock.
- // We host a refernece to the table while it is cached here.
- u32 cached_idx_;
- u16 cached_size_;
- u16 cached_blocks_;
-
- // Number of active elements in the clk_ table (the rest is zeros).
- uptr nclk_;
- u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
-
- bool IsAlreadyAcquired(const SyncClock *src) const;
- bool HasAcquiredAfterRelease(const SyncClock *dst) const;
- void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
-};
-
-ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
- DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid];
-}
-
-ALWAYS_INLINE void ThreadClock::set(u64 v) {
- DCHECK_GE(v, clk_[tid_]);
- clk_[tid_] = v;
-}
-
-ALWAYS_INLINE void ThreadClock::tick() {
- clk_[tid_]++;
-}
-
-ALWAYS_INLINE uptr ThreadClock::size() const {
- return nclk_;
-}
-
-ALWAYS_INLINE void ThreadClock::NoteGlobalAcquire(u64 v) {
- // Here we rely on the fact that AcquireGlobal is protected by
- // ThreadRegistryLock, thus only one thread at a time executes it
- // and values passed to this function should not go backwards.
- CHECK_LE(atomic_load_relaxed(&global_acquire_), v);
- atomic_store_relaxed(&global_acquire_, v);
-}
-
-ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
- return Iter(this);
-}
-
-ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
- return Iter(nullptr);
-}
-
-ALWAYS_INLINE uptr SyncClock::size() const {
- return size_;
-}
-
-ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
- : parent_(parent)
- , pos_(nullptr)
- , end_(nullptr)
- , block_(-1) {
- if (parent)
- Next();
-}
-
-ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
- pos_++;
- if (UNLIKELY(pos_ >= end_))
- Next();
- return *this;
-}
-
-ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
- return parent_ != other.parent_;
-}
-
-ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
- return *pos_;
-}
-} // namespace __tsan
-
-#endif // TSAN_CLOCK_H
lib/tsan/tsan_debugging.cpp
@@ -157,7 +157,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
ReportMutex *mutex = rep->mutexes[idx];
*mutex_id = mutex->id;
*addr = (void *)mutex->addr;
- *destroyed = mutex->destroyed;
+ *destroyed = false;
if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
return 1;
}
@@ -195,9 +195,9 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
- if (IsMetaMem(addr)) {
+ if (IsMetaMem(reinterpret_cast<u32 *>(addr))) {
region_kind = "meta shadow";
- } else if (IsShadowMem(addr)) {
+ } else if (IsShadowMem(reinterpret_cast<RawShadow *>(addr))) {
region_kind = "shadow";
} else {
bool is_stack = false;
@@ -215,9 +215,9 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
} else {
// TODO(kuba.brecka): We should not lock. This is supposed to be called
// from within the debugger when other threads are stopped.
- ctx->thread_registry->Lock();
+ ctx->thread_registry.Lock();
ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
- ctx->thread_registry->Unlock();
+ ctx->thread_registry.Unlock();
if (tctx) {
region_kind = is_stack ? "stack" : "tls";
} else {
@@ -252,7 +252,7 @@ int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
*thread_id = b->tid;
// No locking. This is supposed to be called from within the debugger when
// other threads are stopped.
- ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ ThreadContextBase *tctx = ctx->thread_registry.GetThreadLocked(b->tid);
*os_id = tctx->os_id;
StackTrace stack = StackDepotGet(b->stk);
lib/tsan/tsan_defs.h
@@ -18,6 +18,24 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "ubsan/ubsan_platform.h"
+#ifndef TSAN_VECTORIZE
+# define TSAN_VECTORIZE __SSE4_2__
+#endif
+
+#if TSAN_VECTORIZE
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+# define _MM_MALLOC_H_INCLUDED
+# define __MM_MALLOC_H
+# include <emmintrin.h>
+# include <smmintrin.h>
+# define VECTOR_ALIGNED ALIGNED(16)
+typedef __m128i m128;
+#else
+# define VECTOR_ALIGNED
+#endif
+
// Setup defaults for compile definitions.
#ifndef TSAN_NO_HISTORY
# define TSAN_NO_HISTORY 0
@@ -33,40 +51,26 @@
namespace __tsan {
-const int kClkBits = 42;
-const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
+constexpr uptr kByteBits = 8;
-struct ClockElem {
- u64 epoch : kClkBits;
- u64 reused : 64 - kClkBits; // tid reuse count
-};
+// Thread slot ID.
+enum class Sid : u8 {};
+constexpr uptr kThreadSlotCount = 256;
+constexpr Sid kFreeSid = static_cast<Sid>(255);
-struct ClockBlock {
- static const uptr kSize = 512;
- static const uptr kTableSize = kSize / sizeof(u32);
- static const uptr kClockCount = kSize / sizeof(ClockElem);
- static const uptr kRefIdx = kTableSize - 1;
- static const uptr kBlockIdx = kTableSize - 2;
+// Abstract time unit, vector clock element.
+enum class Epoch : u16 {};
+constexpr uptr kEpochBits = 14;
+constexpr Epoch kEpochZero = static_cast<Epoch>(0);
+constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
+constexpr Epoch kEpochLast = static_cast<Epoch>((1 << kEpochBits) - 1);
- union {
- u32 table[kTableSize];
- ClockElem clock[kClockCount];
- };
+inline Epoch EpochInc(Epoch epoch) {
+ return static_cast<Epoch>(static_cast<u16>(epoch) + 1);
+}
- ClockBlock() {
- }
-};
+inline bool EpochOverflow(Epoch epoch) { return epoch == kEpochOver; }
-const int kTidBits = 13;
-// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is
-// occupied by reference counter, so total number of elements we can store
-// in SyncClock is kClockCount * (kTableSize - 1).
-const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
-#if !SANITIZER_GO
-const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
-#else
-const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
-#endif
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
@@ -75,8 +79,9 @@ const uptr kShadowCnt = 4;
// That many user bytes are mapped onto a single shadow cell.
const uptr kShadowCell = 8;
-// Size of a single shadow value (u64).
-const uptr kShadowSize = 8;
+// Single shadow value.
+enum class RawShadow : u32 {};
+const uptr kShadowSize = sizeof(RawShadow);
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
@@ -88,6 +93,9 @@ const uptr kMetaShadowCell = 8;
// Size of a single meta shadow value (u32).
const uptr kMetaShadowSize = 4;
+// All addresses and PCs are assumed to be compressible to that many bits.
+const uptr kCompressedAddrBits = 44;
+
#if TSAN_NO_HISTORY
const bool kCollectHistory = false;
#else
@@ -149,17 +157,34 @@ MD5Hash md5_hash(const void *data, uptr size);
struct Processor;
struct ThreadState;
class ThreadContext;
+struct TidSlot;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
+struct Trace;
+struct TracePart;
+
+typedef uptr AccessType;
+
+enum : AccessType {
+ kAccessWrite = 0,
+ kAccessRead = 1 << 0,
+ kAccessAtomic = 1 << 1,
+ kAccessVptr = 1 << 2, // read or write of an object virtual table pointer
+ kAccessFree = 1 << 3, // synthetic memory access during memory freeing
+ kAccessExternalPC = 1 << 4, // access PC can have kExternalPCBit set
+ kAccessCheckOnly = 1 << 5, // check for races, but don't store
+ kAccessNoRodata = 1 << 6, // don't check for .rodata marker
+ kAccessSlotLocked = 1 << 7, // memory access with TidSlot locked
+};
// Descriptor of user's memory block.
struct MBlock {
u64 siz : 48;
u64 tag : 16;
- u32 stk;
- u16 tid;
+ StackID stk;
+ Tid tid;
};
COMPILER_CHECK(sizeof(MBlock) == 16);
@@ -173,15 +198,18 @@ enum ExternalTag : uptr {
// as 16-bit values, see tsan_defs.h.
};
-enum MutexType {
- MutexTypeTrace = MutexLastCommon,
- MutexTypeReport,
+enum {
+ MutexTypeReport = MutexLastCommon,
MutexTypeSyncVar,
MutexTypeAnnotations,
MutexTypeAtExit,
MutexTypeFired,
MutexTypeRacy,
MutexTypeGlobalProc,
+ MutexTypeInternalAlloc,
+ MutexTypeTrace,
+ MutexTypeSlot,
+ MutexTypeSlots,
};
} // namespace __tsan
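
Sid and Epoch above are strongly typed enums, so slot IDs and clock values cannot be silently mixed with raw integers; arithmetic goes through small helpers such as EpochInc and EpochOverflow. A brief sketch of their intended use, based only on the definitions in this hunk; AdvanceEpoch is a hypothetical helper.

    namespace __tsan {

    // Sketch: advance one vector-clock element and detect epoch exhaustion.
    inline bool AdvanceEpoch(Epoch &clk) {
      clk = EpochInc(clk);
      // Once the 14-bit epoch space is used up, the runtime is expected to
      // retire the owning thread slot (Sid) rather than keep counting.
      return !EpochOverflow(clk);
    }

    static_assert(static_cast<u16>(kEpochLast) == (1 << kEpochBits) - 1,
                  "kEpochLast is the largest representable epoch");

    }  // namespace __tsan
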
lib/tsan/tsan_dense_alloc.h
@@ -49,11 +49,7 @@ class DenseSlabAlloc {
static_assert(sizeof(T) > sizeof(IndexT),
"it doesn't make sense to use dense alloc");
- explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
- freelist_ = 0;
- fillpos_ = 0;
- name_ = name;
- }
+ DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}
explicit DenseSlabAlloc(const char *name)
: DenseSlabAlloc(LINKER_INITIALIZED, name) {
@@ -89,12 +85,7 @@ class DenseSlabAlloc {
}
void FlushCache(Cache *c) {
- SpinMutexLock lock(&mtx_);
- while (c->pos) {
- IndexT idx = c->cache[--c->pos];
- *(IndexT*)Map(idx) = freelist_;
- freelist_ = idx;
- }
+ while (c->pos) Drain(c);
}
void InitCache(Cache *c) {
@@ -102,48 +93,101 @@ class DenseSlabAlloc {
internal_memset(c->cache, 0, sizeof(c->cache));
}
+ uptr AllocatedMemory() const {
+ return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
+ }
+
+ template <typename Func>
+ void ForEach(Func func) {
+ Lock lock(&mtx_);
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ for (uptr l1 = 0; l1 < fillpos; l1++) {
+ for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
+ }
+ }
+
private:
T *map_[kL1Size];
- SpinMutex mtx_;
- IndexT freelist_;
- uptr fillpos_;
- const char *name_;
-
- void Refill(Cache *c) {
- SpinMutexLock lock(&mtx_);
- if (freelist_ == 0) {
- if (fillpos_ == kL1Size) {
- Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
- name_, kL1Size, kL2Size);
- Die();
- }
- VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
- name_, fillpos_, kL1Size, kL2Size);
- T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
- // Reserve 0 as invalid index.
- IndexT start = fillpos_ == 0 ? 1 : 0;
- for (IndexT i = start; i < kL2Size; i++) {
- new(batch + i) T;
- *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
- }
- *(IndexT*)(batch + kL2Size - 1) = 0;
- freelist_ = fillpos_ * kL2Size + start;
- map_[fillpos_++] = batch;
- }
- for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
- IndexT idx = freelist_;
+ Mutex mtx_;
+ // The freelist is organized as a lock-free stack of batches of nodes.
+ // The stack itself uses Block::next links, while the batch within each
+ // stack node uses Block::batch links.
+  // The low 32 bits of freelist_ are the node index, the top 32 bits are an
+  // ABA counter.
+ atomic_uint64_t freelist_ = {0};
+ atomic_uintptr_t fillpos_ = {0};
+ const char *const name_;
+
+ struct Block {
+ IndexT next;
+ IndexT batch;
+ };
+
+ Block *MapBlock(IndexT idx) { return reinterpret_cast<Block *>(Map(idx)); }
+
+ static constexpr u64 kCounterInc = 1ull << 32;
+ static constexpr u64 kCounterMask = ~(kCounterInc - 1);
+
+ NOINLINE void Refill(Cache *c) {
+ // Pop 1 batch of nodes from the freelist.
+ IndexT idx;
+ u64 xchg;
+ u64 cmp = atomic_load(&freelist_, memory_order_acquire);
+ do {
+ idx = static_cast<IndexT>(cmp);
+ if (!idx)
+ return AllocSuperBlock(c);
+ Block *ptr = MapBlock(idx);
+ xchg = ptr->next | (cmp & kCounterMask);
+ } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
+ memory_order_acq_rel));
+ // Unpack it into c->cache.
+ while (idx) {
c->cache[c->pos++] = idx;
- freelist_ = *(IndexT*)Map(idx);
+ idx = MapBlock(idx)->batch;
}
}
- void Drain(Cache *c) {
- SpinMutexLock lock(&mtx_);
- for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ NOINLINE void Drain(Cache *c) {
+ // Build a batch of at most Cache::kSize / 2 nodes linked by Block::batch.
+ IndexT head_idx = 0;
+ for (uptr i = 0; i < Cache::kSize / 2 && c->pos; i++) {
IndexT idx = c->cache[--c->pos];
- *(IndexT*)Map(idx) = freelist_;
- freelist_ = idx;
+ Block *ptr = MapBlock(idx);
+ ptr->batch = head_idx;
+ head_idx = idx;
+ }
+ // Push it onto the freelist stack.
+ Block *head = MapBlock(head_idx);
+ u64 xchg;
+ u64 cmp = atomic_load(&freelist_, memory_order_acquire);
+ do {
+ head->next = static_cast<IndexT>(cmp);
+      xchg = head_idx | ((cmp & kCounterMask) + kCounterInc);
+ } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
+ memory_order_acq_rel));
+ }
+
+ NOINLINE void AllocSuperBlock(Cache *c) {
+ Lock lock(&mtx_);
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ if (fillpos == kL1Size) {
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n", name_, kL1Size,
+ kL2Size);
+ Die();
+ }
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+ fillpos, kL1Size, kL2Size);
+ T *batch = (T *)MmapOrDie(kL2Size * sizeof(T), name_);
+ map_[fillpos] = batch;
+ // Reserve 0 as invalid index.
+ for (IndexT i = fillpos ? 0 : 1; i < kL2Size; i++) {
+ new (batch + i) T;
+ c->cache[c->pos++] = i + fillpos * kL2Size;
+ if (c->pos == Cache::kSize)
+ Drain(c);
}
+ atomic_store_relaxed(&fillpos_, fillpos + 1);
+ CHECK(c->pos);
}
};
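A self-contained sketch of the freelist word layout that Refill and Drain manipulate above: the low 32 bits hold the head node index and the high 32 bits hold the ABA counter that is bumped on every push (the names mirror the members above; illustrative only, not part of the commit):

#include <cstdint>
#include <cstdio>

using IndexT = uint32_t;
constexpr uint64_t kCounterInc = 1ull << 32;
constexpr uint64_t kCounterMask = ~(kCounterInc - 1);

// Pack a new head index into the freelist word, bumping the ABA counter.
uint64_t PushHead(uint64_t word, IndexT new_head) {
  return new_head | ((word & kCounterMask) + kCounterInc);
}

// Unpack the current head index (what Refill does before following
// Block::next/Block::batch links).
IndexT HeadIndex(uint64_t word) { return static_cast<IndexT>(word); }

int main() {
  uint64_t freelist = 0;              // empty: index 0 is reserved as invalid
  freelist = PushHead(freelist, 17);  // push node 17, counter becomes 1
  freelist = PushHead(freelist, 42);  // push node 42, counter becomes 2
  printf("head=%u counter=%llu\n", HeadIndex(freelist),
         (unsigned long long)(freelist >> 32));  // prints: head=42 counter=2
  return 0;
}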
lib/tsan/tsan_dispatch_defs.h
@@ -0,0 +1,73 @@
+//===-- tsan_dispatch_defs.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DISPATCH_DEFS_H
+#define TSAN_DISPATCH_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+typedef struct dispatch_object_s {} *dispatch_object_t;
+
+#define DISPATCH_DECL(name) \
+ typedef struct name##_s : public dispatch_object_s {} *name##_t
+
+DISPATCH_DECL(dispatch_queue);
+DISPATCH_DECL(dispatch_source);
+DISPATCH_DECL(dispatch_group);
+DISPATCH_DECL(dispatch_data);
+DISPATCH_DECL(dispatch_semaphore);
+DISPATCH_DECL(dispatch_io);
+
+typedef void (*dispatch_function_t)(void *arg);
+typedef void (^dispatch_block_t)(void);
+typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data,
+ int error);
+
+typedef long dispatch_once_t;
+typedef __sanitizer::u64 dispatch_time_t;
+typedef int dispatch_fd_t;
+typedef unsigned long dispatch_io_type_t;
+typedef unsigned long dispatch_io_close_flags_t;
+
+extern "C" {
+void *dispatch_get_context(dispatch_object_t object);
+void dispatch_retain(dispatch_object_t object);
+void dispatch_release(dispatch_object_t object);
+
+extern const dispatch_block_t _dispatch_data_destructor_free;
+extern const dispatch_block_t _dispatch_data_destructor_munmap;
+} // extern "C"
+
+#define DISPATCH_DATA_DESTRUCTOR_DEFAULT nullptr
+#define DISPATCH_DATA_DESTRUCTOR_FREE _dispatch_data_destructor_free
+#define DISPATCH_DATA_DESTRUCTOR_MUNMAP _dispatch_data_destructor_munmap
+
+#if __has_attribute(noescape)
+# define DISPATCH_NOESCAPE __attribute__((__noescape__))
+#else
+# define DISPATCH_NOESCAPE
+#endif
+
+#if SANITIZER_APPLE
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
+#else
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak))
+#endif
+
+// Data types used in dispatch APIs
+typedef unsigned long size_t;
+typedef unsigned long uintptr_t;
+typedef __sanitizer::s64 off_t;
+typedef __sanitizer::u16 mode_t;
+typedef long long_t;
+
+#endif // TSAN_DISPATCH_DEFS_H
lib/tsan/tsan_external.cpp
@@ -10,9 +10,12 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"
-#include "tsan_interceptors.h"
#include "sanitizer_common/sanitizer_ptrauth.h"
+#if !SANITIZER_GO
+# include "tsan_interceptors.h"
+#endif
+
namespace __tsan {
#define CALLERPC ((uptr)__builtin_return_address(0))
@@ -43,10 +46,6 @@ const char *GetReportHeaderFromTag(uptr tag) {
return tag_data ? tag_data->header : nullptr;
}
-void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
-  FuncEntry(thr, (uptr)&registered_tags[tag]);
-}
-
uptr TagFromShadowStackFrame(uptr pc) {
uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
void *pc_ptr = (void *)pc;
@@ -57,17 +56,26 @@ uptr TagFromShadowStackFrame(uptr pc) {
#if !SANITIZER_GO
-typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
+// We need to track tags for individual memory accesses, but there is no space
+// in the shadow cells for them. Instead we push/pop them onto the thread
+// traces and ignore the extra tag frames when printing reports.
+static void PushTag(ThreadState *thr, uptr tag) {
+  FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+static void PopTag(ThreadState *thr) { FuncExit(thr); }
+
+static void ExternalAccess(void *addr, uptr caller_pc, uptr tsan_caller_pc,
+ void *tag, AccessType typ) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ bool in_ignored_lib;
+ if (caller_pc && libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+ return;
+
ThreadState *thr = cur_thread();
if (caller_pc) FuncEntry(thr, caller_pc);
- InsertShadowStackFrameForTag(thr, (uptr)tag);
- bool in_ignored_lib;
- if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
- access(thr, CALLERPC, (uptr)addr, kSizeLog1);
- }
- FuncExit(thr);
+ PushTag(thr, (uptr)tag);
+ MemoryAccess(thr, tsan_caller_pc, (uptr)addr, 1, typ);
+ PopTag(thr);
if (caller_pc) FuncExit(thr);
}
@@ -92,7 +100,7 @@ void __tsan_external_register_header(void *tag, const char *header) {
header = internal_strdup(header);
char *old_header =
(char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
- if (old_header) internal_free(old_header);
+ Free(old_header);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -111,12 +119,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), CALLERPC, tag, kAccessRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), CALLERPC, tag, kAccessWrite);
}
} // extern "C"
lib/tsan/tsan_fd.cpp
@@ -11,9 +11,12 @@
//===----------------------------------------------------------------------===//
#include "tsan_fd.h"
-#include "tsan_rtl.h"
+
#include <sanitizer_common/sanitizer_atomic.h>
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
namespace __tsan {
const int kTableSizeL1 = 1024;
@@ -26,8 +29,12 @@ struct FdSync {
struct FdDesc {
FdSync *sync;
- int creation_tid;
- u32 creation_stack;
+ // This is used to establish write -> epoll_wait synchronization
+ // where epoll_wait receives notification about the write.
+ atomic_uintptr_t aux_sync; // FdSync*
+ Tid creation_tid;
+ StackID creation_stack;
+ bool closed;
};
struct FdContext {
@@ -100,6 +107,10 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
unref(thr, pc, d->sync);
d->sync = 0;
}
+ unref(thr, pc,
+ reinterpret_cast<FdSync *>(
+ atomic_load(&d->aux_sync, memory_order_relaxed)));
+ atomic_store(&d->aux_sync, 0, memory_order_relaxed);
if (flags()->io_sync == 0) {
unref(thr, pc, s);
} else if (flags()->io_sync == 1) {
@@ -110,12 +121,18 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
}
d->creation_tid = thr->tid;
d->creation_stack = CurrentStackId(thr, pc);
+ d->closed = false;
+  // This prevents false positives on the fd_close_norace3.cpp test.
+ // The mechanics of the false positive are not completely clear,
+ // but it happens only if global reset is enabled (flush_memory_ms=1)
+ // and may be related to lost writes during asynchronous MADV_DONTNEED.
+ SlotLocker locker(thr);
if (write) {
// To catch races between fd usage and open.
MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
} else {
// See the dup-related comment in FdClose.
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead | kAccessSlotLocked);
}
}
@@ -140,7 +157,7 @@ void FdOnFork(ThreadState *thr, uptr pc) {
}
}
-bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed) {
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
@@ -151,6 +168,7 @@ bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
*fd = l1 * kTableSizeL1 + l2;
*tid = d->creation_tid;
*stack = d->creation_stack;
+ *closed = d->closed;
return true;
}
}
@@ -163,7 +181,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
if (s)
Acquire(thr, pc, (uptr)s);
}
@@ -174,9 +192,11 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
if (s)
Release(thr, pc, (uptr)s);
+ if (uptr aux_sync = atomic_load(&d->aux_sync, memory_order_acquire))
+ Release(thr, pc, aux_sync);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
@@ -184,7 +204,7 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
}
void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
@@ -192,27 +212,42 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
- if (write) {
- // To catch races between fd usage and close.
- MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
- } else {
- // This path is used only by dup2/dup3 calls.
- // We do read instead of write because there is a number of legitimate
- // cases where write would lead to false positives:
- // 1. Some software dups a closed pipe in place of a socket before closing
- // the socket (to prevent races actually).
- // 2. Some daemons dup /dev/null in place of stdin/stdout.
- // On the other hand we have not seen cases when write here catches real
- // bugs.
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ {
+ // Need to lock the slot to make MemoryAccess and MemoryResetRange atomic
+ // with respect to global reset. See the comment in MemoryRangeFreed.
+ SlotLocker locker(thr);
+ if (!MustIgnoreInterceptor(thr)) {
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryAccess(thr, pc, (uptr)d, 8,
+ kAccessWrite | kAccessCheckOnly | kAccessSlotLocked);
+ } else {
+ // This path is used only by dup2/dup3 calls.
+ // We do read instead of write because there is a number of legitimate
+ // cases where write would lead to false positives:
+ // 1. Some software dups a closed pipe in place of a socket before
+ // closing
+ // the socket (to prevent races actually).
+ // 2. Some daemons dup /dev/null in place of stdin/stdout.
+ // On the other hand we have not seen cases when write here catches real
+ // bugs.
+ MemoryAccess(thr, pc, (uptr)d, 8,
+ kAccessRead | kAccessCheckOnly | kAccessSlotLocked);
+ }
+ }
+      // We need to clear it, because if we do not intercept any call out there
+      // that creates an fd, we will hit false positives.
+ MemoryResetRange(thr, pc, (uptr)d, 8);
}
- // We need to clear it, because if we do not intercept any call out there
- // that creates fd, we will hit false postives.
- MemoryResetRange(thr, pc, (uptr)d, 8);
unref(thr, pc, d->sync);
d->sync = 0;
- d->creation_tid = 0;
- d->creation_stack = 0;
+ unref(thr, pc,
+ reinterpret_cast<FdSync *>(
+ atomic_load(&d->aux_sync, memory_order_relaxed)));
+ atomic_store(&d->aux_sync, 0, memory_order_relaxed);
+ d->closed = true;
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
}
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
@@ -228,7 +263,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
return;
// Ignore the case when user dups not yet connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
- MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
FdClose(thr, pc, newfd, write);
init(thr, pc, newfd, ref(od->sync), write);
}
@@ -269,6 +304,30 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, allocsync(thr, pc));
}
+void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd) {
+ DPrintf("#%d: FdPollAdd(%d, %d)\n", thr->tid, epfd, fd);
+ if (bogusfd(epfd) || bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ // Associate fd with epoll fd only once.
+ // While an fd can be associated with multiple epolls at the same time,
+  // or with different epolls during different phases of its lifetime,
+  // the synchronization semantics (and examples) of this are unclear.
+  // So we don't support this for now.
+  // If we change the association, it would also create a lifetime management
+  // problem for FdRelease, which accesses the aux_sync.
+ if (atomic_load(&d->aux_sync, memory_order_relaxed))
+ return;
+ FdDesc *epd = fddesc(thr, pc, epfd);
+ FdSync *s = epd->sync;
+ if (!s)
+ return;
+ uptr cmp = 0;
+ if (atomic_compare_exchange_strong(
+ &d->aux_sync, &cmp, reinterpret_cast<uptr>(s), memory_order_release))
+ ref(s);
+}
+
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
lib/tsan/tsan_fd.h
@@ -49,11 +49,12 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd);
void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd);
void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
-bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed);
void FdOnFork(ThreadState *thr, uptr pc);
uptr File2addr(const char *path);
lib/tsan/tsan_flags.cpp
@@ -10,19 +10,21 @@
//
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_flags.h"
+#include "tsan_flags.h"
+
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_flags.h"
-#include "tsan_rtl.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
+#include "tsan_rtl.h"
#include "ubsan/ubsan_flags.h"
namespace __tsan {
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-extern "C" const char* __tsan_default_options();
+extern "C" const char *__tsan_default_options();
#else
SANITIZER_WEAK_DEFAULT_IMPL
const char *__tsan_default_options() {
@@ -55,6 +57,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
// Override some common flags defaults.
CommonFlags cf;
cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("TSAN_SYMBOLIZER_PATH");
cf.allow_addr2line = true;
if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
@@ -96,7 +99,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
#endif
- // Sanity check.
+ // Check flags.
if (!f->report_bugs) {
f->report_thread_leaks = false;
f->report_destroy_locked = false;
@@ -109,12 +112,6 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
if (common_flags()->help) parser.PrintFlagDescriptions();
- if (f->history_size < 0 || f->history_size > 7) {
- Printf("ThreadSanitizer: incorrect value for history_size"
- " (must be [0..7])\n");
- Die();
- }
-
if (f->io_sync < 0 || f->io_sync > 2) {
Printf("ThreadSanitizer: incorrect value for io_sync"
" (must be [0..2])\n");
lib/tsan/tsan_flags.inc
@@ -23,10 +23,6 @@ TSAN_FLAG(bool, enable_annotations, true,
TSAN_FLAG(bool, suppress_equal_stacks, true,
"Suppress a race report if we've already output another race report "
"with the same stack.")
-TSAN_FLAG(bool, suppress_equal_addresses, true,
- "Suppress a race report if we've already output another race report "
- "on the same address.")
-
TSAN_FLAG(bool, report_bugs, true,
"Turns off bug reporting entirely (useful for benchmarking).")
TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
@@ -43,7 +39,9 @@ TSAN_FLAG(
bool, force_seq_cst_atomics, false,
"If set, all atomics are effectively sequentially consistent (seq_cst), "
"regardless of what user actually specified.")
-TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
+TSAN_FLAG(bool, force_background_thread, false,
+ "If set, eagerly launch a background thread for memory reclamation "
+ "instead of waiting for a user call to pthread_create.")
TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
TSAN_FLAG(int, atexit_sleep_ms, 1000,
"Sleep in main thread before exiting for that many ms "
@@ -60,14 +58,10 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
-// There are a lot of goroutines in Go, so we use smaller history.
TSAN_FLAG(
- int, history_size, SANITIZER_GO ? 1 : 3,
- "Per-thread history size, controls how many previous memory accesses "
- "are remembered per thread. Possible values are [0..7]. "
- "history_size=0 amounts to 32K memory accesses. Each next value doubles "
- "the amount of memory accesses, up to history_size=7 that amounts to "
- "4M memory accesses. The default value is 2 (128K memory accesses).")
+ uptr, history_size, 0,
+ "Per-thread history size,"
+ " controls how many extra previous memory accesses are remembered per thread.")
TSAN_FLAG(int, io_sync, 1,
"Controls level of synchronization implied by IO operations. "
"0 - no synchronization "
@@ -76,10 +70,13 @@ TSAN_FLAG(int, io_sync, 1,
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
-TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_APPLE ? true : false,
"Ignore reads and writes from all interceptors.")
-TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_APPLE ? true : false,
"Interceptors should only detect races when called from instrumented "
"modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
"Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
+TSAN_FLAG(bool, print_full_thread_history, false,
+ "If set, prints thread creation stacks for the threads involved in "
+ "the report and their ancestors up to the main thread.")
lib/tsan/tsan_ignoreset.cpp
@@ -19,7 +19,7 @@ IgnoreSet::IgnoreSet()
: size_() {
}
-void IgnoreSet::Add(u32 stack_id) {
+void IgnoreSet::Add(StackID stack_id) {
if (size_ == kMaxSize)
return;
for (uptr i = 0; i < size_; i++) {
@@ -29,15 +29,7 @@ void IgnoreSet::Add(u32 stack_id) {
stacks_[size_++] = stack_id;
}
-void IgnoreSet::Reset() {
- size_ = 0;
-}
-
-uptr IgnoreSet::Size() const {
- return size_;
-}
-
-u32 IgnoreSet::At(uptr i) const {
+StackID IgnoreSet::At(uptr i) const {
CHECK_LT(i, size_);
CHECK_LE(size_, kMaxSize);
return stacks_[i];
lib/tsan/tsan_ignoreset.h
@@ -19,17 +19,16 @@ namespace __tsan {
class IgnoreSet {
public:
- static const uptr kMaxSize = 16;
-
IgnoreSet();
- void Add(u32 stack_id);
- void Reset();
- uptr Size() const;
- u32 At(uptr i) const;
+ void Add(StackID stack_id);
+ void Reset() { size_ = 0; }
+ uptr Size() const { return size_; }
+ StackID At(uptr i) const;
private:
+ static constexpr uptr kMaxSize = 16;
uptr size_;
- u32 stacks_[kMaxSize];
+ StackID stacks_[kMaxSize];
};
} // namespace __tsan
lib/tsan/tsan_ilist.h
@@ -0,0 +1,189 @@
+//===-- tsan_ilist.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_ILIST_H
+#define TSAN_ILIST_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+class INode {
+ public:
+ INode() = default;
+
+ private:
+ INode* next_ = nullptr;
+ INode* prev_ = nullptr;
+
+ template <typename Base, INode Base::*Node, typename Elem>
+ friend class IList;
+ INode(const INode&) = delete;
+ void operator=(const INode&) = delete;
+};
+
+// Intrusive doubly-linked list.
+//
+// The node class (MyNode) needs to include an "INode foo" field;
+// the list can then be declared as IList<MyNode, &MyNode::foo>.
+// This design allows linking MyNode into multiple lists using
+// different INode fields.
+// The optional Elem template argument allows specifying the node's MDT
+// (most derived type) if it's different from MyNode.
+template <typename Base, INode Base::*Node, typename Elem = Base>
+class IList {
+ public:
+ IList();
+
+ void PushFront(Elem* e);
+ void PushBack(Elem* e);
+ void Remove(Elem* e);
+
+ Elem* PopFront();
+ Elem* PopBack();
+ Elem* Front();
+ Elem* Back();
+
+  // Prev links point towards the front of the queue.
+  Elem* Prev(Elem* e);
+  // Next links point towards the back of the queue.
+  Elem* Next(Elem* e);
+
+ uptr Size() const;
+ bool Empty() const;
+ bool Queued(Elem* e) const;
+
+ private:
+ INode node_;
+ uptr size_ = 0;
+
+ void Push(Elem* e, INode* after);
+ static INode* ToNode(Elem* e);
+ static Elem* ToElem(INode* n);
+
+ IList(const IList&) = delete;
+ void operator=(const IList&) = delete;
+};
+
+template <typename Base, INode Base::*Node, typename Elem>
+IList<Base, Node, Elem>::IList() {
+ node_.next_ = node_.prev_ = &node_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushFront(Elem* e) {
+ Push(e, &node_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushBack(Elem* e) {
+ Push(e, node_.prev_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Push(Elem* e, INode* after) {
+ INode* n = ToNode(e);
+ DCHECK_EQ(n->next_, nullptr);
+ DCHECK_EQ(n->prev_, nullptr);
+ INode* next = after->next_;
+ n->next_ = next;
+ n->prev_ = after;
+ next->prev_ = n;
+ after->next_ = n;
+ size_++;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Remove(Elem* e) {
+ INode* n = ToNode(e);
+ INode* next = n->next_;
+ INode* prev = n->prev_;
+ DCHECK(next);
+ DCHECK(prev);
+ DCHECK(size_);
+ next->prev_ = prev;
+ prev->next_ = next;
+ n->prev_ = n->next_ = nullptr;
+ size_--;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopFront() {
+ Elem* e = Front();
+ if (e)
+ Remove(e);
+ return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopBack() {
+ Elem* e = Back();
+ if (e)
+ Remove(e);
+ return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Front() {
+ return size_ ? ToElem(node_.next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Back() {
+ return size_ ? ToElem(node_.prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Prev(Elem* e) {
+ INode* n = ToNode(e);
+ DCHECK(n->prev_);
+ return n->prev_ != &node_ ? ToElem(n->prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Next(Elem* e) {
+ INode* n = ToNode(e);
+ DCHECK(n->next_);
+ return n->next_ != &node_ ? ToElem(n->next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+uptr IList<Base, Node, Elem>::Size() const {
+ return size_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Empty() const {
+ return size_ == 0;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Queued(Elem* e) const {
+ INode* n = ToNode(e);
+ DCHECK_EQ(!n->next_, !n->prev_);
+ return n->next_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+INode* IList<Base, Node, Elem>::ToNode(Elem* e) {
+ return &(e->*Node);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::ToElem(INode* n) {
+ return static_cast<Elem*>(reinterpret_cast<Base*>(
+ reinterpret_cast<uptr>(n) -
+ reinterpret_cast<uptr>(&(reinterpret_cast<Elem*>(0)->*Node))));
+}
+
+} // namespace __tsan
+
+#endif
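A usage sketch for the intrusive list above (the Task node type is hypothetical; it assumes this header and its sanitizer_common dependency are on the include path):

#include "tsan_ilist.h"

namespace __tsan {

// A node that can live on two independent lists at once, thanks to two
// separate INode fields.
struct Task {
  int priority = 0;
  INode by_priority;
  INode by_age;
};

void Example() {
  IList<Task, &Task::by_priority> ready;
  IList<Task, &Task::by_age> all;
  Task a, b;
  a.priority = 1;
  b.priority = 2;
  ready.PushBack(&a);
  ready.PushBack(&b);
  all.PushFront(&a);              // same node, linked through a different INode
  Task* next = ready.PopFront();  // returns &a; b stays queued
  (void)next;
  ready.Remove(&b);
  all.Remove(&a);                 // unlink nodes before they are destroyed
}

}  // namespace __tsan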
lib/tsan/tsan_interceptors.h
@@ -10,44 +10,71 @@ class ScopedInterceptor {
public:
ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
~ScopedInterceptor();
- void DisableIgnores();
- void EnableIgnores();
+ void DisableIgnores() {
+ if (UNLIKELY(ignoring_))
+ DisableIgnoresImpl();
+ }
+ void EnableIgnores() {
+ if (UNLIKELY(ignoring_))
+ EnableIgnoresImpl();
+ }
+
private:
ThreadState *const thr_;
- const uptr pc_;
- bool in_ignored_lib_;
- bool ignoring_;
+ bool in_ignored_lib_ = false;
+ bool in_blocking_func_ = false;
+ bool ignoring_ = false;
+
+ void DisableIgnoresImpl();
+ void EnableIgnoresImpl();
+};
+
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr pc;
};
LibIgnore *libignore();
#if !SANITIZER_GO
inline bool in_symbolizer() {
- cur_thread_init();
- return UNLIKELY(cur_thread()->in_symbolizer);
+ return UNLIKELY(cur_thread_init()->in_symbolizer);
}
#endif
+inline bool MustIgnoreInterceptor(ThreadState *thr) {
+ return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib;
+}
+
} // namespace __tsan
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
- cur_thread_init(); \
- ThreadState *thr = cur_thread(); \
- const uptr caller_pc = GET_CALLER_PC(); \
- ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = GET_CURRENT_PC(); \
- (void)pc; \
- /**/
-
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
- SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- if (REAL(func) == 0) { \
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread_init(); \
+ ScopedInterceptor si(thr, #func, GET_CALLER_PC()); \
+ UNUSED const uptr pc = GET_CURRENT_PC();
+
+#ifdef __powerpc64__
+// Debugging of crashes on powerpc after commit:
+// c80604f7a3 ("tsan: remove real func check from interceptors")
+// Somehow replacing if with DCHECK leads to strange failures in:
+// SanitizerCommon-tsan-powerpc64le-Linux :: Linux/ptrace.cpp
+// https://lab.llvm.org/buildbot/#/builders/105
+// https://lab.llvm.org/buildbot/#/builders/121
+// https://lab.llvm.org/buildbot/#/builders/57
+# define CHECK_REAL_FUNC(func) \
+ if (REAL(func) == 0) { \
Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
- Die(); \
- } \
- if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
- return REAL(func)(__VA_ARGS__); \
-/**/
+ Die(); \
+ }
+#else
+# define CHECK_REAL_FUNC(func) DCHECK(REAL(func))
+#endif
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ CHECK_REAL_FUNC(func); \
+ if (MustIgnoreInterceptor(thr)) \
+ return REAL(func)(__VA_ARGS__);
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
si.DisableIgnores();
@@ -57,20 +84,49 @@ inline bool in_symbolizer() {
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, _pthread_##func, __VA_ARGS__) \
+ ALIAS(WRAP(pthread_##func));
+#else
+# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...)
+#endif
+
#if SANITIZER_NETBSD
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func2));
+ ALIAS(WRAP(pthread_##func2));
#else
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...)
#endif
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (!cur_thread_init()->is_inited)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
+ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
+ false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, pc}; \
+ ctx = (void *)&_ctx; \
+ (void)ctx;
+
#endif // TSAN_INTERCEPTORS_H
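For orientation, a hedged sketch of how the COMMON_INTERCEPTOR_* definitions above compose inside a sanitizer_common-style interceptor body; read_like is a hypothetical function, not one intercepted by this commit:

// Hypothetical interceptor in the style used with
// sanitizer_common_interceptors.inc, given the macros defined above.
INTERCEPTOR(SSIZE_T, read_like, int fd, void *buf, SIZE_T count) {
  void *ctx;
  // Expands to SCOPED_TSAN_INTERCEPTOR(read_like, ...) and sets up a
  // TsanInterceptorContext {thr, pc} for the range macros below.
  COMMON_INTERCEPTOR_ENTER(ctx, read_like, fd, buf, count);
  SSIZE_T res = REAL(read_like)(fd, buf, count);
  if (res > 0)
    // Report the bytes written into the user buffer to the race detector.
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
  return res;
}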
lib/tsan/tsan_interceptors_mac.cpp
@@ -12,12 +12,13 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
+#include "tsan_spinlock_defs_mac.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"
#include <errno.h>
@@ -365,7 +366,7 @@ static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
if (h.created()) {
ThreadIgnoreBegin(thr, pc);
*h = (uptr) user_alloc(thr, pc, /*size=*/1);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
return *h;
}
@@ -405,8 +406,8 @@ TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
{
SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
}
- // Bacause of swapcontext() semantics we have no option but to copy its
- // impementation here
+ // Because of swapcontext() semantics we have no option but to copy its
+ // implementation here
if (!oucp || !ucp) {
errno = EINVAL;
return -1;
@@ -518,4 +519,4 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
} // namespace __tsan
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/tsan_interceptors_memintrinsics.cpp
@@ -0,0 +1,43 @@
+//===-- tsan_interceptors_memintrinsics.cpp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+
+using namespace __tsan;
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
+extern "C" {
+
+void *__tsan_memcpy(void *dst, const void *src, uptr size) {
+ void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+void *__tsan_memset(void *dst, int c, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
+}
+
+void *__tsan_memmove(void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+} // extern "C"
lib/tsan/tsan_interceptors_posix.cpp
@@ -35,7 +35,10 @@
using namespace __tsan;
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
+DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
+
+#if SANITIZER_FREEBSD || SANITIZER_APPLE
#define stdout __stdoutp
#define stderr __stderrp
#endif
@@ -76,6 +79,8 @@ struct ucontext_t {
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
+#elif SANITIZER_LOONGARCH64
+#define PTHREAD_ABI_BASE "GLIBC_2.36"
#endif
extern "C" int pthread_attr_init(void *attr);
@@ -90,28 +95,26 @@ DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+extern "C" int pthread_equal(void *t1, void *t2);
extern "C" void *pthread_self();
extern "C" void _exit(int status);
#if !SANITIZER_NETBSD
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
-#if SANITIZER_GLIBC
-extern "C" int mallopt(int param, int value);
-#endif
#if SANITIZER_NETBSD
extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
@@ -121,17 +124,20 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
+#if SANITIZER_HAS_SIGINFO
+const int SI_TIMER = -2;
+#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
@@ -144,7 +150,7 @@ typedef __sanitizer::u16 mode_t;
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
@@ -155,32 +161,41 @@ const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif
-#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
- (cur_thread_init(), !cur_thread()->is_inited)
-
namespace __tsan {
struct SignalDesc {
bool armed;
- bool sigaction;
__sanitizer_siginfo siginfo;
ucontext_t ctx;
};
struct ThreadSignalContext {
int int_signal_send;
- atomic_uintptr_t in_blocking_func;
- atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
// emptyset and oldset are too big for stack.
__sanitizer_sigset_t emptyset;
__sanitizer_sigset_t oldset;
};
+void EnterBlockingFunc(ThreadState *thr) {
+ for (;;) {
+    // The order is important so as not to delay a signal indefinitely if it's
+    // delivered right before we set in_blocking_func. Note: we can't call
+    // ProcessPendingSignals while in_blocking_func is set, or we could end up
+    // handling a signal synchronously while already handling a signal.
+ atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+}
+
// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
void (*f)();
void *arg;
+ uptr pc;
};
// InterceptorContext holds all global data required for interceptors.
@@ -192,7 +207,7 @@ struct InterceptorContext {
// in a single cache line if possible (it's accessed in every interceptor).
ALIGNED(64) LibIgnore libignore;
__sanitizer_sigaction sigactions[kSigCount];
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD
unsigned finalize_key;
#endif
@@ -237,19 +252,37 @@ SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
} // namespace __tsan
static ThreadSignalContext *SigCtx(ThreadState *thr) {
- ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
+ // This function may be called reentrantly if it is interrupted by a signal
+ // handler. Use CAS to handle the race.
+ uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
if (ctx == 0 && !thr->is_dead) {
- ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
- MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
- thr->signal_ctx = ctx;
+ uptr pctx =
+ (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
+ MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
+ if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
+ memory_order_relaxed)) {
+ ctx = pctx;
+ } else {
+ UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
+ }
}
- return ctx;
+ return (ThreadSignalContext *)ctx;
}
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
- : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
- Initialize(thr);
+ : thr_(thr) {
+ LazyInitialize(thr);
+ if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
+ // pthread_join is marked as blocking, but it's also known to call other
+ // intercepted functions (mmap, free). If we don't reset in_blocking_func
+ // we can get deadlocks and memory corruptions if we deliver a synchronous
+ // signal inside of an mmap/free interceptor.
+ // So reset it and restore it back in the destructor.
+ // See https://github.com/google/sanitizers/issues/1540
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+ in_blocking_func_ = true;
+ }
if (!thr_->is_inited) return;
if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
@@ -262,6 +295,8 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
ScopedInterceptor::~ScopedInterceptor() {
if (!thr_->is_inited) return;
DisableIgnores();
+ if (UNLIKELY(in_blocking_func_))
+ EnterBlockingFunc(thr_);
if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
FuncExit(thr_);
@@ -269,43 +304,48 @@ ScopedInterceptor::~ScopedInterceptor() {
}
}
-void ScopedInterceptor::EnableIgnores() {
- if (ignoring_) {
- ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
- if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
- if (in_ignored_lib_) {
- DCHECK(!thr_->in_ignored_lib);
- thr_->in_ignored_lib = true;
- }
+NOINLINE
+void ScopedInterceptor::EnableIgnoresImpl() {
+ ThreadIgnoreBegin(thr_, 0);
+ if (flags()->ignore_noninstrumented_modules)
+ thr_->suppress_reports++;
+ if (in_ignored_lib_) {
+ DCHECK(!thr_->in_ignored_lib);
+ thr_->in_ignored_lib = true;
}
}
-void ScopedInterceptor::DisableIgnores() {
- if (ignoring_) {
- ThreadIgnoreEnd(thr_, pc_);
- if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
- if (in_ignored_lib_) {
- DCHECK(thr_->in_ignored_lib);
- thr_->in_ignored_lib = false;
- }
+NOINLINE
+void ScopedInterceptor::DisableIgnoresImpl() {
+ ThreadIgnoreEnd(thr_);
+ if (flags()->ignore_noninstrumented_modules)
+ thr_->suppress_reports--;
+ if (in_ignored_lib_) {
+ DCHECK(thr_->in_ignored_lib);
+ thr_->in_ignored_lib = false;
}
}
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#endif
#if SANITIZER_FREEBSD
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
-#elif SANITIZER_NETBSD
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
- INTERCEPT_FUNCTION(__libc_##func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
- INTERCEPT_FUNCTION(__libc_thr_##func)
+# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(_pthread_##func)
#else
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
+#endif
+#if SANITIZER_NETBSD
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(__libc_##func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
+ INTERCEPT_FUNCTION(__libc_thr_##func)
+#else
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif
#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
@@ -319,15 +359,8 @@ void ScopedInterceptor::DisableIgnores() {
struct BlockingCall {
explicit BlockingCall(ThreadState *thr)
- : thr(thr)
- , ctx(SigCtx(thr)) {
- for (;;) {
- atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
- if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
- break;
- atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
- ProcessPendingSignals(thr);
- }
+ : thr(thr) {
+ EnterBlockingFunc(thr);
// When we are in a "blocking call", we process signals asynchronously
// (right when they arrive). In this context we do not expect to be
// executing any user/runtime code. The known interceptor sequence when
@@ -338,11 +371,10 @@ struct BlockingCall {
~BlockingCall() {
thr->ignore_interceptors--;
- atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
}
ThreadState *thr;
- ThreadSignalContext *ctx;
};
TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
@@ -371,7 +403,10 @@ TSAN_INTERCEPTOR(int, pause, int fake) {
return BLOCK_REAL(pause)(fake);
}
-static void at_exit_wrapper() {
+// Note: we deliberately give the function this strange "installed_at" name
+// because in reports it will appear between the callback frames and the
+// frame that installed the callback.
+static void at_exit_callback_installed_at() {
AtExitCtx *ctx;
{
// Ensure thread-safety.
@@ -383,16 +418,22 @@ static void at_exit_wrapper() {
interceptor_ctx()->AtExitStack.PopBack();
}
- Acquire(cur_thread(), (uptr)0, (uptr)ctx);
+ ThreadState *thr = cur_thread();
+ Acquire(thr, ctx->pc, (uptr)ctx);
+ FuncEntry(thr, ctx->pc);
((void(*)())ctx->f)();
- InternalFree(ctx);
+ FuncExit(thr);
+ Free(ctx);
}
-static void cxa_at_exit_wrapper(void *arg) {
- Acquire(cur_thread(), 0, (uptr)arg);
+static void cxa_at_exit_callback_installed_at(void *arg) {
+ ThreadState *thr = cur_thread();
AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
((void(*)(void *arg))ctx->f)(ctx->arg);
- InternalFree(ctx);
+ FuncExit(thr);
+ Free(ctx);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
@@ -405,7 +446,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
// We want to setup the atexit callback even if we are in ignored lib
// or after fork.
SCOPED_INTERCEPTOR_RAW(atexit, f);
- return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
}
#endif
@@ -413,14 +454,15 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (in_symbolizer())
return 0;
SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
void *arg, void *dso) {
- AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ auto *ctx = New<AtExitCtx>();
ctx->f = f;
ctx->arg = arg;
+ ctx->pc = pc;
Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
@@ -436,41 +478,44 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
// due to atexit_mu held on exit from the calloc interceptor.
ScopedIgnoreInterceptors ignore;
- res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
+ 0, 0);
// Push AtExitCtx on the top of the stack of callback functions
if (!res) {
interceptor_ctx()->AtExitStack.PushBack(ctx);
}
} else {
- res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
+ res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
}
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
return res;
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
-static void on_exit_wrapper(int status, void *arg) {
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD
+static void on_exit_callback_installed_at(int status, void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
- InternalFree(ctx);
+ FuncExit(thr);
+ Free(ctx);
}
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (in_symbolizer())
return 0;
SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ auto *ctx = New<AtExitCtx>();
ctx->f = (void(*)())f;
ctx->arg = arg;
+ ctx->pc = GET_CALLER_PC();
Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
ThreadIgnoreBegin(thr, pc);
- int res = REAL(on_exit)(on_exit_wrapper, ctx);
- ThreadIgnoreEnd(thr, pc);
+ int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
+ ThreadIgnoreEnd(thr);
return res;
}
#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
@@ -502,9 +547,7 @@ static void SetJmp(ThreadState *thr, uptr sp) {
buf->shadow_stack_pos = thr->shadow_stack_pos;
ThreadSignalContext *sctx = SigCtx(thr);
buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
- buf->in_blocking_func = sctx ?
- atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
- false;
+ buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
memory_order_relaxed);
}
@@ -520,11 +563,10 @@ static void LongJmp(ThreadState *thr, uptr *env) {
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
ThreadSignalContext *sctx = SigCtx(thr);
- if (sctx) {
+ if (sctx)
sctx->int_signal_send = buf->int_signal_send;
- atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
- memory_order_relaxed);
- }
+ atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
memory_order_relaxed);
JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
@@ -536,16 +578,13 @@ static void LongJmp(ThreadState *thr, uptr *env) {
}
// FIXME: put everything below into a common extern "C" block?
-extern "C" void __tsan_setjmp(uptr sp) {
- cur_thread_init();
- SetJmp(cur_thread(), sp);
-}
+extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
@@ -555,59 +594,28 @@ TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#define sigsetjmp_symname sigsetjmp
#endif
-#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
-#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
-#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
-#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
-
-#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
-#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
-
-// Not called. Merely to satisfy TSAN_INTERCEPT().
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int TSAN_INTERCEPTOR_SETJMP(void *env);
-extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
- CHECK(0);
- return 0;
-}
-
-// FIXME: any reason to have a separate declaration?
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor__setjmp(void *env);
-extern "C" int __interceptor__setjmp(void *env) {
- CHECK(0);
- return 0;
-}
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
-extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
- CHECK(0);
- return 0;
-}
-
-#if !SANITIZER_NETBSD
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor___sigsetjmp(void *env);
-extern "C" int __interceptor___sigsetjmp(void *env) {
- CHECK(0);
- return 0;
-}
-#endif
-
-extern "C" int setjmp_symname(void *env);
-extern "C" int _setjmp(void *env);
-extern "C" int sigsetjmp_symname(void *env);
-#if !SANITIZER_NETBSD
-extern "C" int __sigsetjmp(void *env);
-#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
-#endif // SANITIZER_MAC
+
+// The real interceptor for setjmp is special, and implemented in pure asm. We
+// just need to initialize the REAL functions so that they can be used in asm.
+static void InitializeSetjmpInterceptors() {
+  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
+  // &setjmp, and setjmp is not present in some versions of libc.
+ using __interception::InterceptFunction;
+ InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), (uptr*)&REAL(setjmp_symname), 0, 0);
+ InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
+ InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), (uptr*)&REAL(sigsetjmp_symname), 0,
+ 0);
+#if !SANITIZER_NETBSD
+ InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
+}
+#endif // SANITIZER_APPLE
#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
@@ -646,7 +654,7 @@ TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
}
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
if (in_symbolizer())
return InternalAlloc(size);
@@ -787,10 +795,11 @@ static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
return res;
}
-TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
- SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+template <class Munmap>
+static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
+ void *addr, SIZE_T sz) {
UnmapShadow(thr, (uptr)addr, sz);
- int res = REAL(munmap)(addr, sz);
+ int res = real_munmap(addr, sz);
return res;
}
@@ -804,7 +813,7 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
if (in_symbolizer())
return InternalAlloc(sz, nullptr, align);
@@ -835,7 +844,7 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
if (in_symbolizer()) {
void *p = InternalAlloc(sz, nullptr, align);
@@ -849,6 +858,54 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
}
#endif
+// Both __cxa_guard_acquire and pthread_once start with a
+// 0-initialized object. pthread_once does not have any
+// other ABI requirements. __cxa_guard_acquire assumes
+// that any non-0 value in the first byte means that
+// initialization is completed. Contents of the remaining
+// bytes are up to us.
+constexpr u32 kGuardInit = 0;
+constexpr u32 kGuardDone = 1;
+constexpr u32 kGuardRunning = 1 << 16;
+constexpr u32 kGuardWaiter = 1 << 17;
+
+static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+ bool blocking_hooks = true) {
+ if (blocking_hooks)
+ OnPotentiallyBlockingRegionBegin();
+ auto on_exit = at_scope_exit([blocking_hooks] {
+ if (blocking_hooks)
+ OnPotentiallyBlockingRegionEnd();
+ });
+
+ for (;;) {
+ u32 cmp = atomic_load(g, memory_order_acquire);
+ if (cmp == kGuardInit) {
+ if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
+ memory_order_relaxed))
+ return 1;
+ } else if (cmp == kGuardDone) {
+ if (!thr->in_ignored_lib)
+ Acquire(thr, pc, (uptr)g);
+ return 0;
+ } else {
+ if ((cmp & kGuardWaiter) ||
+ atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
+ memory_order_relaxed))
+ FutexWait(g, cmp | kGuardWaiter);
+ }
+ }
+}
+
+static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+ u32 v) {
+ if (!thr->in_ignored_lib)
+ Release(thr, pc, (uptr)g);
+ u32 old = atomic_exchange(g, v, memory_order_release);
+ if (old & kGuardWaiter)
+ FutexWake(g, 1 << 30);
+}
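The guard helpers above rely only on the byte-level contract spelled out in the comment. As a rough, illustrative sketch (not part of this commit; the guard variable name is made up), a compiler lowers a function-local static under the Itanium C++ ABI approximately like this, which is where the __cxa_guard_* calls come from:

    Widget &instance() {
      static Widget w;  // the compiler emits a hidden guard variable "guard_w"
      return w;
    }

    // ...is lowered approximately to:
    Widget &instance() {
      if (__atomic_load_n((char *)&guard_w, __ATOMIC_ACQUIRE) == 0) {
        if (__cxa_guard_acquire(&guard_w)) {  // non-zero: we run the initializer
          new (&w) Widget();                  // on exception: __cxa_guard_abort(&guard_w)
          __cxa_guard_release(&guard_w);      // sets the first byte to non-zero
        }
      }
      return w;
    }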
+
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are especially defined as weak functions (so that they don't
@@ -859,7 +916,7 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
// these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support statically linking, so we just use a regular
// interceptor.
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
@@ -869,31 +926,17 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
// Used in thread-safe function static initialization.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
- OnPotentiallyBlockingRegionBegin();
- auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
- for (;;) {
- u32 cmp = atomic_load(g, memory_order_acquire);
- if (cmp == 0) {
- if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
- return 1;
- } else if (cmp == 1) {
- Acquire(thr, pc, (uptr)g);
- return 0;
- } else {
- internal_sched_yield();
- }
- }
+ return guard_acquire(thr, pc, g);
}
STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
- Release(thr, pc, (uptr)g);
- atomic_store(g, 1, memory_order_release);
+ guard_release(thr, pc, g, kGuardDone);
}
STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
- atomic_store(g, 0, memory_order_relaxed);
+ guard_release(thr, pc, g, kGuardInit);
}
namespace __tsan {
@@ -908,15 +951,16 @@ void DestroyThreadState() {
}
void PlatformCleanUpThreadState(ThreadState *thr) {
- ThreadSignalContext *sctx = thr->signal_ctx;
+ ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
+ &thr->signal_ctx, memory_order_relaxed);
if (sctx) {
- thr->signal_ctx = 0;
+ atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
UnmapOrDie(sctx, sizeof(*sctx));
}
}
} // namespace __tsan
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
@@ -935,34 +979,33 @@ static void thread_finalize(void *v) {
struct ThreadParam {
void* (*callback)(void *arg);
void *param;
- atomic_uintptr_t tid;
+ Tid tid;
+ Semaphore created;
+ Semaphore started;
};
extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback;
void *param = p->param;
- int tid = 0;
{
- cur_thread_init();
- ThreadState *thr = cur_thread();
+ ThreadState *thr = cur_thread_init();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(interceptor_ctx()->finalize_key,
(void *)GetPthreadDestructorIterations())) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
- ThreadIgnoreEnd(thr, 0);
+ ThreadIgnoreEnd(thr);
#endif
- while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
- internal_sched_yield();
+ p->created.Wait();
Processor *proc = ProcCreate();
ProcWire(proc, thr);
- ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
- atomic_store(&p->tid, 0, memory_order_release);
+ ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
+ p->started.Post();
}
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -984,9 +1027,11 @@ TSAN_INTERCEPTOR(int, pthread_create,
"fork is not supported. Dying (set die_after_fork=0 to override)\n");
Die();
} else {
- VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
- "fork is not supported (pid %d). Continuing because of "
- "die_after_fork=0, but you are on your own\n", internal_getpid());
+ VPrintf(1,
+ "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %lu). Continuing because of "
+ "die_after_fork=0, but you are on your own\n",
+ internal_getpid());
}
}
__sanitizer_pthread_attr_t myattr;
@@ -1001,18 +1046,18 @@ TSAN_INTERCEPTOR(int, pthread_create,
ThreadParam p;
p.callback = callback;
p.param = param;
- atomic_store(&p.tid, 0, memory_order_relaxed);
+ p.tid = kMainTid;
int res = -1;
{
// Otherwise we see false positives in pthread stack manipulation.
ScopedIgnoreInterceptors ignore;
ThreadIgnoreBegin(thr, pc);
res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
if (res == 0) {
- int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
- CHECK_NE(tid, 0);
+ p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
+ CHECK_NE(p.tid, kMainTid);
    // Synchronization via p.created/p.started serves two purposes:
// 1. ThreadCreate must finish before the new thread starts.
// Otherwise the new thread can call pthread_detach, but the pthread_t
@@ -1020,9 +1065,8 @@ TSAN_INTERCEPTOR(int, pthread_create,
// 2. ThreadStart must finish before this thread continues.
// Otherwise, this thread can call pthread_detach and reset thr->sync
// before the new thread got a chance to acquire from it in ThreadStart.
- atomic_store(&p.tid, tid, memory_order_release);
- while (atomic_load(&p.tid, memory_order_acquire) != 0)
- internal_sched_yield();
+ p.created.Post();
+ p.started.Wait();
}
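The ordering described in the comment above is now enforced with the two semaphores instead of spinning on p.tid; schematically (illustration only):

    // parent (pthread_create)           child (__tsan_thread_start_func)
    // -------------------------         --------------------------------
    // p.tid = ThreadCreate(...)
    // p.created.Post()  ------------->  p.created.Wait()
    //                                   ThreadStart(thr, p.tid, ...)
    // p.started.Wait()  <-------------  p.started.Post()
    // only now may the caller act on the new thread (pthread_detach, etc.)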
if (attr == &myattr)
pthread_attr_destroy(&myattr);
@@ -1031,10 +1075,10 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
if (res == 0) {
ThreadJoin(thr, pc, tid);
}
@@ -1045,7 +1089,7 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
int res = REAL(pthread_detach)(th);
if (res == 0) {
ThreadDetach(thr, pc, tid);
@@ -1056,7 +1100,7 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
{
SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
CHECK_EQ(thr, &cur_thread_placeholder);
#endif
}
@@ -1066,10 +1110,10 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = REAL(pthread_tryjoin_np)(th, ret);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
if (res == 0)
ThreadJoin(thr, pc, tid);
else
@@ -1080,10 +1124,10 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
const struct timespec *abstime) {
SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
if (res == 0)
ThreadJoin(thr, pc, tid);
else
@@ -1152,9 +1196,8 @@ void CondMutexUnlockCtx<Fn>::Unlock() const {
// tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
// since the thread is cancelled, so we have to manually execute them
// (the thread still can run some user code due to pthread_cleanup_push).
- ThreadSignalContext *ctx = SigCtx(thr);
- CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
- atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
// Undo BlockingCall ctor effects.
thr->ignore_interceptors--;
@@ -1225,7 +1268,7 @@ INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
@@ -1292,6 +1335,19 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
return res;
}
+TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
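For context on the EOWNERDEAD branch above: robust mutexes return it when the previous owner died while holding the lock, and MutexRepair resets tsan's model of the mutex accordingly. A hedged user-side sketch (not from this commit):

    pthread_mutexattr_t a;
    pthread_mutexattr_init(&a);
    pthread_mutexattr_setrobust(&a, PTHREAD_MUTEX_ROBUST);
    pthread_mutex_t m;
    pthread_mutex_init(&m, &a);

    int res = pthread_mutex_lock(&m);
    if (res == EOWNERDEAD) {
      // The previous owner died holding m: recover shared state, then mark
      // the mutex usable again.
      pthread_mutex_consistent(&m);
    }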
+
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
int res = REAL(pthread_mutex_trylock)(m);
@@ -1302,7 +1358,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
int res = REAL(pthread_mutex_timedlock)(m, abstime);
@@ -1313,7 +1369,44 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
}
#endif
-#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+
+#if SANITIZER_GLIBC
+# if !__GLIBC_PREREQ(2, 34)
+// glibc 2.34 applies a non-default symbol version to these two functions; they
+// are no longer expected to be intercepted by programs.
+TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(__pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(__pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+# endif
+#endif
+
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
int res = REAL(pthread_spin_init)(m, pshared);
@@ -1396,7 +1489,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
@@ -1426,7 +1519,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
@@ -1444,17 +1537,17 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
- MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
int res = REAL(pthread_barrier_init)(b, a, count);
return res;
}
TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
- MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
int res = REAL(pthread_barrier_destroy)(b);
return res;
}
@@ -1462,9 +1555,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
Release(thr, pc, (uptr)b);
- MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
int res = REAL(pthread_barrier_wait)(b);
- MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
Acquire(thr, pc, (uptr)b);
}
@@ -1478,7 +1571,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
return errno_EINVAL;
atomic_uint32_t *a;
- if (SANITIZER_MAC)
+ if (SANITIZER_APPLE)
a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
else if (SANITIZER_NETBSD)
a = static_cast<atomic_uint32_t*>
@@ -1486,25 +1579,16 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
else
a = static_cast<atomic_uint32_t*>(o);
- u32 v = atomic_load(a, memory_order_acquire);
- if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
- memory_order_relaxed)) {
+  // Mac OS X appears to use pthread_once() in contexts where calling the
+  // BlockingRegion hooks results in crashes due to too little stack space.
+ if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
(*f)();
- if (!thr->in_ignored_lib)
- Release(thr, pc, (uptr)o);
- atomic_store(a, 2, memory_order_release);
- } else {
- while (v != 2) {
- internal_sched_yield();
- v = atomic_load(a, memory_order_acquire);
- }
- if (!thr->in_ignored_lib)
- Acquire(thr, pc, (uptr)o);
+ guard_release(thr, pc, a, kGuardDone);
}
return 0;
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
@@ -1517,20 +1601,20 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
- SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+#if SANITIZER_GLIBC
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- return REAL(fstat)(fd, buf);
+ return REAL(__fxstat)(0, fd, buf);
#else
- SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- return REAL(__fxstat)(0, fd, buf);
+ return REAL(fstat)(fd, buf);
#endif
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
if (fd > 0)
@@ -1542,7 +1626,7 @@ TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
if (fd > 0)
@@ -1624,7 +1708,7 @@ TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
return newfd2;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
int newfd2 = REAL(dup3)(oldfd, newfd, flags);
@@ -1649,11 +1733,10 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
- SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
+ FdClose(thr, pc, fd);
fd = REAL(signalfd)(fd, mask, flags);
- if (fd >= 0)
+ if (!MustIgnoreInterceptor(thr))
FdSignalCreate(thr, pc, fd);
return fd;
}
@@ -1730,17 +1813,16 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
}
TSAN_INTERCEPTOR(int, close, int fd) {
- SCOPED_TSAN_INTERCEPTOR(close, fd);
- if (fd >= 0)
+ SCOPED_INTERCEPTOR_RAW(close, fd);
+ if (!in_symbolizer())
FdClose(thr, pc, fd);
return REAL(close)(fd);
}
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
- SCOPED_TSAN_INTERCEPTOR(__close, fd);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(__close, fd);
+ FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
@@ -1751,13 +1833,10 @@ TSAN_INTERCEPTOR(int, __close, int fd) {
// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
- SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
int fds[64];
int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
- for (int i = 0; i < cnt; i++) {
- if (fds[i] > 0)
- FdClose(thr, pc, fds[i]);
- }
+ for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
@@ -1773,7 +1852,7 @@ TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
int res = REAL(pipe2)(pipefd, flags);
@@ -1838,7 +1917,7 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) {
}
TSAN_INTERCEPTOR(int, closedir, void *dirp) {
- SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ SCOPED_INTERCEPTOR_RAW(closedir, dirp);
if (dirp) {
int fd = dirfd(dirp);
FdClose(thr, pc, fd);
@@ -1869,8 +1948,10 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
FdAccess(thr, pc, epfd);
if (epfd >= 0 && fd >= 0)
FdAccess(thr, pc, fd);
- if (op == EPOLL_CTL_ADD && epfd >= 0)
+ if (op == EPOLL_CTL_ADD && epfd >= 0) {
+ FdPollAdd(thr, pc, epfd, fd);
FdRelease(thr, pc, epfd);
+ }
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
@@ -1896,12 +1977,34 @@ TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
return res;
}
-#define TSAN_MAYBE_INTERCEPT_EPOLL \
- TSAN_INTERCEPT(epoll_create); \
- TSAN_INTERCEPT(epoll_create1); \
- TSAN_INTERCEPT(epoll_ctl); \
- TSAN_INTERCEPT(epoll_wait); \
- TSAN_INTERCEPT(epoll_pwait)
+TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
+ void *sigmask) {
+ SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
+ // This function is new and may not be present in libc and/or kernel.
+ // Since we effectively add it to libc (as will be probed by the program
+ // using dlsym or a weak function pointer) we need to handle the case
+ // when it's not present in the actual libc.
+ if (!REAL(epoll_pwait2)) {
+ errno = errno_ENOSYS;
+ return -1;
+ }
+ if (MustIgnoreInterceptor(thr))
+    return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
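A hedged illustration of the probing pattern mentioned in the comment above (not part of this commit): because the interceptor itself now exports the symbol, a runtime probe in the program always succeeds, so returning ENOSYS when REAL(epoll_pwait2) is null preserves the program's original fallback path:

    typedef int (*epoll_pwait2_t)(int, struct epoll_event *, int,
                                  const struct timespec *, const sigset_t *);
    auto fn = (epoll_pwait2_t)dlsym(RTLD_DEFAULT, "epoll_pwait2");
    if (!fn || (fn(epfd, events, maxevents, &ts, &sigmask) < 0 && errno == ENOSYS)) {
      // fall back to epoll_pwait() with a millisecond timeout
    }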
+
+# define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait); \
+ TSAN_INTERCEPT(epoll_pwait2)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif
@@ -1933,24 +2036,47 @@ TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
namespace __tsan {
+static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
+ VarSizeStackTrace stack;
+  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
+  // expected; OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ rep.SetSigNum(sig);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+}
+
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
- bool sigact, int sig,
- __sanitizer_siginfo *info, void *uctx) {
+ int sig, __sanitizer_siginfo *info,
+ void *uctx) {
+ CHECK(thr->slot);
__sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
if (acquire)
Acquire(thr, 0, (uptr)&sigactions[sig]);
  // Signals are generally asynchronous, so if we receive a signal when
// ignores are enabled we should disable ignores. This is critical for sync
- // and interceptors, because otherwise we can miss syncronization and report
+ // and interceptors, because otherwise we can miss synchronization and report
// false races.
int ignore_reads_and_writes = thr->ignore_reads_and_writes;
int ignore_interceptors = thr->ignore_interceptors;
int ignore_sync = thr->ignore_sync;
+  // For the symbolizer we only process SIGSEGVs synchronously
+  // (a bug in the symbolizer or in tsan). But we want to reset
+  // in_symbolizer to fail gracefully. The symbolizer and user code
+  // use different memory allocators, so if we don't reset
+  // in_symbolizer we can get memory allocated with one allocator
+  // freed with another, which can cause more crashes.
+ int in_symbolizer = thr->in_symbolizer;
if (!ctx->after_multithreaded_fork) {
thr->ignore_reads_and_writes = 0;
thr->fast_state.ClearIgnoreBit();
thr->ignore_interceptors = 0;
thr->ignore_sync = 0;
+ thr->in_symbolizer = 0;
}
// Ensure that the handler does not spoil errno.
const int saved_errno = errno;
@@ -1958,13 +2084,14 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// This code races with sigaction. Be careful to not read sa_sigaction twice.
// Also need to remember pc for reporting before the call,
// because the handler can reset it.
- volatile uptr pc =
- sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
+ volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
+ ? (uptr)sigactions[sig].sigaction
+ : (uptr)sigactions[sig].handler;
if (pc != sig_dfl && pc != sig_ign) {
- if (sigact)
- ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
- else
- ((__sanitizer_sighandler_ptr)pc)(sig);
+ // The callback can be either sa_handler or sa_sigaction.
+ // They have different signatures, but we assume that passing
+ // additional arguments to sa_handler works and is harmless.
+ ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
}
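The assumption in the comment above concerns these two handler shapes; calling the one-argument form through the three-argument pointer simply passes extra arguments that the callee ignores on the ABIs tsan supports:

    void on_signal(int sig);                                  // sa_handler form
    void on_signal_info(int sig, siginfo_t *info, void *uc);  // sa_sigaction form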
if (!ctx->after_multithreaded_fork) {
thr->ignore_reads_and_writes = ignore_reads_and_writes;
@@ -1972,6 +2099,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
thr->fast_state.SetIgnoreBit();
thr->ignore_interceptors = ignore_interceptors;
thr->ignore_sync = ignore_sync;
+ thr->in_symbolizer = in_symbolizer;
}
// We do not detect errno spoiling for SIGTERM,
// because some SIGTERM handlers do spoil errno but reraise SIGTERM,
@@ -1981,27 +2109,16 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// from rtl_generic_sighandler) we have not yet received the reraised
// signal; and it looks too fragile to intercept all ways to reraise a signal.
if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
- errno != 99) {
- VarSizeStackTrace stack;
- // StackTrace::GetNestInstructionPc(pc) is used because return address is
- // expected, OutputReport() will undo this.
- ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
- }
- }
+ errno != 99)
+ ReportErrnoSpoiling(thr, pc, sig);
errno = saved_errno;
}
-void ProcessPendingSignals(ThreadState *thr) {
+void ProcessPendingSignalsImpl(ThreadState *thr) {
+ atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
ThreadSignalContext *sctx = SigCtx(thr);
- if (sctx == 0 ||
- atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ if (sctx == 0)
return;
- atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
internal_sigfillset(&sctx->emptyset);
int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
@@ -2010,8 +2127,8 @@ void ProcessPendingSignals(ThreadState *thr) {
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed) {
signal->armed = false;
- CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
- &signal->siginfo, &signal->ctx);
+ CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
+ &signal->ctx);
}
}
res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
@@ -2021,35 +2138,40 @@ void ProcessPendingSignals(ThreadState *thr) {
} // namespace __tsan
-static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
+static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
+ __sanitizer_siginfo *info) {
+ // If we are sending signal to ourselves, we must process it now.
+ if (sctx && sig == sctx->int_signal_send)
+ return true;
+#if SANITIZER_HAS_SIGINFO
+ // POSIX timers can be configured to send any kind of signal; however, it
+ // doesn't make any sense to consider a timer signal as synchronous!
+ if (info->si_code == SI_TIMER)
+ return false;
+#endif
return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- // If we are sending signal to ourselves, we must process it now.
- (sctx && sig == sctx->int_signal_send);
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
}
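A hedged example of the SI_TIMER case above (illustrative, not from this commit): a POSIX timer may be configured to deliver any signal, even one that is normally synchronous, and such a delivery must be treated as asynchronous:

    struct sigevent sev = {};
    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGTRAP;  // normally "synchronous", here timer-driven
    timer_t t;
    timer_create(CLOCK_MONOTONIC, &sev, &t);
    // When the timer fires, the handler's siginfo has si_code == SI_TIMER,
    // so is_sync_signal() classifies the delivery as asynchronous.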
-void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
- __sanitizer_siginfo *info,
- void *ctx) {
- cur_thread_init();
- ThreadState *thr = cur_thread();
+void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
+ ThreadState *thr = cur_thread_init();
ThreadSignalContext *sctx = SigCtx(thr);
if (sig < 0 || sig >= kSigCount) {
VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
return;
}
// Don't mess with synchronous signals.
- const bool sync = is_sync_signal(sctx, sig);
+ const bool sync = is_sync_signal(sctx, sig, info);
if (sync ||
// If we are in blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
- if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
- atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
- CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
- atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sig, info, ctx);
+ atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
} else {
// Be very conservative with when we do acquire in this case.
// It's unsafe to do acquire in async handlers, because ThreadState
@@ -2057,7 +2179,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
// SIGSYS looks relatively safe -- it's synchronous and can actually
// need some global state.
bool acq = (sig == SIGSYS);
- CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
}
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
return;
@@ -2068,23 +2190,12 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed == false) {
signal->armed = true;
- signal->sigaction = sigact;
- if (info)
- internal_memcpy(&signal->siginfo, info, sizeof(*info));
- if (ctx)
- internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
- atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
}
}
-static void rtl_sighandler(int sig) {
- rtl_generic_sighandler(false, sig, 0, 0);
-}
-
-static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
- rtl_generic_sighandler(true, sig, info, ctx);
-}
-
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
ThreadSignalContext *sctx = SigCtx(thr);
@@ -2118,11 +2229,11 @@ TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
ThreadSignalContext *sctx = SigCtx(thr);
CHECK_NE(sctx, 0);
int prev = sctx->int_signal_send;
- if (tid == pthread_self()) {
+ bool self = pthread_equal(tid, pthread_self());
+ if (self)
sctx->int_signal_send = sig;
- }
int res = REAL(pthread_kill)(tid, sig);
- if (tid == pthread_self()) {
+ if (self) {
CHECK_EQ(sctx->int_signal_send, sig);
sctx->int_signal_send = prev;
}
@@ -2143,7 +2254,7 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
// inside of getaddrinfo. So ignore memory accesses.
ThreadIgnoreBegin(thr, pc);
int res = REAL(getaddrinfo)(node, service, hints, rv);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
return res;
}
@@ -2175,10 +2286,11 @@ void atfork_child() {
return;
ThreadState *thr = cur_thread();
const uptr pc = StackTrace::GetCurrentPc();
- ForkChildAfter(thr, pc);
+ ForkChildAfter(thr, pc, true);
FdOnFork(thr, pc);
}
+#if !SANITIZER_IOS
TSAN_INTERCEPTOR(int, vfork, int fake) {
// Some programs (e.g. openjdk) call close for all file descriptors
// in the child process. Under tsan it leads to false positives, because
@@ -2195,8 +2307,40 @@ TSAN_INTERCEPTOR(int, vfork, int fake) {
// Instead we simply turn vfork into fork.
return WRAP(fork)(fake);
}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
+ void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
+ SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
+ child_tid);
+ struct Arg {
+ int (*fn)(void *);
+ void *arg;
+ };
+ auto wrapper = +[](void *p) -> int {
+ auto *thr = cur_thread();
+ uptr pc = GET_CURRENT_PC();
+ // Start the background thread for fork, but not for clone.
+    // For fork we did this always and it's known to work (or user code has
+    // adapted to it). But if we do this for the new clone interceptor some code
+    // (sandbox2) fails. So keep what we have been doing for years and don't
+    // start the background thread after clone.
+ ForkChildAfter(thr, pc, false);
+ FdOnFork(thr, pc);
+ auto *arg = static_cast<Arg *>(p);
+ return arg->fn(arg->arg);
+ };
+ ForkBefore(thr, pc);
+ Arg arg_wrapper = {fn, arg};
+ int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
+ child_tid);
+ ForkParentAfter(thr, pc);
+ return pid;
+}
+#endif
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
void *data);
struct dl_iterate_phdr_data {
@@ -2207,7 +2351,7 @@ struct dl_iterate_phdr_data {
};
static bool IsAppNotRodata(uptr addr) {
- return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
+ return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
}
static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
@@ -2248,13 +2392,7 @@ static int OnExit(ThreadState *thr) {
return status;
}
-struct TsanInterceptorContext {
- ThreadState *thr;
- const uptr caller_pc;
- const uptr pc;
-};
-
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
static void HandleRecvmsg(ThreadState *thr, uptr pc,
__sanitizer_msghdr *msg) {
int fds[64];
@@ -2275,33 +2413,16 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
-#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
INTERCEPT_FUNCTION_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
(INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
-#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
- true)
-
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
- MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
- ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
- false)
-
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
- TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
- ctx = (void *)&_ctx; \
- (void) ctx;
-
#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ TsanInterceptorContext _ctx = {thr, pc}; \
ctx = (void *)&_ctx; \
- (void) ctx;
+ (void)ctx;
#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
if (path) \
@@ -2314,14 +2435,33 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
if (file) { \
int fd = fileno_unlocked(file); \
- if (fd >= 0) FdClose(thr, pc, fd); \
- }
-
+ FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ \
+ CheckNoDeepBind(filename, flag); \
+ ThreadIgnoreBegin(thr, 0); \
+ void *res = REAL(dlopen)(filename, flag); \
+ ThreadIgnoreEnd(thr); \
+ res; \
+ })
+
+// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
+// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
+// intercepted calls, which can cause deadlocks with ReportRace(), which also
+// uses this code.
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
- libignore()->OnLibraryLoaded(filename)
+ ({ \
+ ScopedIgnoreInterceptors ignore_interceptors; \
+ libignore()->OnLibraryLoaded(filename); \
+ })
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
- libignore()->OnLibraryUnloaded()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ ({ \
+ ScopedIgnoreInterceptors ignore_interceptors; \
+ libignore()->OnLibraryUnloaded(); \
+ })
#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
@@ -2347,34 +2487,17 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
-#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
+ else \
+ __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
OnExit(((TsanInterceptorContext *) ctx)->thr)
-#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
- MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
- MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
- MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
- MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
- MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
off) \
do { \
@@ -2382,7 +2505,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
off); \
} while (false)
-#if !SANITIZER_MAC
+#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
+ do { \
+ return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
+ } while (false)
+
+#if !SANITIZER_APPLE
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, msg)
@@ -2415,12 +2543,14 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
{ return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
+
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
int sigaction_impl(int sig, const __sanitizer_sigaction *act,
__sanitizer_sigaction *old) {
// Note: if we call REAL(sigaction) directly for any reason without proxying
- // the signal handler through rtl_sigaction, very bad things will happen.
+ // the signal handler through sighandler, very bad things will happen.
// The handler will run synchronously and corrupt tsan per-thread state.
SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
if (sig <= 0 || sig >= kSigCount) {
@@ -2443,27 +2573,22 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
sizeof(sigactions[sig].sa_mask));
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
sigactions[sig].sa_restorer = act->sa_restorer;
#endif
internal_memcpy(&newact, act, sizeof(newact));
internal_sigfillset(&newact.sa_mask);
- if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
- if (newact.sa_flags & SA_SIGINFO)
- newact.sigaction = rtl_sigaction;
- else
- newact.handler = rtl_sighandler;
+ if ((act->sa_flags & SA_SIGINFO) ||
+ ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
+ newact.sa_flags |= SA_SIGINFO;
+ newact.sigaction = sighandler;
}
ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
act = &newact;
}
int res = REAL(sigaction)(sig, act, old);
- if (res == 0 && old) {
- uptr cb = (uptr)old->sigaction;
- if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
- internal_memcpy(old, &old_stored, sizeof(*old));
- }
- }
+ if (res == 0 && old && old->sigaction == sighandler)
+ internal_memcpy(old, &old_stored, sizeof(*old));
return res;
}
@@ -2479,27 +2604,23 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
return old.handler;
}
-#define TSAN_SYSCALL() \
+#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
- if (thr->ignore_interceptors) \
- return; \
- ScopedSyscall scoped_syscall(thr) \
-/**/
+ if (thr->ignore_interceptors) \
+ return; \
+ ScopedSyscall scoped_syscall(thr)
struct ScopedSyscall {
ThreadState *thr;
- explicit ScopedSyscall(ThreadState *thr)
- : thr(thr) {
- Initialize(thr);
- }
+ explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
~ScopedSyscall() {
ProcessPendingSignals(thr);
}
};
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2508,29 +2629,29 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
static USED void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
- DPrintf("syscall_acquire(%p)\n", addr);
+ DPrintf("syscall_acquire(0x%zx))\n", addr);
}
static USED void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
- DPrintf("syscall_release(%p)\n", addr);
+ DPrintf("syscall_release(0x%zx)\n", addr);
Release(thr, pc, addr);
}
static void syscall_fd_close(uptr pc, int fd) {
- TSAN_SYSCALL();
+ auto *thr = cur_thread();
FdClose(thr, pc, fd);
}
static USED void syscall_fd_acquire(uptr pc, int fd) {
TSAN_SYSCALL();
FdAcquire(thr, pc, fd);
- DPrintf("syscall_fd_acquire(%p)\n", fd);
+ DPrintf("syscall_fd_acquire(%d)\n", fd);
}
static USED void syscall_fd_release(uptr pc, int fd) {
TSAN_SYSCALL();
- DPrintf("syscall_fd_release(%p)\n", fd);
+ DPrintf("syscall_fd_release(%d)\n", fd);
FdRelease(thr, pc, fd);
}
@@ -2540,7 +2661,7 @@ static void syscall_post_fork(uptr pc, int pid) {
ThreadState *thr = cur_thread();
if (pid == 0) {
// child
- ForkChildAfter(thr, pc);
+ ForkChildAfter(thr, pc, true);
FdOnFork(thr, pc);
} else if (pid > 0) {
// parent
@@ -2653,6 +2774,26 @@ TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
#define TSAN_MAYBE_INTERCEPT_THR_EXIT
#endif
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
+
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
@@ -2660,7 +2801,9 @@ TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
@@ -2683,7 +2826,7 @@ static void finalize(void *arg) {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
static void unreachable() {
Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
@@ -2694,35 +2837,20 @@ static void unreachable() {
SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
void InitializeInterceptors() {
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
  // We need to set it up early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
#endif
- // Instruct libc malloc to consume less memory.
-#if SANITIZER_GLIBC
- mallopt(1, 0); // M_MXFAST
- mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
-#endif
-
new(interceptor_ctx()) InterceptorContext();
InitializeCommonInterceptors();
InitializeSignalInterceptors();
InitializeLibdispatchInterceptors();
-#if !SANITIZER_MAC
- // We can not use TSAN_INTERCEPT to get setjmp addr,
- // because it does &setjmp and setjmp is not present in some versions of libc.
- using __interception::InterceptFunction;
- InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
- InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
- InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
- 0);
-#if !SANITIZER_NETBSD
- InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
-#endif
+#if !SANITIZER_APPLE
+ InitializeSetjmpInterceptors();
#endif
TSAN_INTERCEPT(longjmp_symname);
@@ -2768,8 +2896,16 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_lock);
TSAN_INTERCEPT(pthread_mutex_trylock);
TSAN_INTERCEPT(pthread_mutex_timedlock);
+ TSAN_INTERCEPT(pthread_mutex_unlock);
+#if SANITIZER_GLIBC
+# if !__GLIBC_PREREQ(2, 34)
+ TSAN_INTERCEPT(__pthread_mutex_lock);
+ TSAN_INTERCEPT(__pthread_mutex_unlock);
+# endif
+#endif
TSAN_INTERCEPT(pthread_spin_init);
TSAN_INTERCEPT(pthread_spin_destroy);
@@ -2843,6 +2979,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(fork);
TSAN_INTERCEPT(vfork);
+#if SANITIZER_LINUX
+ TSAN_INTERCEPT(clone);
+#endif
#if !SANITIZER_ANDROID
TSAN_INTERCEPT(dl_iterate_phdr);
#endif
@@ -2862,7 +3001,7 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT__LWP_EXIT;
TSAN_MAYBE_INTERCEPT_THR_EXIT;
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
  // Need to set it up, because interceptors check that the function is resolved.
  // But atexit is emitted directly into the module, so it can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
@@ -2877,13 +3016,33 @@ void InitializeInterceptors() {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
Printf("ThreadSanitizer: failed to create thread key\n");
Die();
}
#endif
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
+
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
@@ -2891,7 +3050,9 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
@@ -2920,25 +3081,40 @@ void InitializeInterceptors() {
// Note that no_sanitize_thread attribute does not turn off atomic interception
// so attaching it to the function defined in user code does not help.
// That's why we now have what we have.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
- if (count >= (1 << 8)) {
- Printf("barrier_init: count is too large (%d)\n", count);
- Die();
+constexpr u32 kBarrierThreadBits = 10;
+constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
+
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+ atomic_uint32_t *barrier, u32 num_threads) {
+ if (num_threads >= kBarrierThreads) {
+ Printf("barrier_init: count is too large (%d)\n", num_threads);
+ Die();
}
- // 8 lsb is thread count, the remaining are count of entered threads.
- *barrier = count;
+  // The low kBarrierThreadBits bits hold the thread count;
+  // the remaining bits count the threads that have entered.
+ atomic_store(barrier, num_threads, memory_order_relaxed);
+}
+
+static u32 barrier_epoch(u32 value) {
+ return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_testonly_barrier_wait(u64 *barrier) {
- unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
- unsigned old_epoch = (old >> 8) / (old & 0xff);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+ atomic_uint32_t *barrier) {
+ u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
+ u32 old_epoch = barrier_epoch(old);
+ if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
+ FutexWake(barrier, (1 << 30));
+ return;
+ }
for (;;) {
- unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
- unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
- if (cur_epoch != old_epoch)
+ u32 cur = atomic_load(barrier, memory_order_relaxed);
+ if (barrier_epoch(cur) != old_epoch)
return;
- internal_sched_yield();
+ FutexWait(barrier, cur);
}
}
+
+} // extern "C"
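A worked example of the encoding above (illustrative): with num_threads = 4 the barrier word starts at 4, every waiter adds 1 << 10, and the epoch is entered_count / 4:

    atomic_uint32_t barrier;
    __tsan_testonly_barrier_init(&barrier, 4);  // word = 4, epoch = 0
    // waiters 1-3 see old = 4, 1028, 2052: the epoch stays 0, so they FutexWait()
    // waiter 4 sees old = 3076; the new value 4100 gives epoch (4100 >> 10) / 4 == 1,
    // so it FutexWake()s the others and all four return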
lib/tsan/tsan_interface.cpp
@@ -20,108 +20,43 @@
using namespace __tsan;
-void __tsan_init() {
- cur_thread_init();
- Initialize(cur_thread());
-}
+void __tsan_init() { Initialize(cur_thread_init()); }
void __tsan_flush_memory() {
FlushShadowMemory();
}
-void __tsan_read16(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
-}
-
-void __tsan_write16(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
-}
-
void __tsan_read16_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+ uptr pc_no_pac = STRIP_PAC_PC(pc);
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+ MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
}
void __tsan_write16_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+ uptr pc_no_pac = STRIP_PAC_PC(pc);
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+ MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
-void __tsan_unaligned_read2(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
-}
-
-void __tsan_unaligned_read4(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
-}
-
-void __tsan_unaligned_read8(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
-}
-
void __tsan_unaligned_read16(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
-}
-
-void __tsan_unaligned_write2(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
-}
-
-void __tsan_unaligned_write4(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
-}
-
-void __tsan_unaligned_write8(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+ UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
}
void __tsan_unaligned_write16(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+ UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
}
-// __sanitizer_unaligned_load/store are for user instrumentation.
-
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE
-u16 __sanitizer_unaligned_load16(const uu16 *addr) {
- __tsan_unaligned_read2(addr);
- return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-u32 __sanitizer_unaligned_load32(const uu32 *addr) {
- __tsan_unaligned_read4(addr);
- return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-u64 __sanitizer_unaligned_load64(const uu64 *addr) {
- __tsan_unaligned_read8(addr);
- return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
- __tsan_unaligned_write2(addr);
- *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
- __tsan_unaligned_write4(addr);
- *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
- __tsan_unaligned_write8(addr);
- *addr = v;
-}
-
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_get_current_fiber() {
return cur_thread();
lib/tsan/tsan_interface.h
@@ -32,6 +32,9 @@ extern "C" {
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
+__tsan_default_options();
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
@@ -72,12 +75,21 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_vptr_update(void **vptr_p, void *new_val);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memcpy(void *dest, const void *src, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memset(void *dest, int ch, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memmove(void *dest, const void *src, uptr count);
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_on_thread_idle();
+
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_external_register_tag(const char *object_type);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -95,9 +107,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_write_range(void *addr, unsigned long size);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_read_range_pc(void *addr, unsigned long size, void *pc); // NOLINT
+void __tsan_read_range_pc(void *addr, unsigned long size, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_write_range_pc(void *addr, unsigned long size, void *pc); // NOLINT
+void __tsan_write_range_pc(void *addr, unsigned long size, void *pc);
// User may provide function that would be called right when TSan detects
// an error. The argument 'report' is an opaque pointer that can be used to
@@ -417,12 +429,6 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
u8 *a);
-SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_on_initialize();
-
-SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_on_finalize(int failed);
-
} // extern "C"
} // namespace __tsan
lib/tsan/tsan_interface.inc
@@ -0,0 +1,190 @@
+//===-- tsan_interface.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_ptrauth.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+void __tsan_read1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+void __tsan_read16(void *addr) {
+ MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessRead);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+void __tsan_write16(void *addr) {
+ MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessWrite);
+}
+
+void __tsan_read1_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessWrite | kAccessExternalPC);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read2(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read4(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read8(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write2(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write4(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write8(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+extern "C" {
+// __sanitizer_unaligned_load/store are for user instrumentation.
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ *addr = v;
+ __tsan_unaligned_write2(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ *addr = v;
+ __tsan_unaligned_write4(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ *addr = v;
+ __tsan_unaligned_write8(addr);
+}
+}
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ if (*vptr_p == new_val)
+ return;
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+ kAccessWrite | kAccessVptr);
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+ kAccessRead | kAccessVptr);
+}
+
+void __tsan_func_entry(void *pc) { FuncEntry(cur_thread(), STRIP_PAC_PC(pc)); }
+
+void __tsan_func_exit() { FuncExit(cur_thread()); }
+
+void __tsan_ignore_thread_begin() { ThreadIgnoreBegin(cur_thread(), CALLERPC); }
+
+void __tsan_ignore_thread_end() { ThreadIgnoreEnd(cur_thread()); }
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
+
+void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
+}
+
+void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
+}
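
For orientation, the entry points defined in this new file are what the compiler's TSan pass calls around instrumented accesses. A conceptual, hand-written approximation (not literal compiler output) of how a plain 4-byte store reaches them:

// Rough sketch of the instrumentation emitted for `*p = 1;`.
void set_flag_instrumented(int *p) {
  __tsan_func_entry(__builtin_return_address(0));  // push shadow call stack frame
  __tsan_write4(p);                                // record the 4-byte write at p
  *p = 1;                                          // the original store
  __tsan_func_exit();                              // pop shadow call stack frame
}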
lib/tsan/tsan_interface_ann.cpp
@@ -43,15 +43,14 @@ class ScopedAnnotation {
ThreadState *const thr_;
};
-#define SCOPED_ANNOTATION_RET(typ, ret) \
- if (!flags()->enable_annotations) \
- return ret; \
- ThreadState *thr = cur_thread(); \
- const uptr caller_pc = (uptr)__builtin_return_address(0); \
- ScopedAnnotation sa(thr, __func__, caller_pc); \
- const uptr pc = StackTrace::GetCurrentPc(); \
- (void)pc; \
-/**/
+#define SCOPED_ANNOTATION_RET(typ, ret) \
+ if (!flags()->enable_annotations) \
+ return ret; \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc;
#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
@@ -71,7 +70,6 @@ struct ExpectRace {
struct DynamicAnnContext {
Mutex mtx;
- ExpectRace expect;
ExpectRace benign;
DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
@@ -90,7 +88,7 @@ static void AddExpectRace(ExpectRace *list,
return;
}
}
- race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+ race = static_cast<ExpectRace *>(Alloc(sizeof(ExpectRace)));
race->addr = addr;
race->size = size;
race->file = f;
@@ -137,81 +135,12 @@ static void InitList(ExpectRace *list) {
void InitializeDynamicAnnotations() {
dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
- InitList(&dyn_ann_ctx->expect);
InitList(&dyn_ann_ctx->benign);
}
bool IsExpectedReport(uptr addr, uptr size) {
ReadLock lock(&dyn_ann_ctx->mtx);
- if (CheckContains(&dyn_ann_ctx->expect, addr, size))
- return true;
- if (CheckContains(&dyn_ann_ctx->benign, addr, size))
- return true;
- return false;
-}
-
-static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
- int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
- ExpectRace *list = &dyn_ann_ctx->benign;
- for (ExpectRace *race = list->next; race != list; race = race->next) {
- (*unique_count)++;
- const uptr cnt = atomic_load_relaxed(&(race->*counter));
- if (cnt == 0)
- continue;
- *hit_count += cnt;
- uptr i = 0;
- for (; i < matched->Size(); i++) {
- ExpectRace *race0 = &(*matched)[i];
- if (race->line == race0->line
- && internal_strcmp(race->file, race0->file) == 0
- && internal_strcmp(race->desc, race0->desc) == 0) {
- atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
- break;
- }
- }
- if (i == matched->Size())
- matched->PushBack(*race);
- }
-}
-
-void PrintMatchedBenignRaces() {
- Lock lock(&dyn_ann_ctx->mtx);
- int unique_count = 0;
- int hit_count = 0;
- int add_count = 0;
- Vector<ExpectRace> hit_matched;
- CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
- &ExpectRace::hitcount);
- Vector<ExpectRace> add_matched;
- CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
- &ExpectRace::addcount);
- if (hit_matched.Size()) {
- Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
- hit_count, (int)internal_getpid());
- for (uptr i = 0; i < hit_matched.Size(); i++) {
- Printf("%d %s:%d %s\n",
- atomic_load_relaxed(&hit_matched[i].hitcount),
- hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
- }
- }
- if (hit_matched.Size()) {
- Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
- " (pid=%d):\n",
- add_count, unique_count, (int)internal_getpid());
- for (uptr i = 0; i < add_matched.Size(); i++) {
- Printf("%d %s:%d %s\n",
- atomic_load_relaxed(&add_matched[i].addcount),
- add_matched[i].file, add_matched[i].line, add_matched[i].desc);
- }
- }
-}
-
-static void ReportMissedExpectedRace(ExpectRace *race) {
- Printf("==================\n");
- Printf("WARNING: ThreadSanitizer: missed expected data race\n");
- Printf(" %s addr=%zx %s:%d\n",
- race->desc, race->addr, race->file, race->line);
- Printf("==================\n");
+ return CheckContains(&dyn_ann_ctx->benign, addr, size);
}
} // namespace __tsan
@@ -229,20 +158,16 @@ void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
}
void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
- SCOPED_ANNOTATION(AnnotateCondVarSignal);
}
void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
- SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
}
void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
- SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
}
void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
uptr lock) {
- SCOPED_ANNOTATION(AnnotateCondVarWait);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
@@ -279,86 +204,56 @@ void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
}
void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
- SCOPED_ANNOTATION(AnnotateTraceMemory);
}
void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
- SCOPED_ANNOTATION(AnnotateFlushState);
}
void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
uptr size) {
- SCOPED_ANNOTATION(AnnotateNewMemory);
}
void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
- SCOPED_ANNOTATION(AnnotateNoOp);
}
void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
- SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
- Lock lock(&dyn_ann_ctx->mtx);
- while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
- ExpectRace *race = dyn_ann_ctx->expect.next;
- if (atomic_load_relaxed(&race->hitcount) == 0) {
- ctx->nmissed_expected++;
- ReportMissedExpectedRace(race);
- }
- race->prev->next = race->next;
- race->next->prev = race->prev;
- internal_free(race);
- }
}
void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
char *f, int l, int enable) {
- SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
- // FIXME: Reconsider this functionality later. It may be irrelevant.
}
void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
char *f, int l, uptr mu) {
- SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
}
void INTERFACE_ATTRIBUTE AnnotatePCQGet(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQGet);
}
void INTERFACE_ATTRIBUTE AnnotatePCQPut(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQPut);
}
void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQDestroy);
}
void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQCreate);
}
void INTERFACE_ATTRIBUTE AnnotateExpectRace(
char *f, int l, uptr mem, char *desc) {
- SCOPED_ANNOTATION(AnnotateExpectRace);
- Lock lock(&dyn_ann_ctx->mtx);
- AddExpectRace(&dyn_ann_ctx->expect,
- f, l, mem, 1, desc);
- DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}
-static void BenignRaceImpl(
- char *f, int l, uptr mem, uptr size, char *desc) {
+static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
Lock lock(&dyn_ann_ctx->mtx);
AddExpectRace(&dyn_ann_ctx->benign,
f, l, mem, size, desc);
DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}
-// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
char *f, int l, uptr mem, uptr size, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRaceSized);
@@ -378,7 +273,7 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
@@ -388,7 +283,7 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
@@ -398,17 +293,15 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
- ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
}
void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
char *f, int l, uptr addr, uptr size) {
- SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
}
void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
char *f, int l, uptr addr, uptr size) {
- SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
}
void INTERFACE_ATTRIBUTE AnnotateThreadName(
@@ -421,11 +314,9 @@ void INTERFACE_ATTRIBUTE AnnotateThreadName(
// WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate
// atomic operations, which should be handled by ThreadSanitizer correctly.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
- SCOPED_ANNOTATION(AnnotateHappensBefore);
}
void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
- SCOPED_ANNOTATION(AnnotateHappensAfter);
}
void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
@@ -477,15 +368,15 @@ void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
else
MutexPreLock(thr, pc, (uptr)m);
}
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
SCOPED_ANNOTATION(__tsan_mutex_post_lock);
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
if (!(flagz & MutexFlagTryLockFailed)) {
if (flagz & MutexFlagReadLock)
MutexPostReadLock(thr, pc, (uptr)m, flagz);
@@ -504,44 +395,44 @@ int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
} else {
ret = MutexUnlock(thr, pc, (uptr)m, flagz);
}
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
return ret;
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_post_signal);
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
// Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_post_divert);
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
}
} // extern "C"
lib/tsan/tsan_interface_atomic.cpp
@@ -32,6 +32,7 @@ using namespace __tsan;
static StaticSpinMutex mutex128;
#endif
+#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
|| mo == mo_acquire || mo == mo_seq_cst;
@@ -40,6 +41,7 @@ static bool IsLoadOrder(morder mo) {
static bool IsStoreOrder(morder mo) {
return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
+#endif
static bool IsReleaseOrder(morder mo) {
return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
@@ -161,16 +163,16 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
}
#endif
-template<typename T>
-static int SizeLog() {
+template <typename T>
+static int AccessSize() {
if (sizeof(T) <= 1)
- return kSizeLog1;
+ return 1;
else if (sizeof(T) <= 2)
- return kSizeLog2;
+ return 2;
else if (sizeof(T) <= 4)
- return kSizeLog4;
+ return 4;
else
- return kSizeLog8;
+ return 8;
// For 16-byte atomics we also use 8-byte memory access,
// this leads to false negatives only in very obscure cases.
}
@@ -202,7 +204,7 @@ static memory_order to_mo(morder mo) {
case mo_acq_rel: return memory_order_acq_rel;
case mo_seq_cst: return memory_order_seq_cst;
}
- CHECK(0);
+ DCHECK(0);
return memory_order_seq_cst;
}
@@ -219,27 +221,28 @@ static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
#endif
template <typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
- morder mo) NO_THREAD_SAFETY_ANALYSIS {
- CHECK(IsLoadOrder(mo));
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+ DCHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
if (!IsAcquireOrder(mo)) {
- MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessRead | kAccessAtomic);
return NoTsanAtomicLoad(a, mo);
}
// Don't create sync object if it does not exist yet. For example, an atomic
// pointer is initialized to nullptr and then periodically acquire-loaded.
T v = NoTsanAtomicLoad(a, mo);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+ SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
if (s) {
- AcquireImpl(thr, pc, &s->clock);
+ SlotLocker locker(thr);
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
// Re-read under sync mutex because we need a consistent snapshot
// of the value and the clock we acquire.
v = NoTsanAtomicLoad(a, mo);
- s->mtx.ReadUnlock();
}
- MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
return v;
}
@@ -257,9 +260,9 @@ static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) NO_THREAD_SAFETY_ANALYSIS {
- CHECK(IsStoreOrder(mo));
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ morder mo) {
+ DCHECK(IsStoreOrder(mo));
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
// This fast-path is critical for performance.
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
@@ -268,36 +271,35 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
NoTsanAtomicStore(a, v, mo);
return;
}
- __sync_synchronize();
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreImpl(thr, pc, &s->clock);
- NoTsanAtomicStore(a, v, mo);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ NoTsanAtomicStore(a, v, mo);
+ }
+ IncrementEpoch(thr);
}
template <typename T, T (*F)(volatile T *v, T op)>
-static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) NO_THREAD_SAFETY_ANALYSIS {
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
- SyncVar *s = 0;
- if (mo != mo_relaxed) {
- s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed))
+ return F(a, v);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, IsReleaseOrder(mo));
if (IsAcqRelOrder(mo))
- AcquireReleaseImpl(thr, pc, &s->clock);
+ thr->clock.ReleaseAcquire(&s->clock);
else if (IsReleaseOrder(mo))
- ReleaseImpl(thr, pc, &s->clock);
+ thr->clock.Release(&s->clock);
else if (IsAcquireOrder(mo))
- AcquireImpl(thr, pc, &s->clock);
+ thr->clock.Acquire(s->clock);
+ v = F(a, v);
}
- v = F(a, v);
- if (s)
- s->mtx.Unlock();
+ if (IsReleaseOrder(mo))
+ IncrementEpoch(thr);
return v;
}
@@ -402,46 +404,44 @@ static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
}
template <typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo,
- morder fmo) NO_THREAD_SAFETY_ANALYSIS {
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
+ morder mo, morder fmo) {
// 31.7.2.18: "The failure argument shall not be memory_order_release
// nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
// (mo_relaxed) when those are used.
- CHECK(IsLoadOrder(fmo));
-
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
- SyncVar *s = 0;
- bool write_lock = IsReleaseOrder(mo);
-
- if (mo != mo_relaxed || fmo != mo_relaxed)
- s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
-
- T cc = *c;
- T pr = func_cas(a, cc, v);
- bool success = pr == cc;
- if (!success) {
+ DCHECK(IsLoadOrder(fmo));
+
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (pr == cc)
+ return true;
*c = pr;
- mo = fmo;
+ return false;
}
-
- if (s) {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
+ SlotLocker locker(thr);
+ bool release = IsReleaseOrder(mo);
+ bool success;
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, release);
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ success = pr == cc;
+ if (!success) {
+ *c = pr;
+ mo = fmo;
+ }
if (success && IsAcqRelOrder(mo))
- AcquireReleaseImpl(thr, pc, &s->clock);
+ thr->clock.ReleaseAcquire(&s->clock);
else if (success && IsReleaseOrder(mo))
- ReleaseImpl(thr, pc, &s->clock);
+ thr->clock.Release(&s->clock);
else if (IsAcquireOrder(mo))
- AcquireImpl(thr, pc, &s->clock);
-
- if (write_lock)
- s->mtx.Unlock();
- else
- s->mtx.ReadUnlock();
+ thr->clock.Acquire(s->clock);
}
-
+ if (success && release)
+ IncrementEpoch(thr);
return success;
}
@@ -485,380 +485,356 @@ static morder convert_morder(morder mo) {
return (morder)(mo & 0x7fff);
}
-#define SCOPED_ATOMIC(func, ...) \
- ThreadState *const thr = cur_thread(); \
- if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
- ProcessPendingSignals(thr); \
- return NoTsanAtomic##func(__VA_ARGS__); \
- } \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = StackTrace::GetCurrentPc(); \
- mo = convert_morder(mo); \
- ScopedAtomic sa(thr, callpc, a, mo, __func__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
-class ScopedAtomic {
- public:
- ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
- morder mo, const char *func)
- : thr_(thr) {
- FuncEntry(thr_, pc);
- DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- }
- ~ScopedAtomic() {
- ProcessPendingSignals(thr_);
- FuncExit(thr_);
- }
- private:
- ThreadState *thr_;
-};
+# define ATOMIC_IMPL(func, ...) \
+ ThreadState *const thr = cur_thread(); \
+ ProcessPendingSignals(thr); \
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ mo = convert_morder(mo); \
+ return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo) {
- char* a = 0;
- SCOPED_ATOMIC(Fence, mo);
-}
+void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
@@ -869,25 +845,23 @@ void __tsan_atomic_signal_fence(morder mo) {
// Go
-#define ATOMIC(func, ...) \
- if (thr->ignore_sync) { \
- NoTsanAtomic##func(__VA_ARGS__); \
- } else { \
- FuncEntry(thr, cpc); \
+# define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
Atomic##func(thr, pc, __VA_ARGS__); \
- FuncExit(thr); \
- } \
-/**/
-
-#define ATOMIC_RET(func, ret, ...) \
- if (thr->ignore_sync) { \
- (ret) = NoTsanAtomic##func(__VA_ARGS__); \
- } else { \
- FuncEntry(thr, cpc); \
+ FuncExit(thr); \
+ }
+
+# define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
(ret) = Atomic##func(thr, pc, __VA_ARGS__); \
- FuncExit(thr); \
- } \
-/**/
+ FuncExit(thr); \
+ }
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
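
To make the new ATOMIC_IMPL shape concrete, __tsan_atomic32_load now expands to roughly the following (the macro from above written out by hand). Unlike the old SCOPED_ATOMIC, there is no per-call FuncEntry/FuncExit bracketing, and ProcessPendingSignals runs unconditionally up front:

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ThreadState *const thr = cur_thread();
  ProcessPendingSignals(thr);
  if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
    return NoTsanAtomicLoad(a, mo);
  mo = convert_morder(mo);
  return AtomicLoad(thr, GET_CALLER_PC(), a, mo);
}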
lib/tsan/tsan_interface_inl.h
@@ -1,133 +0,0 @@
-//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#include "tsan_interface.h"
-#include "tsan_rtl.h"
-#include "sanitizer_common/sanitizer_ptrauth.h"
-
-#define CALLERPC ((uptr)__builtin_return_address(0))
-
-using namespace __tsan;
-
-void __tsan_read1(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
-}
-
-void __tsan_read2(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
-}
-
-void __tsan_read4(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
-}
-
-void __tsan_read8(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-}
-
-void __tsan_write1(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
-}
-
-void __tsan_write2(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
-}
-
-void __tsan_write4(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
-}
-
-void __tsan_write8(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-}
-
-void __tsan_read1_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
-}
-
-void __tsan_read2_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
-}
-
-void __tsan_read4_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
-}
-
-void __tsan_read8_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-}
-
-void __tsan_write1_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
-}
-
-void __tsan_write2_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
-}
-
-void __tsan_write4_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
-}
-
-void __tsan_write8_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-}
-
-void __tsan_vptr_update(void **vptr_p, void *new_val) {
- CHECK_EQ(sizeof(vptr_p), 8);
- if (*vptr_p != new_val) {
- ThreadState *thr = cur_thread();
- thr->is_vptr_access = true;
- MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
- thr->is_vptr_access = false;
- }
-}
-
-void __tsan_vptr_read(void **vptr_p) {
- CHECK_EQ(sizeof(vptr_p), 8);
- ThreadState *thr = cur_thread();
- thr->is_vptr_access = true;
- MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
- thr->is_vptr_access = false;
-}
-
-void __tsan_func_entry(void *pc) {
- FuncEntry(cur_thread(), STRIP_PAC_PC(pc));
-}
-
-void __tsan_func_exit() {
- FuncExit(cur_thread());
-}
-
-void __tsan_ignore_thread_begin() {
- ThreadIgnoreBegin(cur_thread(), CALLERPC);
-}
-
-void __tsan_ignore_thread_end() {
- ThreadIgnoreEnd(cur_thread(), CALLERPC);
-}
-
-void __tsan_read_range(void *addr, uptr size) {
- MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
-}
-
-void __tsan_write_range(void *addr, uptr size) {
- MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
-}
-
-void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
-}
-
-void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
-}
lib/tsan/tsan_interface_java.cpp
@@ -34,52 +34,49 @@ struct JavaContext {
}
};
-class ScopedJavaFunc {
- public:
- ScopedJavaFunc(ThreadState *thr, uptr pc)
- : thr_(thr) {
- Initialize(thr_);
- FuncEntry(thr, pc);
- }
-
- ~ScopedJavaFunc() {
- FuncExit(thr_);
- // FIXME(dvyukov): process pending signals.
- }
-
- private:
- ThreadState *thr_;
-};
-
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
+MBlock *JavaHeapBlock(uptr addr, uptr *start) {
+ if (!jctx || addr < jctx->heap_begin ||
+ addr >= jctx->heap_begin + jctx->heap_size)
+ return nullptr;
+ for (uptr p = RoundDown(addr, kMetaShadowCell); p >= jctx->heap_begin;
+ p -= kMetaShadowCell) {
+ MBlock *b = ctx->metamap.GetBlock(p);
+ if (!b)
+ continue;
+ if (p + b->siz <= addr)
+ return nullptr;
+ *start = p;
+ return b;
+ }
+ return nullptr;
+}
+
} // namespace __tsan
-#define SCOPED_JAVA_FUNC(func) \
+#define JAVA_FUNC_ENTER(func) \
ThreadState *thr = cur_thread(); \
- const uptr caller_pc = GET_CALLER_PC(); \
- const uptr pc = StackTrace::GetCurrentPc(); \
- (void)pc; \
- ScopedJavaFunc scoped(thr, caller_pc); \
-/**/
+ (void)thr;
void __tsan_java_init(jptr heap_begin, jptr heap_size) {
- SCOPED_JAVA_FUNC(__tsan_java_init);
- DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
- CHECK_EQ(jctx, 0);
- CHECK_GT(heap_begin, 0);
- CHECK_GT(heap_size, 0);
- CHECK_EQ(heap_begin % kHeapAlignment, 0);
- CHECK_EQ(heap_size % kHeapAlignment, 0);
- CHECK_LT(heap_begin, heap_begin + heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_init);
+ Initialize(thr);
+ DPrintf("#%d: java_init(0x%zx, 0x%zx)\n", thr->tid, heap_begin, heap_size);
+ DCHECK_EQ(jctx, 0);
+ DCHECK_GT(heap_begin, 0);
+ DCHECK_GT(heap_size, 0);
+ DCHECK_EQ(heap_begin % kHeapAlignment, 0);
+ DCHECK_EQ(heap_size % kHeapAlignment, 0);
+ DCHECK_LT(heap_begin, heap_begin + heap_size);
jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
}
int __tsan_java_fini() {
- SCOPED_JAVA_FUNC(__tsan_java_fini);
+ JAVA_FUNC_ENTER(__tsan_java_fini);
DPrintf("#%d: java_fini()\n", thr->tid);
- CHECK_NE(jctx, 0);
+ DCHECK_NE(jctx, 0);
// FIXME(dvyukov): this does not call atexit() callbacks.
int status = Finalize(thr);
DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
@@ -87,74 +84,65 @@ int __tsan_java_fini() {
}
void __tsan_java_alloc(jptr ptr, jptr size) {
- SCOPED_JAVA_FUNC(__tsan_java_alloc);
- DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
- CHECK_NE(jctx, 0);
- CHECK_NE(size, 0);
- CHECK_EQ(ptr % kHeapAlignment, 0);
- CHECK_EQ(size % kHeapAlignment, 0);
- CHECK_GE(ptr, jctx->heap_begin);
- CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
-
- OnUserAlloc(thr, pc, ptr, size, false);
+ JAVA_FUNC_ENTER(__tsan_java_alloc);
+ DPrintf("#%d: java_alloc(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(ptr % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(ptr, jctx->heap_begin);
+ DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ OnUserAlloc(thr, 0, ptr, size, false);
}
void __tsan_java_free(jptr ptr, jptr size) {
- SCOPED_JAVA_FUNC(__tsan_java_free);
- DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
- CHECK_NE(jctx, 0);
- CHECK_NE(size, 0);
- CHECK_EQ(ptr % kHeapAlignment, 0);
- CHECK_EQ(size % kHeapAlignment, 0);
- CHECK_GE(ptr, jctx->heap_begin);
- CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
-
- ctx->metamap.FreeRange(thr->proc(), ptr, size);
+ JAVA_FUNC_ENTER(__tsan_java_free);
+ DPrintf("#%d: java_free(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(ptr % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(ptr, jctx->heap_begin);
+ DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ ctx->metamap.FreeRange(thr->proc(), ptr, size, false);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
- SCOPED_JAVA_FUNC(__tsan_java_move);
- DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
- CHECK_NE(jctx, 0);
- CHECK_NE(size, 0);
- CHECK_EQ(src % kHeapAlignment, 0);
- CHECK_EQ(dst % kHeapAlignment, 0);
- CHECK_EQ(size % kHeapAlignment, 0);
- CHECK_GE(src, jctx->heap_begin);
- CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
- CHECK_GE(dst, jctx->heap_begin);
- CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
- CHECK_NE(dst, src);
- CHECK_NE(size, 0);
+ JAVA_FUNC_ENTER(__tsan_java_move);
+ DPrintf("#%d: java_move(0x%zx, 0x%zx, 0x%zx)\n", thr->tid, src, dst, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(src % kHeapAlignment, 0);
+ DCHECK_EQ(dst % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(src, jctx->heap_begin);
+ DCHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+ DCHECK_GE(dst, jctx->heap_begin);
+ DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+ DCHECK_NE(dst, src);
+ DCHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
ctx->metamap.MoveMemory(src, dst, size);
- // Move shadow.
- u64 *s = (u64*)MemToShadow(src);
- u64 *d = (u64*)MemToShadow(dst);
- u64 *send = (u64*)MemToShadow(src + size);
- uptr inc = 1;
- if (dst > src) {
- s = (u64*)MemToShadow(src + size) - 1;
- d = (u64*)MemToShadow(dst + size) - 1;
- send = (u64*)MemToShadow(src) - 1;
- inc = -1;
- }
- for (; s != send; s += inc, d += inc) {
- *d = *s;
- *s = 0;
- }
+ // Clear the destination shadow range.
+ // We used to move shadow from src to dst, but the trace format does not
+ // support that anymore as it contains addresses of accesses.
+ RawShadow *d = MemToShadow(dst);
+ RawShadow *dend = MemToShadow(dst + size);
+ ShadowSet(d, dend, Shadow::kEmpty);
}
jptr __tsan_java_find(jptr *from_ptr, jptr to) {
- SCOPED_JAVA_FUNC(__tsan_java_find);
- DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
- CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
- CHECK_EQ(to % kHeapAlignment, 0);
- CHECK_GE(*from_ptr, jctx->heap_begin);
- CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_find);
+ DPrintf("#%d: java_find(&0x%zx, 0x%zx)\n", thr->tid, *from_ptr, to);
+ DCHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ DCHECK_EQ(to % kHeapAlignment, 0);
+ DCHECK_GE(*from_ptr, jctx->heap_begin);
+ DCHECK_LE(to, jctx->heap_begin + jctx->heap_size);
for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
MBlock *b = ctx->metamap.GetBlock(from);
if (b) {
@@ -166,101 +154,105 @@ jptr __tsan_java_find(jptr *from_ptr, jptr to) {
}
void __tsan_java_finalize() {
- SCOPED_JAVA_FUNC(__tsan_java_finalize);
- DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
- AcquireGlobal(thr, 0);
+ JAVA_FUNC_ENTER(__tsan_java_finalize);
+ DPrintf("#%d: java_finalize()\n", thr->tid);
+ AcquireGlobal(thr);
}
void __tsan_java_mutex_lock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
- DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-
- MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
- MutexFlagDoPreLockOnPostLock);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_lock);
+ DPrintf("#%d: java_mutex_lock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_unlock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
- DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_unlock);
+ DPrintf("#%d: java_mutex_unlock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexUnlock(thr, pc, addr);
+ MutexUnlock(thr, 0, addr);
}
void __tsan_java_mutex_read_lock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
- DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-
- MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
- MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_read_lock);
+ DPrintf("#%d: java_mutex_read_lock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostReadLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_read_unlock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
- DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_read_unlock);
+ DPrintf("#%d: java_mutex_read_unlock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexReadUnlock(thr, pc, addr);
+ MutexReadUnlock(thr, 0, addr);
}
void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
- DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- CHECK_GT(rec, 0);
-
- MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
- MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(0x%zx, %d)\n", thr->tid, addr, rec);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ DCHECK_GT(rec, 0);
+
+ MutexPostLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock,
+ rec);
}
int __tsan_java_mutex_unlock_rec(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
- DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
+ return MutexUnlock(thr, 0, addr, MutexFlagRecursiveUnlock);
}
void __tsan_java_acquire(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_acquire);
- DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- Acquire(thr, caller_pc, addr);
+ Acquire(thr, 0, addr);
}
void __tsan_java_release(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_release);
- DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_release);
+ DPrintf("#%d: java_release(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- Release(thr, caller_pc, addr);
+ Release(thr, 0, addr);
}
void __tsan_java_release_store(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_release);
- DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_release);
+ DPrintf("#%d: java_release_store(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- ReleaseStore(thr, caller_pc, addr);
+ ReleaseStore(thr, 0, addr);
}
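
A hedged sketch of how a managed runtime would drive the Java entry points above (the VM glue functions are hypothetical; jptr is the pointer-sized integer type from tsan_interface_java.h, and addresses/sizes must be kHeapAlignment-multiples, as the DCHECKs require):

// Hypothetical VM glue; error handling omitted.
void vm_startup(jptr heap_begin, jptr heap_size) {
  __tsan_java_init(heap_begin, heap_size);   // once, before any other java_* call
}
void vm_new_object(jptr obj, jptr size) { __tsan_java_alloc(obj, size); }
void vm_monitor_enter(jptr obj) { __tsan_java_mutex_lock(obj); }
void vm_monitor_exit(jptr obj) { __tsan_java_mutex_unlock(obj); }
void vm_gc_compact(jptr src, jptr dst, jptr size) {
  // Stop-the-world only: __tsan_java_move assumes no concurrent accesses.
  __tsan_java_move(src, dst, size);
}
int vm_shutdown() { return __tsan_java_fini(); }  // returns exit status (see Finalize)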
lib/tsan/tsan_malloc_mac.cpp
@@ -12,11 +12,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common/sanitizer_errno.h"
#include "tsan_interceptors.h"
#include "tsan_stack_trace.h"
+#include "tsan_mman.h"
using namespace __tsan;
#define COMMON_MALLOC_ZONE_NAME "tsan"
@@ -29,16 +30,30 @@ using namespace __tsan;
user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (in_symbolizer()) return InternalAlloc(size); \
- SCOPED_INTERCEPTOR_RAW(malloc, size); \
- void *p = user_alloc(thr, pc, size)
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ p = user_alloc(thr, pc, size); \
+ } \
+ invoke_malloc_hook(p, size)
#define COMMON_MALLOC_REALLOC(ptr, size) \
if (in_symbolizer()) return InternalRealloc(ptr, size); \
- SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
- void *p = user_realloc(thr, pc, ptr, size)
+ if (ptr) \
+ invoke_free_hook(ptr); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ p = user_realloc(thr, pc, ptr, size); \
+ } \
+ invoke_malloc_hook(p, size)
#define COMMON_MALLOC_CALLOC(count, size) \
if (in_symbolizer()) return InternalCalloc(count, size); \
- SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
- void *p = user_calloc(thr, pc, size, count)
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ p = user_calloc(thr, pc, size, count); \
+ } \
+ invoke_malloc_hook(p, size * count)
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
if (in_symbolizer()) { \
void *p = InternalAlloc(size, nullptr, alignment); \
@@ -55,6 +70,7 @@ using namespace __tsan;
void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (in_symbolizer()) return InternalFree(ptr); \
+ invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
user_free(thr, pc, ptr)
#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
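
The invoke_malloc_hook/invoke_free_hook calls added to the Darwin malloc zone mean user allocation hooks now fire on this path as well. A hedged example of the weak-hook form, mirroring the defaults removed from tsan_mman.cpp below (size_t stands in for the runtime's uptr; __sanitizer_install_malloc_and_free_hooks is the registration-based alternative):

#include <stddef.h>

// Hypothetical user-side hooks; called right after allocation / right before free.
extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
  // e.g. record (ptr, size) in an application-side allocation log
}
extern "C" void __sanitizer_free_hook(void *ptr) {
  // e.g. drop ptr from the application-side allocation log
}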
lib/tsan/tsan_mman.cpp
@@ -15,27 +15,18 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
-// May be overriden by front-end.
-SANITIZER_WEAK_DEFAULT_IMPL
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-
-SANITIZER_WEAK_DEFAULT_IMPL
-void __sanitizer_free_hook(void *ptr) {
- (void)ptr;
-}
-
namespace __tsan {
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {};
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
@@ -69,8 +60,17 @@ Allocator *allocator() {
struct GlobalProc {
Mutex mtx;
Processor *proc;
-
- GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {}
+ // This mutex represents the internal allocator combined for
+ // the purposes of deadlock detection. The internal allocator
+ // uses multiple mutexes, moreover they are locked only occasionally
+ // and they are spin mutexes which don't support deadlock detection.
+ // So we use this fake mutex to serve as a substitute for these mutexes.
+ CheckedMutex internal_alloc_mtx;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc),
+ proc(ProcCreate()),
+ internal_alloc_mtx(MutexTypeInternalAlloc) {}
};
static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
@@ -78,6 +78,11 @@ GlobalProc *global_proc() {
return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
+static void InternalAllocAccess() {
+ global_proc()->internal_alloc_mtx.Lock();
+ global_proc()->internal_alloc_mtx.Unlock();
+}
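Note that InternalAllocAccess() takes and immediately releases internal_alloc_mtx: it does no real serialization, it only records a lock-order edge on the fake mutex described above, so the CheckedMutex deadlock detector sees every code path that may enter the internal allocator.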
+
ScopedGlobalProcessor::ScopedGlobalProcessor() {
GlobalProc *gp = global_proc();
ThreadState *thr = cur_thread();
@@ -110,6 +115,24 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->internal_alloc_mtx.Lock();
+ InternalAllocatorLock();
+}
+
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ InternalAllocatorUnlock();
+ global_proc()->internal_alloc_mtx.Unlock();
+}
+
+void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Lock();
+}
+
+void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Unlock();
+}
+
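These four entry points let callers (for example fork handlers) quiesce both the user allocator and the global processor state and release them afterwards. A hedged sketch of how a hypothetical caller would pair them; the function names below are illustrative, not part of the runtime:

static void LockAllocatorAndProc() {
  GlobalProcessorLock();
  AllocatorLock();  // also takes internal_alloc_mtx and the internal allocator
}

static void UnlockAllocatorAndProc() {
  AllocatorUnlock();
  GlobalProcessorUnlock();
}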
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;
@@ -148,7 +171,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
ObtainCurrentStack(thr, pc, &stack);
if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
rep.AddStack(stack, true);
OutputReport(thr, rep);
@@ -166,6 +189,12 @@ void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
GET_STACK_TRACE_FATAL(thr, pc);
ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportRssLimitExceeded(&stack);
+ }
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
@@ -218,9 +247,18 @@ void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
}
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
- DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
+ // Note: this can run before thread initialization/after finalization.
+ // As a result this is not necessarily synchronized with DoReset,
+ // which iterates over and resets all sync objects,
+ // but it is fine to create new MBlocks in this context.
ctx->metamap.AllocBlock(thr, pc, p, sz);
- if (write && thr->ignore_reads_and_writes == 0)
+ // If this runs before thread initialization/after finalization
+ // and we don't have trace initialized, we can't imitate writes.
+ // In such case just reset the shadow range, it is fine since
+ // it affects only a small fraction of special objects.
+ if (write && thr->ignore_reads_and_writes == 0 &&
+ atomic_load_relaxed(&thr->trace_pos))
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
else
MemoryResetRange(thr, pc, (uptr)p, sz);
@@ -228,8 +266,15 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
- DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (!thr->slot) {
+ // Very early/late in thread lifetime, or during fork.
+ UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
+ DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
+ return;
+ }
+ SlotLocker locker(thr);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
+ DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
@@ -309,8 +354,22 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
+static const void *user_alloc_begin(const void *p) {
+ if (p == nullptr || !IsAppMem((uptr)p))
+ return nullptr;
+ void *beg = allocator()->GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+
+ MBlock *b = ctx->metamap.GetBlock((uptr)beg);
+ if (!b)
+ return nullptr; // Not a valid pointer.
+
+ return (const void *)beg;
+}
+
uptr user_alloc_usable_size(const void *p) {
- if (p == 0)
+ if (p == 0 || !IsAppMem((uptr)p))
return 0;
MBlock *b = ctx->metamap.GetBlock((uptr)p);
if (!b)
@@ -320,11 +379,21 @@ uptr user_alloc_usable_size(const void *p) {
return b->siz;
}
+uptr user_alloc_usable_size_fast(const void *p) {
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ // Static objects may have malloc'd before tsan completes
+ // initialization, and may believe returned ptrs to be valid.
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
+}
+
void invoke_malloc_hook(void *ptr, uptr size) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __sanitizer_malloc_hook(ptr, size);
RunMallocHooks(ptr, size);
}
@@ -332,25 +401,26 @@ void invoke_free_hook(void *ptr) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __sanitizer_free_hook(ptr);
RunFreeHooks(ptr);
}
-void *internal_alloc(MBlockType typ, uptr sz) {
+void *Alloc(uptr sz) {
ThreadState *thr = cur_thread();
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
+ InternalAllocAccess();
return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}
-void internal_free(void *p) {
+void FreeImpl(void *p) {
ThreadState *thr = cur_thread();
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
+ InternalAllocAccess();
InternalFree(p, &thr->proc()->internal_alloc_cache);
}
@@ -387,14 +457,27 @@ int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return user_alloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return user_alloc_usable_size(p);
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = user_alloc_usable_size_fast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() {
+ allocator()->ForceReleaseToOS();
+}
+
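Together with __sanitizer_get_ownership above, these functions back the public allocator introspection interface. A hedged usage sketch, assuming the declarations are exposed via sanitizer/allocator_interface.h:

#include <sanitizer/allocator_interface.h>
#include <stdlib.h>

void InspectAllocation() {
  char *p = (char *)malloc(10);
  // Interior pointers are mapped back to the start of the block.
  const void *beg = __sanitizer_get_allocated_begin(p + 4);  // expected: == p
  // The "fast" variant must be given the block start (see the DCHECKs above).
  size_t usable = __sanitizer_get_allocated_size_fast(beg);  // expected: >= 10
  (void)usable;
  free(p);
}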
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- thr->clock.ResetCached(&thr->proc()->clock_cache);
- thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
allocator()->SwallowCache(&thr->proc()->alloc_cache);
internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
ctx->metamap.OnProcIdle(thr->proc());
lib/tsan/tsan_mman.h
@@ -24,6 +24,10 @@ void ReplaceSystemMalloc();
void AllocatorProcStart(Processor *proc);
void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
+void AllocatorLock();
+void AllocatorUnlock();
+void GlobalProcessorLock();
+void GlobalProcessorUnlock();
// For user allocations.
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
@@ -47,42 +51,29 @@ uptr user_alloc_usable_size(const void *p);
void invoke_malloc_hook(void *ptr, uptr size);
void invoke_free_hook(void *ptr);
-enum MBlockType {
- MBlockScopedBuf,
- MBlockString,
- MBlockStackTrace,
- MBlockShadowStack,
- MBlockSync,
- MBlockClock,
- MBlockThreadContex,
- MBlockDeadInfo,
- MBlockRacyStacks,
- MBlockRacyAddresses,
- MBlockAtExit,
- MBlockFlag,
- MBlockReport,
- MBlockReportMop,
- MBlockReportThread,
- MBlockReportMutex,
- MBlockReportLoc,
- MBlockReportStack,
- MBlockSuppression,
- MBlockExpectRace,
- MBlockSignal,
- MBlockJmpBuf,
+// For internal data structures.
+void *Alloc(uptr sz);
+void FreeImpl(void *p);
- // This must be the last.
- MBlockTypeCount
-};
+template <typename T, typename... Args>
+T *New(Args &&...args) {
+ return new (Alloc(sizeof(T))) T(static_cast<Args &&>(args)...);
+}
-// For internal data structures.
-void *internal_alloc(MBlockType typ, uptr sz);
-void internal_free(void *p);
+template <typename T>
+void Free(T *&p) {
+ if (p == nullptr)
+ return;
+ FreeImpl(p);
+ p = nullptr;
+}
template <typename T>
-void DestroyAndFree(T *p) {
+void DestroyAndFree(T *&p) {
+ if (p == nullptr)
+ return;
p->~T();
- internal_free(p);
+ Free(p);
}
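New<T>() and Free()/DestroyAndFree() replace the old typed internal_alloc()/internal_free() pair: allocation sites no longer carry an MBlockType tag, and freeing nulls the caller's pointer. A minimal sketch of how they compose; PendingReport is a hypothetical type used only for illustration:

struct PendingReport {
  uptr addr;
  explicit PendingReport(uptr a) : addr(a) {}
};

void Example() {
  PendingReport *r = New<PendingReport>(0x1000);  // placement-new on Alloc()
  // ... use r ...
  DestroyAndFree(r);  // runs ~PendingReport(), frees the memory, nulls r
}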
} // namespace __tsan
lib/tsan/tsan_mutexset.cpp
@@ -10,66 +10,55 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_mutexset.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
namespace __tsan {
-const uptr MutexSet::kMaxSize;
-
MutexSet::MutexSet() {
- size_ = 0;
- internal_memset(&descs_, 0, sizeof(descs_));
}
-void MutexSet::Add(u64 id, bool write, u64 epoch) {
+void MutexSet::Reset() { internal_memset(this, 0, sizeof(*this)); }
+
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {
// Look up existing mutex with the same id.
for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
+ if (descs_[i].addr == addr) {
descs_[i].count++;
- descs_[i].epoch = epoch;
+ descs_[i].seq = seq_++;
return;
}
}
// On overflow, find the oldest mutex and drop it.
if (size_ == kMaxSize) {
- u64 minepoch = (u64)-1;
- u64 mini = (u64)-1;
+ uptr min = 0;
for (uptr i = 0; i < size_; i++) {
- if (descs_[i].epoch < minepoch) {
- minepoch = descs_[i].epoch;
- mini = i;
- }
+ if (descs_[i].seq < descs_[min].seq)
+ min = i;
}
- RemovePos(mini);
+ RemovePos(min);
CHECK_EQ(size_, kMaxSize - 1);
}
// Add new mutex descriptor.
- descs_[size_].id = id;
+ descs_[size_].addr = addr;
+ descs_[size_].stack_id = stack_id;
descs_[size_].write = write;
- descs_[size_].epoch = epoch;
+ descs_[size_].seq = seq_++;
descs_[size_].count = 1;
size_++;
}
-void MutexSet::Del(u64 id, bool write) {
+void MutexSet::DelAddr(uptr addr, bool destroy) {
for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- if (--descs_[i].count == 0)
+ if (descs_[i].addr == addr) {
+ if (destroy || --descs_[i].count == 0)
RemovePos(i);
return;
}
}
}
-void MutexSet::Remove(u64 id) {
- for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- RemovePos(i);
- return;
- }
- }
-}
-
void MutexSet::RemovePos(uptr i) {
CHECK_LT(i, size_);
descs_[i] = descs_[size_ - 1];
@@ -85,4 +74,7 @@ MutexSet::Desc MutexSet::Get(uptr i) const {
return descs_[i];
}
+DynamicMutexSet::DynamicMutexSet() : ptr_(New<MutexSet>()) {}
+DynamicMutexSet::~DynamicMutexSet() { DestroyAndFree(ptr_); }
+
} // namespace __tsan
lib/tsan/tsan_mutexset.h
@@ -21,34 +21,55 @@ class MutexSet {
public:
// Holds limited number of mutexes.
// The oldest mutexes are discarded on overflow.
- static const uptr kMaxSize = 16;
+ static constexpr uptr kMaxSize = 16;
struct Desc {
- u64 id;
- u64 epoch;
- int count;
+ uptr addr;
+ StackID stack_id;
+ u32 seq;
+ u32 count;
bool write;
+
+ Desc() { internal_memset(this, 0, sizeof(*this)); }
+ Desc(const Desc& other) { *this = other; }
+ Desc& operator=(const MutexSet::Desc& other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ return *this;
+ }
};
MutexSet();
- // The 'id' is obtained from SyncVar::GetId().
- void Add(u64 id, bool write, u64 epoch);
- void Del(u64 id, bool write);
- void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ void Reset();
+ void AddAddr(uptr addr, StackID stack_id, bool write);
+ void DelAddr(uptr addr, bool destroy = false);
uptr Size() const;
Desc Get(uptr i) const;
- void operator=(const MutexSet &other) {
- internal_memcpy(this, &other, sizeof(*this));
- }
-
private:
#if !SANITIZER_GO
- uptr size_;
+ u32 seq_ = 0;
+ uptr size_ = 0;
Desc descs_[kMaxSize];
-#endif
void RemovePos(uptr i);
- MutexSet(const MutexSet&);
+#endif
+};
+
+// MutexSet is too large to live on the stack.
+// DynamicMutexSet can be used to create local MutexSets.
+class DynamicMutexSet {
+ public:
+ DynamicMutexSet();
+ ~DynamicMutexSet();
+ MutexSet* operator->() { return ptr_; }
+ operator MutexSet*() { return ptr_; }
+ DynamicMutexSet(const DynamicMutexSet&) = delete;
+ DynamicMutexSet& operator=(const DynamicMutexSet&) = delete;
+
+ private:
+ MutexSet* ptr_;
+#if SANITIZER_GO
+ MutexSet set_;
+#endif
};
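In C++ mode DynamicMutexSet heap-allocates the set via New<MutexSet>() (see tsan_mutexset.cpp above); in Go mode it reuses the embedded set_. A hedged usage sketch; the surrounding function and its arguments are hypothetical:

void DescribeHeldMutexes(uptr mutex_addr, StackID creation_stack) {
  DynamicMutexSet mset;  // no large MutexSet object on the stack
  mset->AddAddr(mutex_addr, creation_stack, /*write=*/true);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    (void)d;  // e.g. attach d.addr / d.stack_id to a report
  }
}  // ~DynamicMutexSet() destroys and frees the set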
// Go does not have mutexes, so do not spend memory and time.
@@ -56,12 +77,13 @@ class MutexSet {
// in different goroutine).
#if SANITIZER_GO
MutexSet::MutexSet() {}
-void MutexSet::Add(u64 id, bool write, u64 epoch) {}
-void MutexSet::Del(u64 id, bool write) {}
-void MutexSet::Remove(u64 id) {}
-void MutexSet::RemovePos(uptr i) {}
+void MutexSet::Reset() {}
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {}
+void MutexSet::DelAddr(uptr addr, bool destroy) {}
uptr MutexSet::Size() const { return 0; }
MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+DynamicMutexSet::DynamicMutexSet() : ptr_(&set_) {}
+DynamicMutexSet::~DynamicMutexSet() {}
#endif
} // namespace __tsan
lib/tsan/tsan_new_delete.cpp
@@ -0,0 +1,199 @@
+//===-- tsan_new_delete.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: __sanitizer::uptr {};
+} // namespace std
+
+DECLARE_REAL(void *, malloc, uptr size)
+DECLARE_REAL(void, free, void *ptr)
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(mangled_name, nothrow) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_alloc(thr, pc, size); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+#define OPERATOR_NEW_BODY_ALIGN(mangled_name, nothrow) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size, nullptr, (uptr)align); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_memalign(thr, pc, (uptr)align, size); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size);
+void *operator new(__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znwm, false /*nothrow*/);
+}
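The names passed to SCOPED_INTERCEPTOR_RAW in these interceptors are the Itanium C++ ABI symbols of the interposed operators: for example, _Znwm is operator new(unsigned long), _Znam is operator new[](unsigned long), and _ZdlPvSt11align_val_t is operator delete(void*, std::align_val_t).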
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size);
+void *operator new[](__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znam, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t, true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t, true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::align_val_t align);
+void *operator new(__sanitizer::uptr size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnwmSt11align_val_t, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::align_val_t align);
+void *operator new[](__sanitizer::uptr size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnamSt11align_val_t, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnwmSt11align_val_tRKSt9nothrow_t,
+ true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnamSt11align_val_tRKSt9nothrow_t,
+ true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY(mangled_name) \
+ if (ptr == 0) return; \
+ if (in_symbolizer()) \
+ return InternalFree(ptr); \
+ invoke_free_hook(ptr); \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
+ user_free(thr, pc, ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT;
+void operator delete(void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT;
+void operator delete[](void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&);
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&);
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, __sanitizer::uptr size) NOEXCEPT;
+void operator delete(void *ptr, __sanitizer::uptr size) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, __sanitizer::uptr size) NOEXCEPT;
+void operator delete[](void *ptr, __sanitizer::uptr size) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT;
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT;
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&);
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvSt11align_val_tRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const&);
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvSt11align_val_tRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT;
+void operator delete(void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvmSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT;
+void operator delete[](void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvmSt11align_val_t);
+}
lib/tsan/tsan_platform.h
@@ -18,38 +18,42 @@
# error "Only 64-bit is supported"
#endif
+#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
-#include "tsan_trace.h"
namespace __tsan {
-#if defined(__x86_64__)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#elif SANITIZER_IOSSIM // arm64 iOS simulators (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#elif SANITIZER_IOS // arm64 iOS devices (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 0
-#elif SANITIZER_MAC // arm64 macOS (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#else
-#define HAS_48_BIT_ADDRESS_SPACE 0
-#endif
-
-#if !SANITIZER_GO
+enum {
+ // App memory is not mapped onto shadow memory range.
+ kBrokenMapping = 1 << 0,
+ // Mapping app memory and back does not produce the same address,
+ // this can lead to wrong addresses in reports and potentially
+ // other bad consequences.
+ kBrokenReverseMapping = 1 << 1,
+ // Mapping is non-linear for linear user range.
+ // This is bad and can lead to unpredictable memory corruptions, etc
+ // because range access functions assume linearity.
+ kBrokenLinearity = 1 << 2,
+ // Meta for an app region overlaps with the meta of another app region.
+ // This is determined by recomputing the individual meta regions for
+ // each app region.
+ //
+ // N.B. There is no "kBrokenReverseMetaMapping" constant because there
+ // is no MetaToMem function. However, note that (!kBrokenLinearity
+ // && !kBrokenAliasedMetas) implies that MemToMeta is invertible.
+ kBrokenAliasedMetas = 1 << 3,
+};
-#if HAS_48_BIT_ADDRESS_SPACE
/*
C/C++ on linux/x86_64 and freebsd/x86_64
0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
0040 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 2000 0000 0000: shadow
-2000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 5500 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
+3400 0000 0000 - 5500 0000 0000: -
5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
-5680 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
+5680 0000 0000 - 7b00 0000 0000: -
7b00 0000 0000 - 7c00 0000 0000: heap
7c00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
@@ -65,15 +69,12 @@ C/C++ on netbsd/amd64 can reuse the same mapping:
* Stack on NetBSD/amd64 has prereserved 128MB.
* Heap grows downwards (top-down).
* ASLR must be disabled per-process or globally.
-
*/
-struct Mapping {
+struct Mapping48AddressSpace {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
static const uptr kHeapMemBeg = 0x7b0000000000ull;
static const uptr kHeapMemEnd = 0x7c0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
@@ -82,36 +83,32 @@ struct Mapping {
static const uptr kMidAppMemEnd = 0x568000000000ull;
static const uptr kHiAppMemBeg = 0x7e8000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
- static const uptr kAppMemMsk = 0x780000000000ull;
- static const uptr kAppMemXor = 0x040000000000ull;
+ static const uptr kShadowMsk = 0x780000000000ull;
+ static const uptr kShadowXor = 0x040000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
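The per-mapping kAppMemMsk/kAppMemXor pair is replaced by kShadowMsk/kShadowXor plus a new kShadowAdd term, which lets the Go mappings express a purely additive app-to-shadow translation. Roughly, the translation implied by these constants looks like the sketch below (not verbatim runtime code; kShadowCell and kShadowMultiplier are assumed to come from tsan_defs.h):

uptr MemToShadowSketch(uptr addr) {
  return ((addr & ~(Mapping48AddressSpace::kShadowMsk | (kShadowCell - 1))) ^
          Mapping48AddressSpace::kShadowXor) *
             kShadowMultiplier +
         Mapping48AddressSpace::kShadowAdd;
}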
-#define TSAN_MID_APP_RANGE 1
-#elif defined(__mips64)
/*
C/C++ on linux/mips64 (40-bit VMA)
0000 0000 00 - 0100 0000 00: - (4 GB)
0100 0000 00 - 0200 0000 00: main binary (4 GB)
-0200 0000 00 - 2000 0000 00: - (120 GB)
-2000 0000 00 - 4000 0000 00: shadow (128 GB)
+0200 0000 00 - 1200 0000 00: - (64 GB)
+1200 0000 00 - 2200 0000 00: shadow (64 GB)
+2200 0000 00 - 4000 0000 00: - (120 GB)
4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
5000 0000 00 - aa00 0000 00: - (360 GB)
aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
-ab00 0000 00 - b000 0000 00: - (20 GB)
-b000 0000 00 - b200 0000 00: traces (8 GB)
-b200 0000 00 - fe00 0000 00: - (304 GB)
+ab00 0000 00 - fe00 0000 00: - (332 GB)
fe00 0000 00 - ff00 0000 00: heap (4 GB)
ff00 0000 00 - ff80 0000 00: - (2 GB)
ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
*/
-struct Mapping40 {
+struct MappingMips64_40 {
static const uptr kMetaShadowBeg = 0x4000000000ull;
static const uptr kMetaShadowEnd = 0x5000000000ull;
- static const uptr kTraceMemBeg = 0xb000000000ull;
- static const uptr kTraceMemEnd = 0xb200000000ull;
- static const uptr kShadowBeg = 0x2000000000ull;
- static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kShadowBeg = 0x1200000000ull;
+ static const uptr kShadowEnd = 0x2200000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
static const uptr kLoAppMemBeg = 0x0100000000ull;
@@ -120,152 +117,170 @@ struct Mapping40 {
static const uptr kMidAppMemEnd = 0xab00000000ull;
static const uptr kHiAppMemBeg = 0xff80000000ull;
static const uptr kHiAppMemEnd = 0xffffffffffull;
- static const uptr kAppMemMsk = 0xf800000000ull;
- static const uptr kAppMemXor = 0x0800000000ull;
+ static const uptr kShadowMsk = 0xf800000000ull;
+ static const uptr kShadowXor = 0x0800000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
static const uptr kVdsoBeg = 0xfffff00000ull;
};
-#define TSAN_MID_APP_RANGE 1
-#define TSAN_RUNTIME_VMA 1
-#elif defined(__aarch64__) && defined(__APPLE__)
/*
C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
0000 0000 00 - 0100 0000 00: - (4 GB)
0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
0200 0000 00 - 0300 0000 00: heap (4 GB)
0300 0000 00 - 0400 0000 00: - (4 GB)
-0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
-0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0400 0000 00 - 0800 0000 00: shadow memory (16 GB)
+0800 0000 00 - 0d00 0000 00: - (20 GB)
0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
-0e00 0000 00 - 0f00 0000 00: - (4 GB)
-0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
-0fc0 0000 00 - 1000 0000 00: -
+0e00 0000 00 - 1000 0000 00: -
*/
-struct Mapping {
+struct MappingAppleAarch64 {
static const uptr kLoAppMemBeg = 0x0100000000ull;
static const uptr kLoAppMemEnd = 0x0200000000ull;
static const uptr kHeapMemBeg = 0x0200000000ull;
static const uptr kHeapMemEnd = 0x0300000000ull;
static const uptr kShadowBeg = 0x0400000000ull;
- static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kShadowEnd = 0x0800000000ull;
static const uptr kMetaShadowBeg = 0x0d00000000ull;
static const uptr kMetaShadowEnd = 0x0e00000000ull;
- static const uptr kTraceMemBeg = 0x0f00000000ull;
- static const uptr kTraceMemEnd = 0x0fc0000000ull;
static const uptr kHiAppMemBeg = 0x0fc0000000ull;
static const uptr kHiAppMemEnd = 0x0fc0000000ull;
- static const uptr kAppMemMsk = 0x0ull;
- static const uptr kAppMemXor = 0x0ull;
+ static const uptr kShadowMsk = 0x0ull;
+ static const uptr kShadowXor = 0x0ull;
+ static const uptr kShadowAdd = 0x0200000000ull;
static const uptr kVdsoBeg = 0x7000000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
-#elif defined(__aarch64__) && !defined(__APPLE__)
-// AArch64 supports multiple VMA which leads to multiple address transformation
-// functions. To support these multiple VMAS transformations and mappings TSAN
-// runtime for AArch64 uses an external memory read (vmaSize) to select which
-// mapping to use. Although slower, it make a same instrumented binary run on
-// multiple kernels.
-
/*
C/C++ on linux/aarch64 (39-bit VMA)
-0000 0010 00 - 0100 0000 00: main binary
-0100 0000 00 - 0800 0000 00: -
-0800 0000 00 - 2000 0000 00: shadow memory
-2000 0000 00 - 3100 0000 00: -
-3100 0000 00 - 3400 0000 00: metainfo
-3400 0000 00 - 5500 0000 00: -
-5500 0000 00 - 5600 0000 00: main binary (PIE)
-5600 0000 00 - 6000 0000 00: -
-6000 0000 00 - 6200 0000 00: traces
-6200 0000 00 - 7d00 0000 00: -
-7c00 0000 00 - 7d00 0000 00: heap
-7d00 0000 00 - 7fff ffff ff: modules and main thread stack
+0000 0010 00 - 0500 0000 00: main binary (20 GB)
+0500 0000 00 - 2000 0000 00: -
+2000 0000 00 - 4000 0000 00: shadow memory (128 GB)
+4000 0000 00 - 4800 0000 00: metainfo (32 GB)
+4800 0000 00 - 5500 0000 00: -
+5500 0000 00 - 5a00 0000 00: main binary (PIE) (20 GB)
+5a00 0000 00 - 7a00 0000 00: -
+7a00 0000 00 - 7d00 0000 00: heap (12 GB)
+7d00 0000 00 - 7fff ffff ff: modules and main thread stack (12 GB)
*/
-struct Mapping39 {
+struct MappingAarch64_39 {
static const uptr kLoAppMemBeg = 0x0000001000ull;
- static const uptr kLoAppMemEnd = 0x0100000000ull;
- static const uptr kShadowBeg = 0x0800000000ull;
- static const uptr kShadowEnd = 0x2000000000ull;
- static const uptr kMetaShadowBeg = 0x3100000000ull;
- static const uptr kMetaShadowEnd = 0x3400000000ull;
+ static const uptr kLoAppMemEnd = 0x0500000000ull;
+ static const uptr kShadowBeg = 0x2000000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x4800000000ull;
static const uptr kMidAppMemBeg = 0x5500000000ull;
- static const uptr kMidAppMemEnd = 0x5600000000ull;
- static const uptr kTraceMemBeg = 0x6000000000ull;
- static const uptr kTraceMemEnd = 0x6200000000ull;
- static const uptr kHeapMemBeg = 0x7c00000000ull;
+ static const uptr kMidAppMemEnd = 0x5a00000000ull;
+ static const uptr kHeapMemBeg = 0x7a00000000ull;
static const uptr kHeapMemEnd = 0x7d00000000ull;
- static const uptr kHiAppMemBeg = 0x7e00000000ull;
+ static const uptr kHiAppMemBeg = 0x7d00000000ull;
static const uptr kHiAppMemEnd = 0x7fffffffffull;
- static const uptr kAppMemMsk = 0x7800000000ull;
- static const uptr kAppMemXor = 0x0200000000ull;
+ static const uptr kShadowMsk = 0x7000000000ull;
+ static const uptr kShadowXor = 0x1000000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
static const uptr kVdsoBeg = 0x7f00000000ull;
};
/*
C/C++ on linux/aarch64 (42-bit VMA)
-00000 0010 00 - 01000 0000 00: main binary
-01000 0000 00 - 10000 0000 00: -
-10000 0000 00 - 20000 0000 00: shadow memory
-20000 0000 00 - 26000 0000 00: -
-26000 0000 00 - 28000 0000 00: metainfo
-28000 0000 00 - 2aa00 0000 00: -
-2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
-2ab00 0000 00 - 36200 0000 00: -
-36200 0000 00 - 36240 0000 00: traces
-36240 0000 00 - 3e000 0000 00: -
-3e000 0000 00 - 3f000 0000 00: heap
-3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
+00000 0010 00 - 02000 0000 00: main binary (128 GB)
+02000 0000 00 - 10000 0000 00: -
+10000 0000 00 - 20000 0000 00: shadow memory (1024 GB)
+20000 0000 00 - 24000 0000 00: metainfo (256 GB)
+24000 0000 00 - 2aa00 0000 00: -
+2aa00 0000 00 - 2c000 0000 00: main binary (PIE) (88 GB)
+2c000 0000 00 - 3c000 0000 00: -
+3c000 0000 00 - 3f000 0000 00: heap (192 GB)
+3f000 0000 00 - 3ffff ffff ff: modules and main thread stack (64 GB)
*/
-struct Mapping42 {
+struct MappingAarch64_42 {
static const uptr kLoAppMemBeg = 0x00000001000ull;
- static const uptr kLoAppMemEnd = 0x01000000000ull;
+ static const uptr kLoAppMemEnd = 0x02000000000ull;
static const uptr kShadowBeg = 0x10000000000ull;
static const uptr kShadowEnd = 0x20000000000ull;
- static const uptr kMetaShadowBeg = 0x26000000000ull;
- static const uptr kMetaShadowEnd = 0x28000000000ull;
+ static const uptr kMetaShadowBeg = 0x20000000000ull;
+ static const uptr kMetaShadowEnd = 0x24000000000ull;
static const uptr kMidAppMemBeg = 0x2aa00000000ull;
- static const uptr kMidAppMemEnd = 0x2ab00000000ull;
- static const uptr kTraceMemBeg = 0x36200000000ull;
- static const uptr kTraceMemEnd = 0x36400000000ull;
- static const uptr kHeapMemBeg = 0x3e000000000ull;
+ static const uptr kMidAppMemEnd = 0x2c000000000ull;
+ static const uptr kHeapMemBeg = 0x3c000000000ull;
static const uptr kHeapMemEnd = 0x3f000000000ull;
static const uptr kHiAppMemBeg = 0x3f000000000ull;
static const uptr kHiAppMemEnd = 0x3ffffffffffull;
- static const uptr kAppMemMsk = 0x3c000000000ull;
- static const uptr kAppMemXor = 0x04000000000ull;
+ static const uptr kShadowMsk = 0x38000000000ull;
+ static const uptr kShadowXor = 0x08000000000ull;
+ static const uptr kShadowAdd = 0x00000000000ull;
static const uptr kVdsoBeg = 0x37f00000000ull;
};
-struct Mapping48 {
+/*
+C/C++ on linux/aarch64 (48-bit VMA)
+0000 0000 1000 - 0a00 0000 0000: main binary (10240 GB)
+0a00 0000 1000 - 1554 0000 0000: -
+1554 0000 1000 - 5400 0000 0000: shadow memory (64176 GB)
+5400 0000 1000 - 8000 0000 0000: -
+8000 0000 1000 - a000 0000 0000: metainfo (32768 GB)
+a000 0000 1000 - aaaa 0000 0000: -
+aaaa 0000 1000 - ac00 0000 0000: main binary (PIE) (1368 GB)
+ac00 0000 1000 - fc00 0000 0000: -
+fc00 0000 1000 - ffff ffff ffff: modules and main thread stack (4096 GB)
+
+N.B. the shadow memory region has a strange start address, because it
+contains the shadows for the mid, high and low app regions (in this
+unusual order).
+*/
+struct MappingAarch64_48 {
static const uptr kLoAppMemBeg = 0x0000000001000ull;
- static const uptr kLoAppMemEnd = 0x0000200000000ull;
- static const uptr kShadowBeg = 0x0002000000000ull;
- static const uptr kShadowEnd = 0x0004000000000ull;
- static const uptr kMetaShadowBeg = 0x0005000000000ull;
- static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kLoAppMemEnd = 0x00a0000000000ull;
+ static const uptr kShadowBeg = 0x0155400000000ull;
+ static const uptr kShadowEnd = 0x0540000000000ull;
+ static const uptr kMetaShadowBeg = 0x0800000000000ull;
+ static const uptr kMetaShadowEnd = 0x0a00000000000ull;
static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
- static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
- static const uptr kTraceMemBeg = 0x0f06000000000ull;
- static const uptr kTraceMemEnd = 0x0f06200000000ull;
- static const uptr kHeapMemBeg = 0x0ffff00000000ull;
- static const uptr kHeapMemEnd = 0x0ffff00000000ull;
- static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kMidAppMemEnd = 0x0ac0000000000ull;
+ static const uptr kHiAppMemBeg = 0x0fc0000000000ull;
static const uptr kHiAppMemEnd = 0x1000000000000ull;
- static const uptr kAppMemMsk = 0x0fff800000000ull;
- static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kHeapMemBeg = 0x0fc0000000000ull;
+ static const uptr kHeapMemEnd = 0x0fc0000000000ull;
+ static const uptr kShadowMsk = 0x0c00000000000ull;
+ static const uptr kShadowXor = 0x0200000000000ull;
+ static const uptr kShadowAdd = 0x0000000000000ull;
static const uptr kVdsoBeg = 0xffff000000000ull;
};
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-// Indicates that mapping defines a mid range memory segment.
-#define TSAN_MID_APP_RANGE 1
-#elif defined(__powerpc64__)
-// PPC64 supports multiple VMA which leads to multiple address transformation
-// functions. To support these multiple VMAS transformations and mappings TSAN
-// runtime for PPC64 uses an external memory read (vmaSize) to select which
-// mapping to use. Although slower, it make a same instrumented binary run on
-// multiple kernels.
+/* C/C++ on linux/loongarch64 (47-bit VMA)
+0000 0000 4000 - 0080 0000 0000: main binary
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow memory
+1000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3400 0000 0000: metainfo
+3400 0000 0000 - 5555 0000 0000: -
+5555 0000 0000 - 5556 0000 0000: main binary (PIE)
+5556 0000 0000 - 7ffe 0000 0000: -
+7ffe 0000 0000 - 7fff 0000 0000: heap
+7fff 0000 0000 - 7fff 8000 0000: -
+7fff 8000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+struct MappingLoongArch64_47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x7ffe00000000ull;
+ static const uptr kHeapMemEnd = 0x7fff00000000ull;
+ static const uptr kLoAppMemBeg = 0x000000004000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x555500000000ull;
+ static const uptr kMidAppMemEnd = 0x555600000000ull;
+ static const uptr kHiAppMemBeg = 0x7fff80000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull;
+ static const uptr kShadowMsk = 0x780000000000ull;
+ static const uptr kShadowXor = 0x040000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x7fffffffc000ull;
+};
/*
C/C++ on linux/powerpc64 (44-bit VMA)
@@ -274,18 +289,16 @@ C/C++ on linux/powerpc64 (44-bit VMA)
0001 0000 0000 - 0b00 0000 0000: shadow
0b00 0000 0000 - 0b00 0000 0000: -
0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
-0d00 0000 0000 - 0d00 0000 0000: -
-0d00 0000 0000 - 0f00 0000 0000: traces
-0f00 0000 0000 - 0f00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: -
0f00 0000 0000 - 0f50 0000 0000: heap
0f50 0000 0000 - 0f60 0000 0000: -
0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
*/
-struct Mapping44 {
+struct MappingPPC64_44 {
+ static const uptr kBroken = kBrokenMapping | kBrokenReverseMapping |
+ kBrokenLinearity | kBrokenAliasedMetas;
static const uptr kMetaShadowBeg = 0x0b0000000000ull;
static const uptr kMetaShadowEnd = 0x0d0000000000ull;
- static const uptr kTraceMemBeg = 0x0d0000000000ull;
- static const uptr kTraceMemEnd = 0x0f0000000000ull;
static const uptr kShadowBeg = 0x000100000000ull;
static const uptr kShadowEnd = 0x0b0000000000ull;
static const uptr kLoAppMemBeg = 0x000000000100ull;
@@ -294,188 +307,196 @@ struct Mapping44 {
static const uptr kHeapMemEnd = 0x0f5000000000ull;
static const uptr kHiAppMemBeg = 0x0f6000000000ull;
static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
- static const uptr kAppMemMsk = 0x0f0000000000ull;
- static const uptr kAppMemXor = 0x002100000000ull;
+ static const uptr kShadowMsk = 0x0f0000000000ull;
+ static const uptr kShadowXor = 0x002100000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0x3c0000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
/*
C/C++ on linux/powerpc64 (46-bit VMA)
0000 0000 1000 - 0100 0000 0000: main binary
0100 0000 0000 - 0200 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
-2000 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2200 0000 0000: traces
-2200 0000 0000 - 3d00 0000 0000: -
+0100 0000 0000 - 0800 0000 0000: shadow
+0800 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects)
+1200 0000 0000 - 3d00 0000 0000: -
3d00 0000 0000 - 3e00 0000 0000: heap
3e00 0000 0000 - 3e80 0000 0000: -
3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
*/
-struct Mapping46 {
+struct MappingPPC64_46 {
static const uptr kMetaShadowBeg = 0x100000000000ull;
- static const uptr kMetaShadowEnd = 0x200000000000ull;
- static const uptr kTraceMemBeg = 0x200000000000ull;
- static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kMetaShadowEnd = 0x120000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x080000000000ull;
static const uptr kHeapMemBeg = 0x3d0000000000ull;
static const uptr kHeapMemEnd = 0x3e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x010000000000ull;
static const uptr kHiAppMemBeg = 0x3e8000000000ull;
static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
- static const uptr kAppMemMsk = 0x3c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kShadowMsk = 0x3c0000000000ull;
+ static const uptr kShadowXor = 0x020000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0x7800000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
/*
C/C++ on linux/powerpc64 (47-bit VMA)
0000 0000 1000 - 0100 0000 0000: main binary
0100 0000 0000 - 0200 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
-2000 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2200 0000 0000: traces
-2200 0000 0000 - 7d00 0000 0000: -
+0100 0000 0000 - 0800 0000 0000: shadow
+0800 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects)
+1200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
7e00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
-struct Mapping47 {
+struct MappingPPC64_47 {
static const uptr kMetaShadowBeg = 0x100000000000ull;
- static const uptr kMetaShadowEnd = 0x200000000000ull;
- static const uptr kTraceMemBeg = 0x200000000000ull;
- static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kMetaShadowEnd = 0x120000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x080000000000ull;
static const uptr kHeapMemBeg = 0x7d0000000000ull;
static const uptr kHeapMemEnd = 0x7e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x010000000000ull;
static const uptr kHiAppMemBeg = 0x7e8000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull; // 47 bits
- static const uptr kAppMemMsk = 0x7c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kShadowMsk = 0x7c0000000000ull;
+ static const uptr kShadowXor = 0x020000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0x7800000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-#elif defined(__s390x__)
/*
C/C++ on linux/s390x
While the kernel provides a 64-bit address space, we have to restrict ourselves
to 48 bits due to how e.g. SyncVar::GetId() works.
0000 0000 1000 - 0e00 0000 0000: binary, modules, stacks - 14 TiB
-0e00 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+0e00 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 4000 0000 0000: shadow - 32TiB (2 * app)
+4000 0000 0000 - 9000 0000 0000: -
9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
-b000 0000 0000 - be00 0000 0000: -
+9800 0000 0000 - be00 0000 0000: -
be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator)
*/
-struct Mapping {
+struct MappingS390x {
static const uptr kMetaShadowBeg = 0x900000000000ull;
static const uptr kMetaShadowEnd = 0x980000000000ull;
- static const uptr kTraceMemBeg = 0xa00000000000ull;
- static const uptr kTraceMemEnd = 0xb00000000000ull;
- static const uptr kShadowBeg = 0x400000000000ull;
- static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x400000000000ull;
static const uptr kHeapMemBeg = 0xbe0000000000ull;
static const uptr kHeapMemEnd = 0xc00000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x0e0000000000ull;
static const uptr kHiAppMemBeg = 0xc00000004000ull;
static const uptr kHiAppMemEnd = 0xc00000004000ull;
- static const uptr kAppMemMsk = 0xb00000000000ull;
- static const uptr kAppMemXor = 0x100000000000ull;
+ static const uptr kShadowMsk = 0xb00000000000ull;
+ static const uptr kShadowXor = 0x100000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0xfffffffff000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
-#endif
-
-#elif SANITIZER_GO && !SANITIZER_WINDOWS && HAS_48_BIT_ADDRESS_SPACE
/* Go on linux, darwin and freebsd on x86_64
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2380 0000 0000: shadow
-2380 0000 0000 - 3000 0000 0000: -
+2000 0000 0000 - 21c0 0000 0000: shadow
+21c0 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping {
+struct MappingGo48 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x238000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x21c000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-#elif SANITIZER_GO && SANITIZER_WINDOWS
-
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0500 0000 0000: shadow
-0500 0000 0000 - 0560 0000 0000: -
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+0100 0000 0000 - 0300 0000 0000: shadow
+0300 0000 0000 - 0700 0000 0000: -
+0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
+PIE binaries are currently not supported, but support should be theoretically possible.
*/
-struct Mapping {
- static const uptr kMetaShadowBeg = 0x076000000000ull;
- static const uptr kMetaShadowEnd = 0x07d000000000ull;
- static const uptr kTraceMemBeg = 0x056000000000ull;
- static const uptr kTraceMemEnd = 0x076000000000ull;
+struct MappingGoWindows {
+ static const uptr kMetaShadowBeg = 0x070000000000ull;
+ static const uptr kMetaShadowEnd = 0x077000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x050000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x030000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x010000000000ull;
};
-#elif SANITIZER_GO && defined(__powerpc64__)
-
-/* Only Mapping46 and Mapping47 are currently supported for powercp64 on Go. */
-
/* Go on linux/powerpc64 (46-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2380 0000 0000: shadow
-2380 0000 0000 - 2400 0000 0000: -
-2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
-3400 0000 0000 - 3600 0000 0000: -
-3600 0000 0000 - 3800 0000 0000: traces
-3800 0000 0000 - 4000 0000 0000: -
+2000 0000 0000 - 21c0 0000 0000: shadow
+21c0 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 2470 0000 0000: metainfo (memory blocks and sync objects)
+2470 0000 0000 - 4000 0000 0000: -
*/
-struct Mapping46 {
+struct MappingGoPPC64_46 {
static const uptr kMetaShadowBeg = 0x240000000000ull;
- static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kTraceMemBeg = 0x360000000000ull;
- static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kMetaShadowEnd = 0x247000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x238000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x21c000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
/* Go on linux/powerpc64 (47-bit VMA)
@@ -483,718 +504,424 @@ struct Mapping46 {
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping47 {
+struct MappingGoPPC64_47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__aarch64__)
-
/* Go on linux/aarch64 (48-bit VMA) and darwin/aarch64 (47-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
-
-struct Mapping {
+struct MappingGoAarch64 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__mips64)
/*
Go on linux/mips64 (47-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping47 {
+struct MappingGoMips64_47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__s390x__)
/*
Go on linux/s390x
0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
1000 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+4000 0000 0000 - 6000 0000 0000: shadow - 32TiB (2 * app)
+6000 0000 0000 - 9000 0000 0000: -
9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
*/
-struct Mapping {
+struct MappingGoS390x {
static const uptr kMetaShadowBeg = 0x900000000000ull;
static const uptr kMetaShadowEnd = 0x980000000000ull;
- static const uptr kTraceMemBeg = 0xa00000000000ull;
- static const uptr kTraceMemEnd = 0xb00000000000ull;
static const uptr kShadowBeg = 0x400000000000ull;
- static const uptr kShadowEnd = 0x800000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x600000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x100000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x400000000000ull;
};
-#else
-# error "Unknown platform"
-#endif
-
-
-#ifdef TSAN_RUNTIME_VMA
extern uptr vmaSize;
-#endif
-
-
-enum MappingType {
- MAPPING_LO_APP_BEG,
- MAPPING_LO_APP_END,
- MAPPING_HI_APP_BEG,
- MAPPING_HI_APP_END,
-#ifdef TSAN_MID_APP_RANGE
- MAPPING_MID_APP_BEG,
- MAPPING_MID_APP_END,
-#endif
- MAPPING_HEAP_BEG,
- MAPPING_HEAP_END,
- MAPPING_APP_BEG,
- MAPPING_APP_END,
- MAPPING_SHADOW_BEG,
- MAPPING_SHADOW_END,
- MAPPING_META_SHADOW_BEG,
- MAPPING_META_SHADOW_END,
- MAPPING_TRACE_BEG,
- MAPPING_TRACE_END,
- MAPPING_VDSO_BEG,
-};
-template<typename Mapping, int Type>
-uptr MappingImpl(void) {
- switch (Type) {
-#if !SANITIZER_GO
- case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
- case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
-# ifdef TSAN_MID_APP_RANGE
- case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg;
- case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd;
-# endif
- case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
- case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
- case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
- case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
- case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
-#else
- case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
- case MAPPING_APP_END: return Mapping::kAppMemEnd;
-#endif
- case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
- case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
- case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
- case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
- case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
- case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
- }
-}
-
-template<int Type>
-uptr MappingArchImpl(void) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+template <typename Func, typename Arg>
+ALWAYS_INLINE auto SelectMapping(Arg arg) {
+#if SANITIZER_GO
+# if defined(__powerpc64__)
switch (vmaSize) {
- case 39: return MappingImpl<Mapping39, Type>();
- case 42: return MappingImpl<Mapping42, Type>();
- case 48: return MappingImpl<Mapping48, Type>();
+ case 46:
+ return Func::template Apply<MappingGoPPC64_46>(arg);
+ case 47:
+ return Func::template Apply<MappingGoPPC64_47>(arg);
}
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
+# elif defined(__mips64)
+ return Func::template Apply<MappingGoMips64_47>(arg);
+# elif defined(__s390x__)
+ return Func::template Apply<MappingGoS390x>(arg);
+# elif defined(__aarch64__)
+ return Func::template Apply<MappingGoAarch64>(arg);
+# elif SANITIZER_WINDOWS
+ return Func::template Apply<MappingGoWindows>(arg);
+# else
+ return Func::template Apply<MappingGo48>(arg);
+# endif
+#else // SANITIZER_GO
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
+ return Func::template Apply<MappingAppleAarch64>(arg);
+# elif defined(__x86_64__) || SANITIZER_APPLE
+ return Func::template Apply<Mapping48AddressSpace>(arg);
+# elif defined(__aarch64__)
switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return MappingImpl<Mapping44, Type>();
-#endif
- case 46: return MappingImpl<Mapping46, Type>();
- case 47: return MappingImpl<Mapping47, Type>();
+ case 39:
+ return Func::template Apply<MappingAarch64_39>(arg);
+ case 42:
+ return Func::template Apply<MappingAarch64_42>(arg);
+ case 48:
+ return Func::template Apply<MappingAarch64_48>(arg);
}
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
+# elif SANITIZER_LOONGARCH64
+ return Func::template Apply<MappingLoongArch64_47>(arg);
+# elif defined(__powerpc64__)
switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return MappingImpl<Mapping40, Type>();
-#else
- case 47: return MappingImpl<Mapping47, Type>();
-#endif
+ case 44:
+ return Func::template Apply<MappingPPC64_44>(arg);
+ case 46:
+ return Func::template Apply<MappingPPC64_46>(arg);
+ case 47:
+ return Func::template Apply<MappingPPC64_47>(arg);
}
- DCHECK(0);
- return 0;
-#else
- return MappingImpl<Mapping, Type>();
-#endif
+# elif defined(__mips64)
+ return Func::template Apply<MappingMips64_40>(arg);
+# elif defined(__s390x__)
+ return Func::template Apply<MappingS390x>(arg);
+# else
+# error "unsupported platform"
+# endif
+#endif
+ Die();
+}
+
+template <typename Func>
+void ForEachMapping() {
+ Func::template Apply<Mapping48AddressSpace>();
+ Func::template Apply<MappingMips64_40>();
+ Func::template Apply<MappingAppleAarch64>();
+ Func::template Apply<MappingAarch64_39>();
+ Func::template Apply<MappingAarch64_42>();
+ Func::template Apply<MappingAarch64_48>();
+ Func::template Apply<MappingLoongArch64_47>();
+ Func::template Apply<MappingPPC64_44>();
+ Func::template Apply<MappingPPC64_46>();
+ Func::template Apply<MappingPPC64_47>();
+ Func::template Apply<MappingS390x>();
+ Func::template Apply<MappingGo48>();
+ Func::template Apply<MappingGoWindows>();
+ Func::template Apply<MappingGoPPC64_46>();
+ Func::template Apply<MappingGoPPC64_47>();
+ Func::template Apply<MappingGoAarch64>();
+ Func::template Apply<MappingGoMips64_47>();
+ Func::template Apply<MappingGoS390x>();
}
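
For orientation, the protocol that SelectMapping and ForEachMapping expect from Func is just a struct exposing a templated static Apply. A hypothetical functor (not part of the patch) that computes the shadow range size of the mapping selected at run time would look like this:

struct ShadowSizeImpl {
  template <typename Mapping>
  static uptr Apply(uptr) {
    // Size of the shadow region of the chosen mapping.
    return Mapping::kShadowEnd - Mapping::kShadowBeg;
  }
};

ALWAYS_INLINE uptr ShadowSize() { return SelectMapping<ShadowSizeImpl>(0); }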
-#if !SANITIZER_GO
-ALWAYS_INLINE
-uptr LoAppMemBeg(void) {
- return MappingArchImpl<MAPPING_LO_APP_BEG>();
-}
-ALWAYS_INLINE
-uptr LoAppMemEnd(void) {
- return MappingArchImpl<MAPPING_LO_APP_END>();
-}
+enum MappingType {
+ kLoAppMemBeg,
+ kLoAppMemEnd,
+ kHiAppMemBeg,
+ kHiAppMemEnd,
+ kMidAppMemBeg,
+ kMidAppMemEnd,
+ kHeapMemBeg,
+ kHeapMemEnd,
+ kShadowBeg,
+ kShadowEnd,
+ kMetaShadowBeg,
+ kMetaShadowEnd,
+ kVdsoBeg,
+};
-#ifdef TSAN_MID_APP_RANGE
-ALWAYS_INLINE
-uptr MidAppMemBeg(void) {
- return MappingArchImpl<MAPPING_MID_APP_BEG>();
-}
-ALWAYS_INLINE
-uptr MidAppMemEnd(void) {
- return MappingArchImpl<MAPPING_MID_APP_END>();
-}
-#endif
+struct MappingField {
+ template <typename Mapping>
+ static uptr Apply(MappingType type) {
+ switch (type) {
+ case kLoAppMemBeg:
+ return Mapping::kLoAppMemBeg;
+ case kLoAppMemEnd:
+ return Mapping::kLoAppMemEnd;
+ case kMidAppMemBeg:
+ return Mapping::kMidAppMemBeg;
+ case kMidAppMemEnd:
+ return Mapping::kMidAppMemEnd;
+ case kHiAppMemBeg:
+ return Mapping::kHiAppMemBeg;
+ case kHiAppMemEnd:
+ return Mapping::kHiAppMemEnd;
+ case kHeapMemBeg:
+ return Mapping::kHeapMemBeg;
+ case kHeapMemEnd:
+ return Mapping::kHeapMemEnd;
+ case kVdsoBeg:
+ return Mapping::kVdsoBeg;
+ case kShadowBeg:
+ return Mapping::kShadowBeg;
+ case kShadowEnd:
+ return Mapping::kShadowEnd;
+ case kMetaShadowBeg:
+ return Mapping::kMetaShadowBeg;
+ case kMetaShadowEnd:
+ return Mapping::kMetaShadowEnd;
+ }
+ Die();
+ }
+};
ALWAYS_INLINE
-uptr HeapMemBeg(void) {
- return MappingArchImpl<MAPPING_HEAP_BEG>();
-}
+uptr LoAppMemBeg(void) { return SelectMapping<MappingField>(kLoAppMemBeg); }
ALWAYS_INLINE
-uptr HeapMemEnd(void) {
- return MappingArchImpl<MAPPING_HEAP_END>();
-}
+uptr LoAppMemEnd(void) { return SelectMapping<MappingField>(kLoAppMemEnd); }
ALWAYS_INLINE
-uptr HiAppMemBeg(void) {
- return MappingArchImpl<MAPPING_HI_APP_BEG>();
-}
+uptr MidAppMemBeg(void) { return SelectMapping<MappingField>(kMidAppMemBeg); }
ALWAYS_INLINE
-uptr HiAppMemEnd(void) {
- return MappingArchImpl<MAPPING_HI_APP_END>();
-}
+uptr MidAppMemEnd(void) { return SelectMapping<MappingField>(kMidAppMemEnd); }
ALWAYS_INLINE
-uptr VdsoBeg(void) {
- return MappingArchImpl<MAPPING_VDSO_BEG>();
-}
-
-#else
+uptr HeapMemBeg(void) { return SelectMapping<MappingField>(kHeapMemBeg); }
+ALWAYS_INLINE
+uptr HeapMemEnd(void) { return SelectMapping<MappingField>(kHeapMemEnd); }
ALWAYS_INLINE
-uptr AppMemBeg(void) {
- return MappingArchImpl<MAPPING_APP_BEG>();
-}
+uptr HiAppMemBeg(void) { return SelectMapping<MappingField>(kHiAppMemBeg); }
ALWAYS_INLINE
-uptr AppMemEnd(void) {
- return MappingArchImpl<MAPPING_APP_END>();
-}
-
-#endif
+uptr HiAppMemEnd(void) { return SelectMapping<MappingField>(kHiAppMemEnd); }
-static inline
-bool GetUserRegion(int i, uptr *start, uptr *end) {
- switch (i) {
- default:
- return false;
-#if !SANITIZER_GO
- case 0:
- *start = LoAppMemBeg();
- *end = LoAppMemEnd();
- return true;
- case 1:
- *start = HiAppMemBeg();
- *end = HiAppMemEnd();
- return true;
- case 2:
- *start = HeapMemBeg();
- *end = HeapMemEnd();
- return true;
-# ifdef TSAN_MID_APP_RANGE
- case 3:
- *start = MidAppMemBeg();
- *end = MidAppMemEnd();
- return true;
-# endif
-#else
- case 0:
- *start = AppMemBeg();
- *end = AppMemEnd();
- return true;
-#endif
- }
-}
-
-ALWAYS_INLINE
-uptr ShadowBeg(void) {
- return MappingArchImpl<MAPPING_SHADOW_BEG>();
-}
ALWAYS_INLINE
-uptr ShadowEnd(void) {
- return MappingArchImpl<MAPPING_SHADOW_END>();
-}
+uptr VdsoBeg(void) { return SelectMapping<MappingField>(kVdsoBeg); }
ALWAYS_INLINE
-uptr MetaShadowBeg(void) {
- return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
-}
+uptr ShadowBeg(void) { return SelectMapping<MappingField>(kShadowBeg); }
ALWAYS_INLINE
-uptr MetaShadowEnd(void) {
- return MappingArchImpl<MAPPING_META_SHADOW_END>();
-}
+uptr ShadowEnd(void) { return SelectMapping<MappingField>(kShadowEnd); }
ALWAYS_INLINE
-uptr TraceMemBeg(void) {
- return MappingArchImpl<MAPPING_TRACE_BEG>();
-}
+uptr MetaShadowBeg(void) { return SelectMapping<MappingField>(kMetaShadowBeg); }
ALWAYS_INLINE
-uptr TraceMemEnd(void) {
- return MappingArchImpl<MAPPING_TRACE_END>();
-}
-
+uptr MetaShadowEnd(void) { return SelectMapping<MappingField>(kMetaShadowEnd); }
-template<typename Mapping>
-bool IsAppMemImpl(uptr mem) {
-#if !SANITIZER_GO
+struct IsAppMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
-# ifdef TSAN_MID_APP_RANGE
(mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
-# endif
(mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
(mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
-#else
- return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
-#endif
-}
-
-ALWAYS_INLINE
-bool IsAppMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return IsAppMemImpl<Mapping39>(mem);
- case 42: return IsAppMemImpl<Mapping42>(mem);
- case 48: return IsAppMemImpl<Mapping48>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return IsAppMemImpl<Mapping44>(mem);
-#endif
- case 46: return IsAppMemImpl<Mapping46>(mem);
- case 47: return IsAppMemImpl<Mapping47>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return IsAppMemImpl<Mapping40>(mem);
-#else
- case 47: return IsAppMemImpl<Mapping47>(mem);
-#endif
}
- DCHECK(0);
- return false;
-#else
- return IsAppMemImpl<Mapping>(mem);
-#endif
-}
-
-
-template<typename Mapping>
-bool IsShadowMemImpl(uptr mem) {
- return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
-}
+};
ALWAYS_INLINE
-bool IsShadowMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return IsShadowMemImpl<Mapping39>(mem);
- case 42: return IsShadowMemImpl<Mapping42>(mem);
- case 48: return IsShadowMemImpl<Mapping48>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return IsShadowMemImpl<Mapping44>(mem);
-#endif
- case 46: return IsShadowMemImpl<Mapping46>(mem);
- case 47: return IsShadowMemImpl<Mapping47>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return IsShadowMemImpl<Mapping40>(mem);
-#else
- case 47: return IsShadowMemImpl<Mapping47>(mem);
-#endif
- }
- DCHECK(0);
- return false;
-#else
- return IsShadowMemImpl<Mapping>(mem);
-#endif
-}
+bool IsAppMem(uptr mem) { return SelectMapping<IsAppMemImpl>(mem); }
-
-template<typename Mapping>
-bool IsMetaMemImpl(uptr mem) {
- return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
-}
-
-ALWAYS_INLINE
-bool IsMetaMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return IsMetaMemImpl<Mapping39>(mem);
- case 42: return IsMetaMemImpl<Mapping42>(mem);
- case 48: return IsMetaMemImpl<Mapping48>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return IsMetaMemImpl<Mapping44>(mem);
-#endif
- case 46: return IsMetaMemImpl<Mapping46>(mem);
- case 47: return IsMetaMemImpl<Mapping47>(mem);
+struct IsShadowMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
}
- DCHECK(0);
- return false;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return IsMetaMemImpl<Mapping40>(mem);
-#else
- case 47: return IsMetaMemImpl<Mapping47>(mem);
-#endif
- }
- DCHECK(0);
- return false;
-#else
- return IsMetaMemImpl<Mapping>(mem);
-#endif
-}
-
-
-template<typename Mapping>
-uptr MemToShadowImpl(uptr x) {
- DCHECK(IsAppMem(x));
-#if !SANITIZER_GO
- return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
- ^ Mapping::kAppMemXor) * kShadowCnt;
-#else
-# ifndef SANITIZER_WINDOWS
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
-# else
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
-# endif
-#endif
-}
+};
ALWAYS_INLINE
-uptr MemToShadow(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return MemToShadowImpl<Mapping39>(x);
- case 42: return MemToShadowImpl<Mapping42>(x);
- case 48: return MemToShadowImpl<Mapping48>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return MemToShadowImpl<Mapping44>(x);
-#endif
- case 46: return MemToShadowImpl<Mapping46>(x);
- case 47: return MemToShadowImpl<Mapping47>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return MemToShadowImpl<Mapping40>(x);
-#else
- case 47: return MemToShadowImpl<Mapping47>(x);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return MemToShadowImpl<Mapping>(x);
-#endif
+bool IsShadowMem(RawShadow *p) {
+ return SelectMapping<IsShadowMemImpl>(reinterpret_cast<uptr>(p));
}
-
-template<typename Mapping>
-u32 *MemToMetaImpl(uptr x) {
- DCHECK(IsAppMem(x));
-#if !SANITIZER_GO
- return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
- kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
-#else
-# ifndef SANITIZER_WINDOWS
- return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
-# else
- return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
-# endif
-#endif
-}
+struct IsMetaMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+ }
+};
ALWAYS_INLINE
-u32 *MemToMeta(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return MemToMetaImpl<Mapping39>(x);
- case 42: return MemToMetaImpl<Mapping42>(x);
- case 48: return MemToMetaImpl<Mapping48>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return MemToMetaImpl<Mapping44>(x);
-#endif
- case 46: return MemToMetaImpl<Mapping46>(x);
- case 47: return MemToMetaImpl<Mapping47>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return MemToMetaImpl<Mapping40>(x);
-#else
- case 47: return MemToMetaImpl<Mapping47>(x);
-#endif
+bool IsMetaMem(const u32 *p) {
+ return SelectMapping<IsMetaMemImpl>(reinterpret_cast<uptr>(p));
+}
+
+struct MemToShadowImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr x) {
+ DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+ return (((x) & ~(Mapping::kShadowMsk | (kShadowCell - 1))) ^
+ Mapping::kShadowXor) *
+ kShadowMultiplier +
+ Mapping::kShadowAdd;
}
- DCHECK(0);
- return 0;
-#else
- return MemToMetaImpl<Mapping>(x);
-#endif
-}
-
-
-template<typename Mapping>
-uptr ShadowToMemImpl(uptr s) {
- DCHECK(IsShadowMem(s));
-#if !SANITIZER_GO
- // The shadow mapping is non-linear and we've lost some bits, so we don't have
- // an easy way to restore the original app address. But the mapping is a
- // bijection, so we try to restore the address as belonging to low/mid/high
- // range consecutively and see if shadow->app->shadow mapping gives us the
- // same address.
- uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
- if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
- MemToShadow(p) == s)
- return p;
-# ifdef TSAN_MID_APP_RANGE
- p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
- (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
- if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
- MemToShadow(p) == s)
- return p;
-# endif
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else // #if !SANITIZER_GO
-# ifndef SANITIZER_WINDOWS
- return (s & ~Mapping::kShadowBeg) / kShadowCnt;
-# else
- return (s - Mapping::kShadowBeg) / kShadowCnt;
-# endif // SANITIZER_WINDOWS
-#endif
-}
+};
ALWAYS_INLINE
-uptr ShadowToMem(uptr s) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return ShadowToMemImpl<Mapping39>(s);
- case 42: return ShadowToMemImpl<Mapping42>(s);
- case 48: return ShadowToMemImpl<Mapping48>(s);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return ShadowToMemImpl<Mapping44>(s);
-#endif
- case 46: return ShadowToMemImpl<Mapping46>(s);
- case 47: return ShadowToMemImpl<Mapping47>(s);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return ShadowToMemImpl<Mapping40>(s);
-#else
- case 47: return ShadowToMemImpl<Mapping47>(s);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return ShadowToMemImpl<Mapping>(s);
-#endif
+RawShadow *MemToShadow(uptr x) {
+ return reinterpret_cast<RawShadow *>(SelectMapping<MemToShadowImpl>(x));
}
-
-
-// The additional page is to catch shadow stack overflow as paging fault.
-// Windows wants 64K alignment for mmaps.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
- + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
-
-template<typename Mapping>
-uptr GetThreadTraceImpl(int tid) {
- uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, Mapping::kTraceMemEnd);
- return p;
-}
+struct MemToMetaImpl {
+ template <typename Mapping>
+ static u32 *Apply(uptr x) {
+ DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+ return (u32 *)(((((x) & ~(Mapping::kShadowMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) |
+ Mapping::kMetaShadowBeg);
+ }
+};
ALWAYS_INLINE
-uptr GetThreadTrace(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return GetThreadTraceImpl<Mapping39>(tid);
- case 42: return GetThreadTraceImpl<Mapping42>(tid);
- case 48: return GetThreadTraceImpl<Mapping48>(tid);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return GetThreadTraceImpl<Mapping44>(tid);
-#endif
- case 46: return GetThreadTraceImpl<Mapping46>(tid);
- case 47: return GetThreadTraceImpl<Mapping47>(tid);
+u32 *MemToMeta(uptr x) { return SelectMapping<MemToMetaImpl>(x); }
+
+struct ShadowToMemImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr sp) {
+ if (!IsShadowMemImpl::Apply<Mapping>(sp))
+ return 0;
+ // The shadow mapping is non-linear and we've lost some bits, so we don't
+ // have an easy way to restore the original app address. But the mapping is
+ // a bijection, so we try to restore the address as belonging to
+ // low/mid/high range consecutively and see if shadow->app->shadow mapping
+ // gives us the same address.
+ uptr p =
+ ((sp - Mapping::kShadowAdd) / kShadowMultiplier) ^ Mapping::kShadowXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadowImpl::Apply<Mapping>(p) == sp)
+ return p;
+ if (Mapping::kMidAppMemBeg) {
+ uptr p_mid = p + (Mapping::kMidAppMemBeg & Mapping::kShadowMsk);
+ if (p_mid >= Mapping::kMidAppMemBeg && p_mid < Mapping::kMidAppMemEnd &&
+ MemToShadowImpl::Apply<Mapping>(p_mid) == sp)
+ return p_mid;
+ }
+ return p | Mapping::kShadowMsk;
}
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return GetThreadTraceImpl<Mapping40>(tid);
-#else
- case 47: return GetThreadTraceImpl<Mapping47>(tid);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return GetThreadTraceImpl<Mapping>(tid);
-#endif
-}
-
-
-template<typename Mapping>
-uptr GetThreadTraceHeaderImpl(int tid) {
- uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
- + kTraceSize * sizeof(Event);
- DCHECK_LT(p, Mapping::kTraceMemEnd);
- return p;
-}
+};
ALWAYS_INLINE
-uptr GetThreadTraceHeader(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
- case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
- case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
-#endif
- case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
- case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
+uptr ShadowToMem(RawShadow *s) {
+ return SelectMapping<ShadowToMemImpl>(reinterpret_cast<uptr>(s));
+}
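
As a sanity check on the linear form shadow = ((app & ~(kShadowMsk | (kShadowCell - 1))) ^ kShadowXor) * kShadowMultiplier + kShadowAdd, here is a stand-alone sketch using the MappingGoS390x constants from the hunk above. The cell size and multiplier are assumptions for illustration; their real definitions live elsewhere in the runtime and are not shown in this diff.

#include <cassert>
#include <cstdint>

// Assumed values, illustration only.
constexpr uint64_t kCell = 8;  // app bytes covered by one shadow cell (assumed)
constexpr uint64_t kMul = 2;   // shadow bytes per app byte (assumed)
// MappingGoS390x: kShadowMsk = 0, kShadowXor = 0, kShadowAdd = 0x400000000000.
constexpr uint64_t kMsk = 0, kXor = 0, kAdd = 0x400000000000ull;

constexpr uint64_t ToShadow(uint64_t app) {
  return ((app & ~(kMsk | (kCell - 1))) ^ kXor) * kMul + kAdd;
}
constexpr uint64_t ToApp(uint64_t shadow) {
  return ((shadow - kAdd) / kMul) ^ kXor;
}

int main() {
  uint64_t app = 0x12345679ull;               // some address in the Go heap
  uint64_t sh = ToShadow(app);                // 0x40002468acf0
  assert(sh >= 0x400000000000ull && sh < 0x600000000000ull);
  assert(ToApp(sh) == (app & ~(kCell - 1)));  // inverse yields the cell base
  return 0;
}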
+
+// Compresses addr to kCompressedAddrBits stored in least significant bits.
+ALWAYS_INLINE uptr CompressAddr(uptr addr) {
+ return addr & ((1ull << kCompressedAddrBits) - 1);
+}
+
+struct RestoreAddrImpl {
+ typedef uptr Result;
+ template <typename Mapping>
+ static Result Apply(uptr addr) {
+ // To restore the address we go over all app memory ranges and check if top
+ // 3 bits of the compressed addr match those of the app range. If yes, we
+ // assume that the compressed address comes from that range and restore the
+ // missing top bits to match the app range address.
+ const uptr ranges[] = {
+ Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd, Mapping::kMidAppMemBeg,
+ Mapping::kMidAppMemEnd, Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd,
+ Mapping::kHeapMemBeg, Mapping::kHeapMemEnd,
+ };
+ const uptr indicator = 0x0e0000000000ull;
+ const uptr ind_lsb = 1ull << LeastSignificantSetBitIndex(indicator);
+ for (uptr i = 0; i < ARRAY_SIZE(ranges); i += 2) {
+ uptr beg = ranges[i];
+ uptr end = ranges[i + 1];
+ if (beg == end)
+ continue;
+ for (uptr p = beg; p < end; p = RoundDown(p + ind_lsb, ind_lsb)) {
+ if ((addr & indicator) == (p & indicator))
+ return addr | (p & ~(ind_lsb - 1));
+ }
+ }
+ Printf("ThreadSanitizer: failed to restore address 0x%zx\n", addr);
+ Die();
}
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return GetThreadTraceHeaderImpl<Mapping40>(tid);
-#else
- case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return GetThreadTraceHeaderImpl<Mapping>(tid);
-#endif
+};
+
+// Restores compressed addr from kCompressedAddrBits to full representation.
+// This is called only during reporting and is not performance-critical.
+inline uptr RestoreAddr(uptr addr) {
+ return SelectMapping<RestoreAddrImpl>(addr);
}
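
A hypothetical debug helper (not in the patch) makes the intended round trip explicit: for an address that lies in one of the application ranges, restoring its compressed form must reproduce the original value.

// Hypothetical sanity check; CompressAddr keeps the low kCompressedAddrBits
// bits, RestoreAddr re-attaches the top bits of the matching app range.
ALWAYS_INLINE void DbgCheckAddrCompression(uptr addr) {
  if (!IsAppMem(addr))
    return;
  DCHECK_EQ(RestoreAddr(CompressAddr(addr)), addr);
}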
void InitializePlatform();
void InitializePlatformEarly();
void CheckAndProtect();
void InitializeShadowMemoryPlatform();
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
uptr ExtractLongJmpSp(uptr *env);
lib/tsan/tsan_platform_linux.cpp
@@ -66,7 +66,8 @@ extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif
-#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) && \
+ !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
@@ -85,78 +86,71 @@ static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif
-#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
-#endif
enum {
- MemTotal = 0,
- MemShadow = 1,
- MemMeta = 2,
- MemFile = 3,
- MemMmap = 4,
- MemTrace = 5,
- MemHeap = 6,
- MemOther = 7,
- MemCount = 8,
+ MemTotal,
+ MemShadow,
+ MemMeta,
+ MemFile,
+ MemMmap,
+ MemHeap,
+ MemOther,
+ MemCount,
};
-void FillProfileCallback(uptr p, uptr rss, bool file,
- uptr *mem, uptr stats_size) {
+void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
mem[MemTotal] += rss;
if (p >= ShadowBeg() && p < ShadowEnd())
mem[MemShadow] += rss;
else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
-#if !SANITIZER_GO
+ else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
+ (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
+ (p >= HiAppMemBeg() && p < HiAppMemEnd()))
+ mem[file ? MemFile : MemMmap] += rss;
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
- else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
- mem[file ? MemFile : MemMmap] += rss;
- else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
- mem[file ? MemFile : MemMmap] += rss;
-#else
- else if (p >= AppMemBeg() && p < AppMemEnd())
- mem[file ? MemFile : MemMmap] += rss;
-#endif
- else if (p >= TraceMemBeg() && p < TraceMemEnd())
- mem[MemTrace] += rss;
else
mem[MemOther] += rss;
}
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
uptr mem[MemCount];
- internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
- __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- StackDepotStats *stacks = StackDepotGetStats();
- internal_snprintf(buf, buf_size,
- "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
- " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+ internal_memset(mem, 0, sizeof(mem));
+ GetMemoryProfile(FillProfileCallback, mem);
+ auto meta = ctx->metamap.GetMemoryStats();
+ StackDepotStats stacks = StackDepotGetStats();
+ uptr nthread, nlive;
+ ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ uptr trace_mem;
+ {
+ Lock l(&ctx->slot_mtx);
+ trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
+ }
+ uptr internal_stats[AllocatorStatCount];
+ internal_allocator()->GetStats(internal_stats);
+ // All these are allocated from the common mmap region.
+ mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
+ stacks.allocated + internal_stats[AllocatorStatMapped];
+ if (s64(mem[MemMmap]) < 0)
+ mem[MemMmap] = 0;
+ internal_snprintf(
+ buf, buf_size,
+ "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
+ " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
+ " trace:%zu stacks=%zd threads=%zu/%zu\n",
+ internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
- mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
- mem[MemHeap] >> 20, mem[MemOther] >> 20,
- stacks->allocated >> 20, stacks->n_uniq_ids,
- nlive, nthread);
-}
-
-#if SANITIZER_LINUX
-void FlushShadowMemoryCallback(
- const SuspendedThreadsList &suspended_threads_list,
- void *argument) {
- ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
-}
-#endif
-
-void FlushShadowMemory() {
-#if SANITIZER_LINUX
- StopTheWorld(FlushShadowMemoryCallback, 0);
-#endif
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
+ mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
+ meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
+ stacks.allocated >> 20, nlive, nthread);
}
#if !SANITIZER_GO
-// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
// First create temp file.
@@ -177,13 +171,14 @@ static void MapRodata() {
return;
internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
- // Fill the file with kShadowRodata.
- const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
- InternalMmapVector<u64> marker(kMarkerSize);
+ // Fill the file with Shadow::kRodata.
+ const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
+ InternalMmapVector<RawShadow> marker(kMarkerSize);
// volatile to prevent insertion of memset
- for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
- *p = kShadowRodata;
- internal_write(fd, marker.data(), marker.size() * sizeof(u64));
+ for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
+ p++)
+ *p = Shadow::kRodata;
+ internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
// Map the file into memory.
uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
@@ -203,9 +198,10 @@ static void MapRodata() {
char *shadow_start = (char *)MemToShadow(segment.start);
char *shadow_end = (char *)MemToShadow(segment.end);
for (char *p = shadow_start; p < shadow_end;
- p += marker.size() * sizeof(u64)) {
- internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
- PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+ p += marker.size() * sizeof(RawShadow)) {
+ internal_mmap(
+ p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
+ PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
}
}
}
@@ -219,7 +215,6 @@ void InitializeShadowMemoryPlatform() {
#endif // #if !SANITIZER_GO
void InitializePlatformEarly() {
-#ifdef TSAN_RUNTIME_VMA
vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
@@ -236,6 +231,14 @@ void InitializePlatformEarly() {
Die();
}
#endif
+#elif SANITIZER_LOONGARCH64
+# if !SANITIZER_GO
+ if (vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
+ Die();
+ }
+# endif
#elif defined(__powerpc64__)
# if !SANITIZER_GO
if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
@@ -265,7 +268,6 @@ void InitializePlatformEarly() {
}
# endif
#endif
-#endif
}
void InitializePlatform() {
@@ -297,11 +299,12 @@ void InitializePlatform() {
SetAddressSpaceUnlimited();
reexec = true;
}
-#if SANITIZER_LINUX && defined(__aarch64__)
+#if SANITIZER_ANDROID && (defined(__aarch64__) || defined(__x86_64__))
// After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
// linux kernel, the random gap between stack and mapped area is increased
// from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
// this big range, we should disable randomized virtual space on aarch64.
+ // ASLR personality check.
int old_personality = personality(0xffffffff);
if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
VReport(1, "WARNING: Program is run with randomized virtual address "
@@ -310,6 +313,9 @@ void InitializePlatform() {
CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
reexec = true;
}
+
+#endif
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64))
// Initialize the xor key used in {sig}{set,long}jump.
InitializeLongjmpXorKey();
#endif
@@ -341,7 +347,7 @@ int ExtractResolvFDs(void *state, int *fds, int nfd) {
}
// Extract file descriptors passed via UNIX domain sockets.
-// This is requried to properly handle "open" of these fds.
+// This is required to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
int res = 0;
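
For context only (the interceptor body is truncated in this hunk), the control-message walk that such an fd extraction performs typically looks like the following sketch; CollectPassedFds is an illustrative name, not a function in the runtime.

#include <sys/socket.h>

// Collect up to nfd descriptors passed via SCM_RIGHTS control messages.
static int CollectPassedFds(struct msghdr *msg, int *fds, int nfd) {
  int n = 0;
  for (struct cmsghdr *c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c)) {
    if (c->cmsg_level != SOL_SOCKET || c->cmsg_type != SCM_RIGHTS)
      continue;
    int cnt = (int)((c->cmsg_len - CMSG_LEN(0)) / sizeof(int));
    int *payload = (int *)CMSG_DATA(c);
    for (int i = 0; i < cnt && n < nfd; i++) fds[n++] = payload[i];
  }
  return n;
}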
@@ -382,6 +388,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
# else
return mangled_sp;
# endif
+#elif defined(__loongarch_lp64)
+ return mangled_sp ^ longjmp_xor_key;
#elif defined(__powerpc64__)
// Reverse of:
// ld r4, -28696(r13)
@@ -409,10 +417,16 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
-# define LONG_JMP_SP_ENV_SLOT 2
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 1
+# else
+# define LONG_JMP_SP_ENV_SLOT 2
+# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__loongarch__)
+# define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__mips64)
# define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__s390x__)
@@ -439,7 +453,11 @@ static void InitializeLongjmpXorKey() {
// 2. Retrieve vanilla/mangled SP.
uptr sp;
+#ifdef __loongarch__
+ asm("move %0, $sp" : "=r" (sp));
+#else
asm("mov %0, sp" : "=r" (sp));
+#endif
uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];
// 3. xor SPs to obtain key.
@@ -447,6 +465,8 @@ static void InitializeLongjmpXorKey() {
}
#endif
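
The xor-key recovery relies on a simple identity: if libc stores sp ^ key in the jmp_buf slot, one observed (sp, mangled_sp) pair is enough to reconstruct key, and xoring again unmangles any later saved SP. A tiny illustration with made-up values:

// Illustration only; the addresses are invented.
constexpr unsigned long long kSp = 0x7fffffffe000ull;
constexpr unsigned long long kMangledSp = 0x7fffa53ce9b3ull;
constexpr unsigned long long kKey = kSp ^ kMangledSp;  // 0x5ac309b3
static_assert((kMangledSp ^ kKey) == kSp, "unmangling recovers the real sp");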
+extern "C" void __tsan_tls_initialization() {}
+
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// Check that the thr object is in tls;
const uptr thr_beg = (uptr)thr;
@@ -456,9 +476,10 @@ void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
CHECK_GE(thr_end, tls_addr);
CHECK_LE(thr_end, tls_addr + tls_size);
// Since the thr object is huge, skip it.
- MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
- MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
- tls_addr + tls_size - thr_end);
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_tls_initialization));
+ MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
}
// Note: this function runs with async signals enabled,
lib/tsan/tsan_platform_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -25,6 +25,7 @@
#include "tsan_rtl.h"
#include "tsan_flags.h"
+#include <limits.h>
#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
@@ -45,76 +46,86 @@
namespace __tsan {
#if !SANITIZER_GO
-static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
- atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
- void *val = (void *)atomic_load_relaxed(a);
- atomic_signal_fence(memory_order_acquire); // Turns the previous load into
- // acquire wrt signals.
- if (UNLIKELY(val == nullptr)) {
- val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- CHECK(val);
- void *cmp = nullptr;
- if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
- memory_order_acq_rel)) {
- internal_munmap(val, size);
- val = cmp;
- }
- }
- return val;
+static char main_thread_state[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
+static ThreadState *dead_thread_state;
+static pthread_key_t thread_state_key;
+
+// We rely on the following documented, but Darwin-specific behavior to keep the
+// reference to the ThreadState object alive in TLS:
+// pthread_key_create man page:
+// If, after all the destructors have been called for all non-NULL values with
+// associated destructors, there are still some non-NULL values with
+// associated destructors, then the process is repeated. If, after at least
+// [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
+// outstanding non-NULL values, there are still some non-NULL values with
+// associated destructors, the implementation stops calling destructors.
+static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
+static void ThreadStateDestructor(void *thr) {
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
}
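
Stripped of the TSan specifics, the Darwin keep-alive pattern that ThreadStateDestructor relies on is the following sketch (not the runtime's code):

#include <pthread.h>

static pthread_key_t key;

// Re-setting the slot makes it non-NULL again, so pthread runs another
// destructor pass (up to PTHREAD_DESTRUCTOR_ITERATIONS); after the last pass
// Darwin leaves the value in place, keeping it readable until thread exit.
static void KeepAlive(void *value) { pthread_setspecific(key, value); }

static void InitKey() { pthread_key_create(&key, KeepAlive); }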
-// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
-// problematic, because there are several places where interceptors are called
-// when TLVs are not accessible (early process startup, thread cleanup, ...).
-// The following provides a "poor man's TLV" implementation, where we use the
-// shadow memory of the pointer returned by pthread_self() to store a pointer to
-// the ThreadState object. The main thread's ThreadState is stored separately
-// in a static variable, because we need to access it even before the
-// shadow memory is set up.
-static uptr main_thread_identity = 0;
-ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
-static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;
-
-// We cannot use pthread_self() before libpthread has been initialized. Our
-// current heuristic for guarding this is checking `main_thread_identity` which
-// is only assigned in `__tsan::InitializePlatform`.
-static ThreadState **cur_thread_location() {
- if (main_thread_identity == 0)
- return &main_thread_state_loc;
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity)
- return &main_thread_state_loc;
- return (ThreadState **)MemToShadow(thread_identity);
+static void InitializeThreadStateStorage() {
+ int res;
+ CHECK_EQ(thread_state_key, 0);
+ res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
+ CHECK_EQ(res, 0);
+ res = pthread_setspecific(thread_state_key, main_thread_state);
+ CHECK_EQ(res, 0);
+
+ auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ dts->fast_state.SetIgnoreBit();
+ dts->ignore_interceptors = 1;
+ dts->is_dead = true;
+ const_cast<Tid &>(dts->tid) = kInvalidTid;
+ res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ); // immutable
+ CHECK_EQ(res, 0);
+ dead_thread_state = dts;
}
ThreadState *cur_thread() {
- return (ThreadState *)SignalSafeGetOrAllocate(
- (uptr *)cur_thread_location(), sizeof(ThreadState));
+ // Some interceptors get called before libpthread has been initialized and in
+ // these cases we must avoid calling any pthread APIs.
+ if (UNLIKELY(!thread_state_key)) {
+ return (ThreadState *)main_thread_state;
+ }
+
+ // We only reach this line after InitializeThreadStateStorage() ran, i.e.,
+ // after TSan (and therefore libpthread) have been initialized.
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ if (UNLIKELY(!thr)) {
+ thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
+ }
+ return thr;
}
void set_cur_thread(ThreadState *thr) {
- *cur_thread_location() = thr;
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
}
-// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
-// munmap first and then clear `fake_tls`; if we receive a signal in between,
-// handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
- ThreadState **thr_state_loc = cur_thread_location();
- if (thr_state_loc == &main_thread_state_loc) {
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ CHECK(thr);
+ if (thr == (ThreadState *)main_thread_state) {
// Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
// exit the main thread. Let's keep the main thread's ThreadState.
return;
}
- internal_munmap(*thr_state_loc, sizeof(ThreadState));
- *thr_state_loc = nullptr;
+ // Intercepted functions can still get called after cur_thread_finalize()
+ // (called from DestroyThreadState()), so put a fake thread state for "dead"
+ // threads. An alternative solution would be to release the ThreadState
+ // object from THREAD_DESTROY (which is delivered later and on the parent
+ // thread) instead of THREAD_TERMINATE.
+ int res = pthread_setspecific(thread_state_key, dead_thread_state);
+ CHECK_EQ(res, 0);
+ UnmapOrDie(thr, sizeof(ThreadState));
}
#endif
-void FlushShadowMemory() {
-}
-
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
vm_address_t address = start;
vm_address_t end_address = end;
@@ -139,15 +150,13 @@ static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
*dirty = dirty_pages * GetPageSizeCached();
}
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
uptr shadow_res, shadow_dirty;
uptr meta_res, meta_dirty;
- uptr trace_res, trace_dirty;
RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
- RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
-#if !SANITIZER_GO
+# if !SANITIZER_GO
uptr low_res, low_dirty;
uptr high_res, high_dirty;
uptr heap_res, heap_dirty;
@@ -156,89 +165,70 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#else // !SANITIZER_GO
uptr app_res, app_dirty;
- RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
#endif
- StackDepotStats *stacks = StackDepotGetStats();
- internal_snprintf(buf, buf_size,
- "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-#if !SANITIZER_GO
- "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-#else // !SANITIZER_GO
- "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-#endif
- "stacks: %zd unique IDs, %zd kB allocated\n"
- "threads: %zd total, %zd live\n"
- "------------------------------\n",
- ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
- MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
- TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
-#if !SANITIZER_GO
- LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
- HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
- HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
-#else // !SANITIZER_GO
- AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
-#endif
- stacks->n_uniq_ids, stacks->allocated / 1024,
- nthread, nlive);
+ StackDepotStats stacks = StackDepotGetStats();
+ uptr nthread, nlive;
+ ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ internal_snprintf(
+ buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# endif
+ "stacks: %zd unique IDs, %zd kB allocated\n"
+ "threads: %zd total, %zd live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+# if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+# else // !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
+# endif
+ stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
}
-#if !SANITIZER_GO
+# if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
-// On OS X, GCD worker threads are created without a call to pthread_create. We
-// need to properly register these threads with ThreadCreate and ThreadStart.
-// These threads don't have a parent thread, as they are created "spuriously".
-// We're using a libpthread API that notifies us about a newly created thread.
-// The `thread == pthread_self()` check indicates this is actually a worker
-// thread. If it's just a regular thread, this hook is called on the parent
-// thread.
-typedef void (*pthread_introspection_hook_t)(unsigned int event,
- pthread_t thread, void *addr,
- size_t size);
-extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
- pthread_introspection_hook_t hook);
-static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
-static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
-static pthread_introspection_hook_t prev_pthread_introspection_hook;
-static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
- void *addr, size_t size) {
- if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
- if (thread == pthread_self()) {
- // The current thread is a newly created GCD worker thread.
- ThreadState *thr = cur_thread();
- Processor *proc = ProcCreate();
- ProcWire(proc, thr);
- ThreadState *parent_thread_state = nullptr; // No parent.
- int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
- CHECK_NE(tid, 0);
- ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
- }
- } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
- if (thread == pthread_self()) {
- ThreadState *thr = cur_thread();
- if (thr->tctx) {
- DestroyThreadState();
- }
- }
+// Register GCD worker threads, which are created without an observable call to
+// pthread_create().
+static void ThreadCreateCallback(uptr thread, bool gcd_worker) {
+ if (gcd_worker) {
+ ThreadState *thr = cur_thread();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadState *parent_thread_state = nullptr; // No parent.
+ Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+ CHECK_NE(tid, kMainTid);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
}
+}
- if (prev_pthread_introspection_hook != nullptr)
- prev_pthread_introspection_hook(event, thread, addr, size);
+// Destroy thread state for *all* threads.
+static void ThreadTerminateCallback(uptr thread) {
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
+ }
}
#endif
void InitializePlatformEarly() {
-#if !SANITIZER_GO && !HAS_48_BIT_ADDRESS_SPACE
+# if !SANITIZER_GO && SANITIZER_IOS
uptr max_vm = GetMaxUserVirtualAddress() + 1;
- if (max_vm != Mapping::kHiAppMemEnd) {
+ if (max_vm != HiAppMemEnd()) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
- max_vm, Mapping::kHiAppMemEnd);
+ (void *)max_vm, (void *)HiAppMemEnd());
Die();
}
#endif
@@ -251,11 +241,13 @@ void InitializePlatform() {
#if !SANITIZER_GO
CheckAndProtect();
- CHECK_EQ(main_thread_identity, 0);
- main_thread_identity = (uptr)pthread_self();
+ InitializeThreadStateStorage();
- prev_pthread_introspection_hook =
- pthread_introspection_hook_install(&my_pthread_introspection_hook);
+ ThreadEventCallbacks callbacks = {
+ .create = ThreadCreateCallback,
+ .terminate = ThreadTerminateCallback,
+ };
+ InstallPthreadIntrospectionHook(callbacks);
#endif
if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
@@ -281,25 +273,14 @@ uptr ExtractLongJmpSp(uptr *env) {
}
#if !SANITIZER_GO
+extern "C" void __tsan_tls_initialization() {}
+
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
- // The pointer to the ThreadState object is stored in the shadow memory
- // of the tls.
- uptr tls_end = tls_addr + tls_size;
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity) {
- MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
- } else {
- uptr thr_state_start = thread_identity;
- uptr thr_state_end = thr_state_start + sizeof(uptr);
- CHECK_GE(thr_state_start, tls_addr);
- CHECK_LE(thr_state_start, tls_addr + tls_size);
- CHECK_GE(thr_state_end, tls_addr);
- CHECK_LE(thr_state_end, tls_addr + tls_size);
- MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
- thr_state_start - tls_addr);
- MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
- tls_end - thr_state_end);
- }
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_tls_initialization));
+ // Unlike Linux, we only store a pointer to the ThreadState object in TLS;
+ // just mark the entire range as written to.
+ MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
}
#endif
@@ -320,4 +301,4 @@ int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
} // namespace __tsan
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
lib/tsan/tsan_platform_posix.cpp
@@ -14,12 +14,14 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_errno.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
+# include <dlfcn.h>
+
+# include "sanitizer_common/sanitizer_common.h"
+# include "sanitizer_common/sanitizer_errno.h"
+# include "sanitizer_common/sanitizer_libc.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+# include "tsan_platform.h"
+# include "tsan_rtl.h"
namespace __tsan {
@@ -29,7 +31,8 @@ static const char kShadowMemoryMappingHint[] =
"HINT: if %s is not supported in your environment, you may set "
"TSAN_OPTIONS=%s=0\n";
-static void DontDumpShadow(uptr addr, uptr size) {
+# if !SANITIZER_GO
+void DontDumpShadow(uptr addr, uptr size) {
if (common_flags()->use_madv_dontdump)
if (!DontDumpShadowMemory(addr, size)) {
Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
@@ -39,7 +42,6 @@ static void DontDumpShadow(uptr addr, uptr size) {
}
}
-#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
@@ -70,6 +72,11 @@ void InitializeShadowMemory() {
meta, meta + meta_size, meta_size >> 30);
InitializeShadowMemoryPlatform();
+
+ on_initialize = reinterpret_cast<void (*)(void)>(
+ dlsym(RTLD_DEFAULT, "__tsan_on_initialize"));
+ on_finalize =
+ reinterpret_cast<int (*)(int)>(dlsym(RTLD_DEFAULT, "__tsan_on_finalize"));
}
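
These two symbols are resolved from the main program so that user code can hook runtime start-up and shutdown. A minimal user-side sketch, with the signatures as declared in tsan_interface.h (verify the exact return-value semantics there):

// In the instrumented application, not in the runtime:
extern "C" void __tsan_on_initialize() {
  // Runs once the runtime has finished initializing.
}
extern "C" int __tsan_on_finalize(int failed) {
  // Runs at shutdown; the returned value replaces `failed`.
  return failed;
}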
static bool TryProtectRange(uptr beg, uptr end) {
@@ -98,32 +105,28 @@ void CheckAndProtect() {
continue;
if (segment.start >= VdsoBeg()) // vdso
break;
- Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping 0x%zx-0x%zx\n",
segment.start, segment.end);
Die();
}
-#if defined(__aarch64__) && defined(__APPLE__) && !HAS_48_BIT_ADDRESS_SPACE
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
ProtectRange(HeapMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
- ProtectRange(MetaShadowEnd(), TraceMemBeg());
-#else
+ ProtectRange(MetaShadowEnd(), HiAppMemBeg());
+# else
ProtectRange(LoAppMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
-#ifdef TSAN_MID_APP_RANGE
- ProtectRange(MetaShadowEnd(), MidAppMemBeg());
- ProtectRange(MidAppMemEnd(), TraceMemBeg());
-#else
- ProtectRange(MetaShadowEnd(), TraceMemBeg());
-#endif
- // Memory for traces is mapped lazily in MapThreadTrace.
- // Protect the whole range for now, so that user does not map something here.
- ProtectRange(TraceMemBeg(), TraceMemEnd());
- ProtectRange(TraceMemEnd(), HeapMemBeg());
+ if (MidAppMemBeg()) {
+ ProtectRange(MetaShadowEnd(), MidAppMemBeg());
+ ProtectRange(MidAppMemEnd(), HeapMemBeg());
+ } else {
+ ProtectRange(MetaShadowEnd(), HeapMemBeg());
+ }
ProtectRange(HeapEnd(), HiAppMemBeg());
-#endif
+# endif
-#if defined(__s390x__)
+# if defined(__s390x__)
// Protect the rest of the address space.
const uptr user_addr_max_l4 = 0x0020000000000000ull;
const uptr user_addr_max_l5 = 0xfffffffffffff000ull;
lib/tsan/tsan_platform_windows.cpp
@@ -0,0 +1,33 @@
+//===-- tsan_platform_windows.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Windows-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#include "tsan_platform.h"
+
+#include <stdlib.h>
+
+namespace __tsan {
+
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {}
+
+void InitializePlatformEarly() {
+}
+
+void InitializePlatform() {
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_WINDOWS
lib/tsan/tsan_report.cpp
@@ -19,22 +19,6 @@
namespace __tsan {
-ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
-
-ReportStack *ReportStack::New() {
- void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
- return new(mem) ReportStack();
-}
-
-ReportLocation::ReportLocation(ReportLocationType type)
- : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
- fd(0), suppressable(false), stack(nullptr) {}
-
-ReportLocation *ReportLocation::New(ReportLocationType type) {
- void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
- return new(mem) ReportLocation(type);
-}
-
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
@@ -68,7 +52,7 @@ ReportDesc::~ReportDesc() {
#if !SANITIZER_GO
const int kThreadBufSize = 32;
-const char *thread_name(char *buf, int tid) {
+const char *thread_name(char *buf, Tid tid) {
if (tid == kMainTid)
return "main thread";
internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
@@ -114,12 +98,6 @@ static const char *ReportTypeString(ReportType typ, uptr tag) {
UNREACHABLE("missing case");
}
-#if SANITIZER_MAC
-static const char *const kInterposedFunctionPrefix = "wrap_";
-#else
-static const char *const kInterposedFunctionPrefix = "__interceptor_";
-#endif
-
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n\n");
@@ -131,7 +109,7 @@ void PrintStack(const ReportStack *ent) {
RenderFrame(&res, common_flags()->stack_trace_format, i,
frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ common_flags()->strip_path_prefix);
Printf("%s\n", res.data());
}
Printf("\n");
@@ -142,7 +120,7 @@ static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
if (i == 0)
Printf(" (mutexes:");
const ReportMopMutex m = mset[i];
- Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(" %s M%u", m.write ? "write" : "read", m.id);
Printf(i == mset.Size() - 1 ? ")" : ",");
}
}
@@ -189,23 +167,25 @@ static void PrintLocation(const ReportLocation *loc) {
if (loc->type == ReportLocationGlobal) {
const DataInfo &global = loc->global;
if (global.size != 0)
- Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
- global.name, global.size, global.start,
+ Printf(" Location is global '%s' of size %zu at %p (%s+0x%zx)\n\n",
+ global.name, global.size, reinterpret_cast<void *>(global.start),
StripModuleName(global.module), global.module_offset);
else
- Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name,
- global.start, StripModuleName(global.module),
- global.module_offset);
+ Printf(" Location is global '%s' at %p (%s+0x%zx)\n\n", global.name,
+ reinterpret_cast<void *>(global.start),
+ StripModuleName(global.module), global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
const char *object_type = GetObjectTypeFromTag(loc->external_tag);
if (!object_type) {
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->heap_chunk_size, loc->heap_chunk_start,
+ loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start),
thread_name(thrbuf, loc->tid));
} else {
Printf(" Location is %s of size %zu at %p allocated by %s:\n",
- object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+ object_type, loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start),
thread_name(thrbuf, loc->tid));
}
print_stack = true;
@@ -214,8 +194,9 @@ static void PrintLocation(const ReportLocation *loc) {
} else if (loc->type == ReportLocationTLS) {
Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationFD) {
- Printf(" Location is file descriptor %d created by %s at:\n",
- loc->fd, thread_name(thrbuf, loc->tid));
+ Printf(" Location is file descriptor %d %s by %s at:\n", loc->fd,
+ loc->fd_closed ? "destroyed" : "created",
+ thread_name(thrbuf, loc->tid));
print_stack = true;
}
Printf("%s", d.Default());
@@ -225,27 +206,23 @@ static void PrintLocation(const ReportLocation *loc) {
static void PrintMutexShort(const ReportMutex *rm, const char *after) {
Decorator d;
- Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
+ Printf("%sM%d%s%s", d.Mutex(), rm->id, d.Default(), after);
}
static void PrintMutexShortWithAddress(const ReportMutex *rm,
const char *after) {
Decorator d;
- Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
+ Printf("%sM%d (%p)%s%s", d.Mutex(), rm->id,
+ reinterpret_cast<void *>(rm->addr), d.Default(), after);
}
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
- if (rm->destroyed) {
- Printf("%s", d.Mutex());
- Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
- Printf("%s", d.Default());
- } else {
- Printf("%s", d.Mutex());
- Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
- Printf("%s", d.Default());
- PrintStack(rm->stack);
- }
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%u (%p) created at:\n", rm->id,
+ reinterpret_cast<void *>(rm->addr));
+ Printf("%s", d.Default());
+ PrintStack(rm->stack);
}
static void PrintThread(const ReportThread *rt) {
@@ -259,12 +236,13 @@ static void PrintThread(const ReportThread *rt) {
char thrbuf[kThreadBufSize];
const char *thread_status = rt->running ? "running" : "finished";
if (rt->thread_type == ThreadType::Worker) {
- Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+ Printf(" (tid=%llu, %s) is a GCD worker thread\n", rt->os_id,
+ thread_status);
Printf("\n");
Printf("%s", d.Default());
return;
}
- Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+ Printf(" (tid=%llu, %s) created by %s", rt->os_id, thread_status,
thread_name(thrbuf, rt->parent_tid));
if (rt->stack)
Printf(" at:");
@@ -300,6 +278,7 @@ static bool FrameIsInternal(const SymbolizedStack *frame) {
const char *module = frame->info.module;
if (file != 0 &&
(internal_strstr(file, "tsan_interceptors_posix.cpp") ||
+ internal_strstr(file, "tsan_interceptors_memintrinsics.cpp") ||
internal_strstr(file, "sanitizer_common_interceptors.inc") ||
internal_strstr(file, "tsan_interface_")))
return true;
@@ -323,6 +302,9 @@ void PrintReport(const ReportDesc *rep) {
(int)internal_getpid());
Printf("%s", d.Default());
+ if (rep->typ == ReportTypeErrnoInSignal)
+ Printf(" Signal %u handler invoked at:\n", rep->signum);
+
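// Example of the extra header line, assuming rep->signum == 14 (SIGALRM on
// Linux); the rendered text follows the format string above:
//   Signal 14 handler invoked at: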
if (rep->typ == ReportTypeDeadlock) {
char thrbuf[kThreadBufSize];
Printf(" Cycle in lock order graph: ");
@@ -394,7 +376,7 @@ void PrintReport(const ReportDesc *rep) {
#else // #if !SANITIZER_GO
-const u32 kMainGoroutineId = 1;
+const Tid kMainGoroutineId = 1;
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
@@ -405,16 +387,17 @@ void PrintStack(const ReportStack *ent) {
for (int i = 0; frame; frame = frame->next, i++) {
const AddressInfo &info = frame->info;
Printf(" %s()\n %s:%d +0x%zx\n", info.function,
- StripPathPrefix(info.file, common_flags()->strip_path_prefix),
- info.line, (void *)info.module_offset);
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, info.module_offset);
}
}
static void PrintMop(const ReportMop *mop, bool first) {
Printf("\n");
Printf("%s at %p by ",
- (first ? (mop->write ? "Write" : "Read")
- : (mop->write ? "Previous write" : "Previous read")), mop->addr);
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")),
+ reinterpret_cast<void *>(mop->addr));
if (mop->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
@@ -426,8 +409,8 @@ static void PrintLocation(const ReportLocation *loc) {
switch (loc->type) {
case ReportLocationHeap: {
Printf("\n");
- Printf("Heap block of size %zu at %p allocated by ",
- loc->heap_chunk_size, loc->heap_chunk_start);
+ Printf("Heap block of size %zu at %p allocated by ", loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start));
if (loc->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
@@ -438,8 +421,9 @@ static void PrintLocation(const ReportLocation *loc) {
case ReportLocationGlobal: {
Printf("\n");
Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
- loc->global.name, loc->global.size, loc->global.start,
- loc->global.file, loc->global.line);
+ loc->global.name, loc->global.size,
+ reinterpret_cast<void *>(loc->global.start), loc->global.file,
+ loc->global.line);
break;
}
default:
@@ -469,13 +453,13 @@ void PrintReport(const ReportDesc *rep) {
} else if (rep->typ == ReportTypeDeadlock) {
Printf("WARNING: DEADLOCK\n");
for (uptr i = 0; i < rep->mutexes.Size(); i++) {
- Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
- 999, rep->mutexes[i]->id,
- rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ Printf("Goroutine %d lock mutex %u while holding mutex %u:\n", 999,
+ rep->mutexes[i]->id,
+ rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
PrintStack(rep->stacks[2*i]);
Printf("\n");
- Printf("Mutex %d was previously locked here:\n",
- rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ Printf("Mutex %u was previously locked here:\n",
+ rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
PrintStack(rep->stacks[2*i + 1]);
Printf("\n");
}
lib/tsan/tsan_report.h
@@ -38,16 +38,12 @@ enum ReportType {
};
struct ReportStack {
- SymbolizedStack *frames;
- bool suppressable;
- static ReportStack *New();
-
- private:
- ReportStack();
+ SymbolizedStack *frames = nullptr;
+ bool suppressable = false;
};
struct ReportMopMutex {
- u64 id;
+ int id;
bool write;
};
@@ -73,35 +69,31 @@ enum ReportLocationType {
};
struct ReportLocation {
- ReportLocationType type;
- DataInfo global;
- uptr heap_chunk_start;
- uptr heap_chunk_size;
- uptr external_tag;
- int tid;
- int fd;
- bool suppressable;
- ReportStack *stack;
-
- static ReportLocation *New(ReportLocationType type);
- private:
- explicit ReportLocation(ReportLocationType type);
+ ReportLocationType type = ReportLocationGlobal;
+ DataInfo global = {};
+ uptr heap_chunk_start = 0;
+ uptr heap_chunk_size = 0;
+ uptr external_tag = 0;
+ Tid tid = kInvalidTid;
+ int fd = 0;
+ bool fd_closed = false;
+ bool suppressable = false;
+ ReportStack *stack = nullptr;
};
struct ReportThread {
- int id;
+ Tid id;
tid_t os_id;
bool running;
ThreadType thread_type;
char *name;
- u32 parent_tid;
+ Tid parent_tid;
ReportStack *stack;
};
struct ReportMutex {
- u64 id;
+ int id;
uptr addr;
- bool destroyed;
ReportStack *stack;
};
@@ -114,9 +106,10 @@ class ReportDesc {
Vector<ReportLocation*> locs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
- Vector<int> unique_tids;
+ Vector<Tid> unique_tids;
ReportStack *sleep;
int count;
+ int signum = 0;
ReportDesc();
~ReportDesc();
lib/tsan/tsan_rtl.cpp
@@ -16,6 +16,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -28,29 +29,28 @@
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
-#ifdef __SSE3__
-// <emmintrin.h> transitively includes <stdlib.h>,
-// and it's prohibited to include std headers into tsan runtime.
-// So we do this dirty trick.
-#define _MM_MALLOC_H_INCLUDED
-#define __MM_MALLOC_H
-#include <emmintrin.h>
-typedef __m128i m128;
-#endif
-
volatile int __tsan_resumed = 0;
extern "C" void __tsan_resume() {
__tsan_resumed = 1;
}
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_test_only_on_fork() {}
+
namespace __tsan {
-#if !SANITIZER_GO && !SANITIZER_MAC
+#if !SANITIZER_GO
+void (*on_initialize)(void);
+int (*on_finalize)(int);
+#endif
+
+#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
-THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
#endif
-static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;
// Can be overriden by a front-end.
@@ -58,113 +58,404 @@ Context *ctx;
bool OnFinalize(bool failed);
void OnInitialize();
#else
-#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
-#if !SANITIZER_GO
- if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
- return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
-#endif
+# if !SANITIZER_GO
+ if (on_finalize)
+ return on_finalize(failed);
+# endif
return failed;
}
+
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
-#if !SANITIZER_GO
- if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
- return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
- }
+# if !SANITIZER_GO
+ if (on_initialize)
+ on_initialize();
+# endif
+}
#endif
+
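// Rough summary of TracePartAlloc below: under ctx->slot_mtx, if this
// thread's trace has reached its budget of Trace::kMinParts +
// flags()->history_size parts, or other traces hold finished parts in excess
// of their budgets, an old part is recycled from ctx->trace_part_recycle and
// detached from its previous trace; if nothing can be recycled, a fresh part
// is mmap'ed outside the lock.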
+static TracePart* TracePartAlloc(ThreadState* thr) {
+ TracePart* part = nullptr;
+ {
+ Lock lock(&ctx->slot_mtx);
+ uptr max_parts = Trace::kMinParts + flags()->history_size;
+ Trace* trace = &thr->tctx->trace;
+ if (trace->parts_allocated == max_parts ||
+ ctx->trace_part_finished_excess) {
+ part = ctx->trace_part_recycle.PopFront();
+ DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
+ if (part && part->trace) {
+ Trace* trace1 = part->trace;
+ Lock trace_lock(&trace1->mtx);
+ part->trace = nullptr;
+ TracePart* part1 = trace1->parts.PopFront();
+ CHECK_EQ(part, part1);
+ if (trace1->parts_allocated > trace1->parts.Size()) {
+ ctx->trace_part_finished_excess +=
+ trace1->parts_allocated - trace1->parts.Size();
+ trace1->parts_allocated = trace1->parts.Size();
+ }
+ }
+ }
+ if (trace->parts_allocated < max_parts) {
+ trace->parts_allocated++;
+ if (ctx->trace_part_finished_excess)
+ ctx->trace_part_finished_excess--;
+ }
+ if (!part)
+ ctx->trace_part_total_allocated++;
+ else if (ctx->trace_part_recycle_finished)
+ ctx->trace_part_recycle_finished--;
+ }
+ if (!part)
+ part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
+ return part;
+}
+
+static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
+ DCHECK(part->trace);
+ part->trace = nullptr;
+ ctx->trace_part_recycle.PushFront(part);
+}
+
+void TraceResetForTesting() {
+ Lock lock(&ctx->slot_mtx);
+ while (auto* part = ctx->trace_part_recycle.PopFront()) {
+ if (auto trace = part->trace)
+ CHECK_EQ(trace->parts.PopFront(), part);
+ UnmapOrDie(part, sizeof(*part));
+ }
+ ctx->trace_part_total_allocated = 0;
+ ctx->trace_part_recycle_finished = 0;
+ ctx->trace_part_finished_excess = 0;
}
+
+static void DoResetImpl(uptr epoch) {
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ Lock lock1(&ctx->slot_mtx);
+ CHECK_EQ(ctx->global_epoch, epoch);
+ ctx->global_epoch++;
+ CHECK(!ctx->resetting);
+ ctx->resetting = true;
+ for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
+ ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
+ static_cast<Tid>(i));
+ // Potentially we could purge all ThreadStatusDead threads from the
+ // registry. Since we reset all shadow, they can't race with anything
+ // anymore. However, their tids can still be stored in some aux places
+ // (e.g. tid of thread that created something).
+ auto trace = &tctx->trace;
+ Lock lock(&trace->mtx);
+ bool attached = tctx->thr && tctx->thr->slot;
+ auto parts = &trace->parts;
+ bool local = false;
+ while (!parts->Empty()) {
+ auto part = parts->Front();
+ local = local || part == trace->local_head;
+ if (local)
+ CHECK(!ctx->trace_part_recycle.Queued(part));
+ else
+ ctx->trace_part_recycle.Remove(part);
+ if (attached && parts->Size() == 1) {
+ // The thread is running and this is the last/current part.
+ // Set the trace position to the end of the current part
+ // to force the thread to call SwitchTracePart and re-attach
+ // to a new slot and allocate a new trace part.
+ // Note: the thread is concurrently modifying the position as well,
+ // so this is only best-effort. The thread can only modify position
+ // within this part, because switching parts is protected by
+ // slot/trace mutexes that we hold here.
+ atomic_store_relaxed(
+ &tctx->thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
+ break;
+ }
+ parts->Remove(part);
+ TracePartFree(part);
+ }
+ CHECK_LE(parts->Size(), 1);
+ trace->local_head = parts->Front();
+ if (tctx->thr && !tctx->thr->slot) {
+ atomic_store_relaxed(&tctx->thr->trace_pos, 0);
+ tctx->thr->trace_prev_pc = 0;
+ }
+ if (trace->parts_allocated > trace->parts.Size()) {
+ ctx->trace_part_finished_excess +=
+ trace->parts_allocated - trace->parts.Size();
+ trace->parts_allocated = trace->parts.Size();
+ }
+ }
+ while (ctx->slot_queue.PopFront()) {
+ }
+ for (auto& slot : ctx->slots) {
+ slot.SetEpoch(kEpochZero);
+ slot.journal.Reset();
+ slot.thr = nullptr;
+ ctx->slot_queue.PushBack(&slot);
+ }
+
+ DPrintf("Resetting shadow...\n");
+ auto shadow_begin = ShadowBeg();
+ auto shadow_end = ShadowEnd();
+#if SANITIZER_GO
+ CHECK_NE(0, ctx->mapped_shadow_begin);
+ shadow_begin = ctx->mapped_shadow_begin;
+ shadow_end = ctx->mapped_shadow_end;
+ VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
#endif
-static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
-
-static ThreadContextBase *CreateThreadContext(u32 tid) {
- // Map thread trace when context is created.
- char name[50];
- internal_snprintf(name, sizeof(name), "trace %u", tid);
- MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
- const uptr hdr = GetThreadTraceHeader(tid);
- internal_snprintf(name, sizeof(name), "trace header %u", tid);
- MapThreadTrace(hdr, sizeof(Trace), name);
- new((void*)hdr) Trace();
- // We are going to use only a small part of the trace with the default
- // value of history_size. However, the constructor writes to the whole trace.
- // Release the unused part.
- uptr hdr_end = hdr + sizeof(Trace);
- hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
- hdr_end = RoundUp(hdr_end, GetPageSizeCached());
- if (hdr_end < hdr + sizeof(Trace)) {
- ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
- uptr unused = hdr + sizeof(Trace) - hdr_end;
- if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
- Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
- hdr_end, unused);
- CHECK("unable to mprotect" && 0);
+#if SANITIZER_WINDOWS
+ auto resetFailed =
+ !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
+#else
+ auto resetFailed =
+ !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
+# if !SANITIZER_GO
+ DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
+# endif
+#endif
+ if (resetFailed) {
+ Printf("failed to reset shadow memory\n");
+ Die();
+ }
+ DPrintf("Resetting meta shadow...\n");
+ ctx->metamap.ResetClocks();
+ StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
+ ctx->resetting = false;
+}
+
+// Clang does not understand locking all slots in the loop:
+// error: expecting mutex 'slot.mtx' to be held at start of each loop
+void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ for (auto& slot : ctx->slots) {
+ slot.mtx.Lock();
+ if (UNLIKELY(epoch == 0))
+ epoch = ctx->global_epoch;
+ if (UNLIKELY(epoch != ctx->global_epoch)) {
+ // Epoch can't change once we've locked the first slot.
+ CHECK_EQ(slot.sid, 0);
+ slot.mtx.Unlock();
+ return;
+ }
+ }
+ DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
+ DoResetImpl(epoch);
+ for (auto& slot : ctx->slots) slot.mtx.Unlock();
+}
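// In short, DoReset locks every TidSlot mutex so that no thread can attach to
// a slot or switch trace parts, re-checks that `epoch` is still the current
// ctx->global_epoch (if another thread has already performed the reset it
// unlocks the first slot and bails out), then runs DoResetImpl to recycle
// trace parts, reinitialize the slots and re-map the shadow, and finally
// unlocks all slot mutexes.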
+
+void FlushShadowMemory() { DoReset(nullptr, 0); }
+
+static TidSlot* FindSlotAndLock(ThreadState* thr)
+ SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ CHECK(!thr->slot);
+ TidSlot* slot = nullptr;
+ for (;;) {
+ uptr epoch;
+ {
+ Lock lock(&ctx->slot_mtx);
+ epoch = ctx->global_epoch;
+ if (slot) {
+ // This is an exhausted slot from the previous iteration.
+ if (ctx->slot_queue.Queued(slot))
+ ctx->slot_queue.Remove(slot);
+ thr->slot_locked = false;
+ slot->mtx.Unlock();
+ }
+ for (;;) {
+ slot = ctx->slot_queue.PopFront();
+ if (!slot)
+ break;
+ if (slot->epoch() != kEpochLast) {
+ ctx->slot_queue.PushBack(slot);
+ break;
+ }
+ }
+ }
+ if (!slot) {
+ DoReset(thr, epoch);
+ continue;
}
+ slot->mtx.Lock();
+ CHECK(!thr->slot_locked);
+ thr->slot_locked = true;
+ if (slot->thr) {
+ DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
+ slot->thr->tid);
+ slot->SetEpoch(slot->thr->fast_state.epoch());
+ slot->thr = nullptr;
+ }
+ if (slot->epoch() != kEpochLast)
+ return slot;
}
- void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
- return new(mem) ThreadContext(tid);
}
+void SlotAttachAndLock(ThreadState* thr) {
+ TidSlot* slot = FindSlotAndLock(thr);
+ DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
+ CHECK(!slot->thr);
+ CHECK(!thr->slot);
+ slot->thr = thr;
+ thr->slot = slot;
+ Epoch epoch = EpochInc(slot->epoch());
+ CHECK(!EpochOverflow(epoch));
+ slot->SetEpoch(epoch);
+ thr->fast_state.SetSid(slot->sid);
+ thr->fast_state.SetEpoch(epoch);
+ if (thr->slot_epoch != ctx->global_epoch) {
+ thr->slot_epoch = ctx->global_epoch;
+ thr->clock.Reset();
#if !SANITIZER_GO
-static const u32 kThreadQuarantineSize = 16;
-#else
-static const u32 kThreadQuarantineSize = 64;
+ thr->last_sleep_stack_id = kInvalidStackID;
+ thr->last_sleep_clock.Reset();
+#endif
+ }
+ thr->clock.Set(slot->sid, epoch);
+ slot->journal.PushBack({thr->tid, epoch});
+}
+
+static void SlotDetachImpl(ThreadState* thr, bool exiting) {
+ TidSlot* slot = thr->slot;
+ thr->slot = nullptr;
+ if (thr != slot->thr) {
+ slot = nullptr; // we don't own the slot anymore
+ if (thr->slot_epoch != ctx->global_epoch) {
+ TracePart* part = nullptr;
+ auto* trace = &thr->tctx->trace;
+ {
+ Lock l(&trace->mtx);
+ auto* parts = &trace->parts;
+ // The trace can be completely empty in the unlikely event that
+ // the thread is preempted right after it acquired the slot
+ // in ThreadStart and did not trace any events yet.
+ CHECK_LE(parts->Size(), 1);
+ part = parts->PopFront();
+ thr->tctx->trace.local_head = nullptr;
+ atomic_store_relaxed(&thr->trace_pos, 0);
+ thr->trace_prev_pc = 0;
+ }
+ if (part) {
+ Lock l(&ctx->slot_mtx);
+ TracePartFree(part);
+ }
+ }
+ return;
+ }
+ CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
+ slot->SetEpoch(thr->fast_state.epoch());
+ slot->thr = nullptr;
+}
+
+void SlotDetach(ThreadState* thr) {
+ Lock lock(&thr->slot->mtx);
+ SlotDetachImpl(thr, true);
+}
+
+void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(!thr->slot_locked);
+#if SANITIZER_DEBUG
+ // Check these mutexes are not locked.
+ // We can call DoReset from SlotAttachAndLock, which will lock
+ // these mutexes, but it happens only every once in a while.
+ { ThreadRegistryLock lock(&ctx->thread_registry); }
+ { Lock lock(&ctx->slot_mtx); }
#endif
+ TidSlot* slot = thr->slot;
+ slot->mtx.Lock();
+ thr->slot_locked = true;
+ if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
+ return;
+ SlotDetachImpl(thr, false);
+ thr->slot_locked = false;
+ slot->mtx.Unlock();
+ SlotAttachAndLock(thr);
+}
+
+void SlotUnlock(ThreadState* thr) {
+ DCHECK(thr->slot_locked);
+ thr->slot_locked = false;
+ thr->slot->mtx.Unlock();
+}
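// SlotLock/SlotUnlock bracket regions that must not interleave with DoReset
// or slot preemption. Elsewhere in this change the pair is wrapped in an RAII
// guard, e.g. "SlotLocker locker(thr, true);" in UnmapShadow and
// TraceSwitchPartImpl below; the guard's definition is not part of this hunk,
// so presumably it takes SlotLock in its constructor and SlotUnlock in its
// destructor.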
Context::Context()
: initialized(),
report_mtx(MutexTypeReport),
nreported(),
- nmissed_expected(),
- thread_registry(new (thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
+ thread_registry([](Tid tid) -> ThreadContextBase* {
+ return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
+ }),
racy_mtx(MutexTypeRacy),
racy_stacks(),
- racy_addresses(),
fired_suppressions_mtx(MutexTypeFired),
- clock_alloc(LINKER_INITIALIZED, "clock allocator") {
+ slot_mtx(MutexTypeSlots),
+ resetting() {
fired_suppressions.reserve(8);
+ for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
+ TidSlot* slot = &slots[i];
+ slot->sid = static_cast<Sid>(i);
+ slot_queue.PushBack(slot);
+ }
+ global_epoch = 1;
}
+TidSlot::TidSlot() : mtx(MutexTypeSlot) {}
+
// The objects are allocated in TLS, so one may rely on zero-initialization.
-ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
- unsigned reuse_count, uptr stk_addr, uptr stk_size,
- uptr tls_addr, uptr tls_size)
- : fast_state(tid, epoch)
- // Do not touch these, rely on zero initialization,
- // they may be accessed before the ctor.
- // , ignore_reads_and_writes()
- // , ignore_interceptors()
- ,
- clock(tid, reuse_count)
-#if !SANITIZER_GO
- ,
- jmp_bufs()
-#endif
- ,
- tid(tid),
- unique_id(unique_id),
- stk_addr(stk_addr),
- stk_size(stk_size),
- tls_addr(tls_addr),
- tls_size(tls_size)
+ThreadState::ThreadState(Tid tid)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // ignore_reads_and_writes()
+ // ignore_interceptors()
+ : tid(tid) {
+ CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
- ,
- last_sleep_clock(tid)
+ // C/C++ uses fixed size shadow stack.
+ const int kInitStackSize = kShadowStackSize;
+ shadow_stack = static_cast<uptr*>(
+ MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
+ SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
+ kInitStackSize * sizeof(uptr));
+#else
+ // Go uses malloc-allocated shadow stack with dynamic size.
+ const int kInitStackSize = 8;
+ shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
-{
+ shadow_stack_pos = shadow_stack;
+ shadow_stack_end = shadow_stack + kInitStackSize;
}
#if !SANITIZER_GO
-static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
- uptr n_threads;
- uptr n_running_threads;
- ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+void MemoryProfiler(u64 uptime) {
+ if (ctx->memprof_fd == kInvalidFd)
+ return;
InternalMmapVector<char> buf(4096);
- WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
- WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
+ WriteMemoryProfile(buf.data(), buf.size(), uptime);
+ WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
+}
+
+static bool InitializeMemoryProfiler() {
+ ctx->memprof_fd = kInvalidFd;
+ const char *fname = flags()->profile_memory;
+ if (!fname || !fname[0])
+ return false;
+ if (internal_strcmp(fname, "stdout") == 0) {
+ ctx->memprof_fd = 1;
+ } else if (internal_strcmp(fname, "stderr") == 0) {
+ ctx->memprof_fd = 2;
+ } else {
+ InternalScopedString filename;
+ filename.append("%s.%d", fname, (int)internal_getpid());
+ ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
+ if (ctx->memprof_fd == kInvalidFd) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ filename.data());
+ return false;
+ }
+ }
+ MemoryProfiler(0);
+ return true;
}
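// Usage note: memory profiling is driven by the existing profile_memory flag,
// e.g. TSAN_OPTIONS=profile_memory=stdout ./app prints profiles to stdout,
// while profile_memory=/tmp/tsan.mem writes to /tmp/tsan.mem.<pid> as
// constructed above. The example command line and paths are illustrative.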
static void *BackgroundThread(void *arg) {
@@ -172,64 +463,43 @@ static void *BackgroundThread(void *arg) {
// We don't use ScopedIgnoreInterceptors, because we want ignores to be
// enabled even when the thread function exits (e.g. during pthread thread
// shutdown code).
- cur_thread_init();
- cur_thread()->ignore_interceptors++;
+ cur_thread_init()->ignore_interceptors++;
const u64 kMs2Ns = 1000 * 1000;
+ const u64 start = NanoTime();
- fd_t mprof_fd = kInvalidFd;
- if (flags()->profile_memory && flags()->profile_memory[0]) {
- if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
- mprof_fd = 1;
- } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
- mprof_fd = 2;
- } else {
- InternalScopedString filename;
- filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
- fd_t fd = OpenFile(filename.data(), WrOnly);
- if (fd == kInvalidFd) {
- Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- filename.data());
- } else {
- mprof_fd = fd;
- }
- }
- }
-
- u64 last_flush = NanoTime();
+ u64 last_flush = start;
uptr last_rss = 0;
- for (int i = 0;
- atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
- i++) {
+ while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
+ VReport(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
- last_flush = NanoTime();
+ now = last_flush = NanoTime();
}
}
- // GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- VPrintf(1, "ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ VReport(1,
+ "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
(u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
+ VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ now = NanoTime();
+ VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
+ (u64)rss >> 20);
}
last_rss = rss;
}
- // Write memory profile if requested.
- if (mprof_fd != kInvalidFd)
- MemoryProfiler(ctx, mprof_fd, i);
+ MemoryProfiler(now - start);
// Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) {
@@ -260,31 +530,96 @@ static void StopBackgroundThread() {
#endif
void DontNeedShadowFor(uptr addr, uptr size) {
- ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
+ ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
+ reinterpret_cast<uptr>(MemToShadow(addr + size)));
}
#if !SANITIZER_GO
+// We call UnmapShadow before the actual munmap; at that point we don't yet
+// know if the provided address/size are sane. We can't call UnmapShadow
+// after the actual munmap because at that point the memory range can
+// already be reused for something else, so we can't rely on the munmap
+// return value to understand whether the values are sane.
+// While calling munmap with insane values (non-canonical address, negative
+// size, etc) is an error, the kernel won't crash. We must also try not to
+// crash as the failure mode is very confusing (paging fault inside of the
+// runtime on some derived shadow address).
+static bool IsValidMmapRange(uptr addr, uptr size) {
+ if (size == 0)
+ return true;
+ if (static_cast<sptr>(size) < 0)
+ return false;
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return false;
+ // Check that if the start of the region belongs to one of the app ranges,
+ // the end of the region belongs to the same range.
+ const uptr ranges[][2] = {
+ {LoAppMemBeg(), LoAppMemEnd()},
+ {MidAppMemBeg(), MidAppMemEnd()},
+ {HiAppMemBeg(), HiAppMemEnd()},
+ };
+ for (auto range : ranges) {
+ if (addr >= range[0] && addr < range[1])
+ return addr + size <= range[1];
+ }
+ return false;
+}
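// For instance, a call like munmap(p, (size_t)-1) reaches this check with
// static_cast<sptr>(size) < 0 and is rejected, as is a range that starts in
// one app region (lo/mid/hi) but extends past its end; both would otherwise
// make UnmapShadow below touch bogus derived shadow addresses. The example
// values are illustrative.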
+
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
- if (size == 0) return;
+ if (size == 0 || !IsValidMmapRange(addr, size))
+ return;
DontNeedShadowFor(addr, size);
ScopedGlobalProcessor sgp;
- ctx->metamap.ResetRange(thr->proc(), addr, size);
+ SlotLocker locker(thr, true);
+ ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif
void MapShadow(uptr addr, uptr size) {
+ // Ensure the thread registry lock is held, so as to synchronize
+ // with DoReset, which also accesses the mapped_shadow_* ctx fields.
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ static bool data_mapped = false;
+
+#if !SANITIZER_GO
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
- "shadow"))
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
Die();
+#else
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
+ VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
+ addr, addr + size, shadow_begin, shadow_end);
+
+ if (!data_mapped) {
+ // First call maps data+bss.
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
+ } else {
+ VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
+ ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
+ // Second and subsequent calls map heap.
+ if (shadow_end <= ctx->mapped_shadow_end)
+ return;
+ if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
+ ctx->mapped_shadow_begin = shadow_begin;
+ if (shadow_begin < ctx->mapped_shadow_end)
+ shadow_begin = ctx->mapped_shadow_end;
+ VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
+ Die();
+ ctx->mapped_shadow_end = shadow_end;
+ }
+#endif
// Meta shadow is 2:1, so tread carefully.
- static bool data_mapped = false;
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
@@ -297,12 +632,11 @@ void MapShadow(uptr addr, uptr size) {
"meta shadow"))
Die();
} else {
- // Mapping continous heap.
+ // Mapping continuous heap.
// Windows wants 64K alignment.
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
- if (meta_end <= mapped_meta_end)
- return;
+ CHECK_GT(meta_end, mapped_meta_end);
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
@@ -310,56 +644,8 @@ void MapShadow(uptr addr, uptr size) {
Die();
mapped_meta_end = meta_end;
}
- VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
- addr, addr+size, meta_begin, meta_end);
-}
-
-void MapThreadTrace(uptr addr, uptr size, const char *name) {
- DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, TraceMemBeg());
- CHECK_LE(addr + size, TraceMemEnd());
- CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- if (!MmapFixedSuperNoReserve(addr, size, name)) {
- Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
- addr, size);
- Die();
- }
-}
-
-static void CheckShadowMapping() {
- uptr beg, end;
- for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
- // Skip cases for empty regions (heap definition for architectures that
- // do not use 64-bit allocator).
- if (beg == end)
- continue;
- VPrintf(3, "checking shadow region %p-%p\n", beg, end);
- uptr prev = 0;
- for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
- for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
- const uptr p = RoundDown(p0 + x, kShadowCell);
- if (p < beg || p >= end)
- continue;
- const uptr s = MemToShadow(p);
- const uptr m = (uptr)MemToMeta(p);
- VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
- CHECK(IsAppMem(p));
- CHECK(IsShadowMem(s));
- CHECK_EQ(p, ShadowToMem(s));
- CHECK(IsMetaMem(m));
- if (prev) {
- // Ensure that shadow and meta mappings are linear within a single
- // user range. Lots of code that processes memory ranges assumes it.
- const uptr prev_s = MemToShadow(prev);
- const uptr prev_m = (uptr)MemToMeta(prev);
- CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
- CHECK_EQ((m - prev_m) / kMetaShadowSize,
- (p - prev) / kMetaShadowCell);
- }
- prev = p;
- }
- }
- }
+ VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
+ addr + size, meta_begin, meta_end);
}
#if !SANITIZER_GO
@@ -380,15 +666,19 @@ void CheckUnwind() {
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
- cur_thread()->ignore_sync++;
- cur_thread()->ignore_reads_and_writes++;
+ ThreadState* thr = cur_thread();
+ thr->nomalloc = false;
+ thr->ignore_sync++;
+ thr->ignore_reads_and_writes++;
+ atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
+bool is_initialized;
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
- static bool is_initialized = false;
if (is_initialized)
return;
is_initialized = true;
@@ -409,9 +699,6 @@ void Initialize(ThreadState *thr) {
__tsan::InitializePlatformEarly();
#if !SANITIZER_GO
- // Re-exec ourselves if we need to set additional env or command line args.
- MaybeReexec();
-
InitializeAllocator();
ReplaceSystemMalloc();
#endif
@@ -420,7 +707,6 @@ void Initialize(ThreadState *thr) {
Processor *proc = ProcCreate();
ProcWire(proc, thr);
InitializeInterceptors();
- CheckShadowMapping();
InitializePlatform();
InitializeDynamicAnnotations();
#if !SANITIZER_GO
@@ -436,21 +722,23 @@ void Initialize(ThreadState *thr) {
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
- VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
(int)internal_getpid());
// Initialize thread 0.
- int tid = ThreadCreate(thr, 0, 0, true);
- CHECK_EQ(tid, 0);
+ Tid tid = ThreadCreate(nullptr, 0, 0, true);
+ CHECK_EQ(tid, kMainTid);
ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
- ctx->initialized = true;
#if !SANITIZER_GO
Symbolizer::LateInitialize();
+ if (InitializeMemoryProfiler() || flags()->force_background_thread)
+ MaybeSpawnBackgroundThread();
#endif
+ ctx->initialized = true;
if (flags()->stop_on_start) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
@@ -476,20 +764,21 @@ void MaybeSpawnBackgroundThread() {
#endif
}
-
int Finalize(ThreadState *thr) {
bool failed = false;
+#if !SANITIZER_GO
if (common_flags()->print_module_map == 1)
DumpProcessMap();
+#endif
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
- SleepForMillis(flags()->atexit_sleep_ms);
+ internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);
- // Wait for pending reports.
- ctx->report_mtx.Lock();
- { ScopedErrorReportLock l; }
- ctx->report_mtx.Unlock();
+ {
+ // Wait for pending reports.
+ ScopedErrorReportLock lock;
+ }
#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
@@ -506,18 +795,8 @@ int Finalize(ThreadState *thr) {
#endif
}
- if (ctx->nmissed_expected) {
- failed = true;
- Printf("ThreadSanitizer: missed %d expected races\n",
- ctx->nmissed_expected);
- }
-
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#if !SANITIZER_GO
- if (flags()->print_benign)
- PrintMatchedBenignRaces();
-#endif
failed = OnFinalize(failed);
@@ -525,10 +804,16 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
- ctx->thread_registry->Lock();
- ctx->report_mtx.Lock();
+void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ GlobalProcessorLock();
+ // Detaching from the slot makes OnUserFree skip writing to the shadow.
+ // The slot will be locked so any attempts to use it will deadlock anyway.
+ SlotDetach(thr);
+ for (auto& slot : ctx->slots) slot.mtx.Lock();
+ ctx->thread_registry.Lock();
+ ctx->slot_mtx.Lock();
ScopedErrorReportLock::Lock();
+ AllocatorLock();
// Suppress all reports in the pthread_atfork callbacks.
// Reports will deadlock on the report_mtx.
// We could ignore sync operations as well,
@@ -537,36 +822,48 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports++;
// On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
// we'll assert in CheckNoLocks() unless we ignore interceptors.
+ // On OS X libSystem_atfork_prepare/parent/child callbacks are called
+ // after/before our callbacks and they call free.
thr->ignore_interceptors++;
+ // Disables memory write in OnUserAlloc/Free.
+ thr->ignore_reads_and_writes++;
+
+ __tsan_test_only_on_fork();
}
-void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
+ thr->ignore_reads_and_writes--;
+ AllocatorUnlock();
ScopedErrorReportLock::Unlock();
- ctx->report_mtx.Unlock();
- ctx->thread_registry->Unlock();
+ ctx->slot_mtx.Unlock();
+ ctx->thread_registry.Unlock();
+ for (auto& slot : ctx->slots) slot.mtx.Unlock();
+ SlotAttachAndLock(thr);
+ SlotUnlock(thr);
+ GlobalProcessorUnlock();
}
-void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
- thr->suppress_reports--; // Enabled in ForkBefore.
- thr->ignore_interceptors--;
- ScopedErrorReportLock::Unlock();
- ctx->report_mtx.Unlock();
- ctx->thread_registry->Unlock();
+void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }
- uptr nthread = 0;
- ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
- VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
- " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
+ ForkAfter(thr);
+ u32 nthread = ctx->thread_registry.OnFork(thr->tid);
+ VPrintf(1,
+ "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n",
+ (int)internal_getpid(), (int)nthread);
if (nthread == 1) {
- StartBackgroundThread();
+ if (start_thread)
+ StartBackgroundThread();
} else {
// We've just forked a multi-threaded process. We cannot reasonably function
// after that (some mutexes may be locked before fork). So just enable
// ignores for everything in the hope that we will exec soon.
ctx->after_multithreaded_fork = true;
thr->ignore_interceptors++;
+ thr->suppress_reports++;
ThreadIgnoreBegin(thr, pc);
ThreadIgnoreSyncBegin(thr, pc);
}
@@ -578,19 +875,20 @@ NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
const int newsz = 2 * sz;
- uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
- newsz * sizeof(uptr));
+ auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
- internal_free(thr->shadow_stack);
+ Free(thr->shadow_stack);
thr->shadow_stack = newstack;
thr->shadow_stack_pos = newstack + sz;
thr->shadow_stack_end = newstack + newsz;
}
#endif
-u32 CurrentStackId(ThreadState *thr, uptr pc) {
+StackID CurrentStackId(ThreadState *thr, uptr pc) {
+#if !SANITIZER_GO
if (!thr->is_inited) // May happen during bootstrap.
- return 0;
+ return kInvalidStackID;
+#endif
if (pc != 0) {
#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
@@ -601,486 +899,149 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
- u32 id = StackDepotPut(
+ StackID id = StackDepotPut(
StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
if (pc != 0)
thr->shadow_stack_pos--;
return id;
}
-void TraceSwitch(ThreadState *thr) {
-#if !SANITIZER_GO
- if (ctx->after_multithreaded_fork)
- return;
-#endif
- thr->nomalloc++;
- Trace *thr_trace = ThreadTrace(thr->tid);
- Lock l(&thr_trace->mtx);
- unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
- TraceHeader *hdr = &thr_trace->headers[trace];
- hdr->epoch0 = thr->fast_state.epoch();
- ObtainCurrentStack(thr, 0, &hdr->stack0);
- hdr->mset0 = thr->mset;
- thr->nomalloc--;
-}
-
-Trace *ThreadTrace(int tid) {
- return (Trace*)GetThreadTraceHeader(tid);
-}
-
-uptr TraceTopPC(ThreadState *thr) {
- Event *events = (Event*)GetThreadTrace(thr->tid);
- uptr pc = events[thr->fast_state.GetTracePos()];
- return pc;
-}
-
-uptr TraceSize() {
- return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
-}
-
-uptr TraceParts() {
- return TraceSize() / kTracePartSize;
-}
-
-#if !SANITIZER_GO
-extern "C" void __tsan_trace_switch() {
- TraceSwitch(cur_thread());
-}
-
-extern "C" void __tsan_report_race() {
- ReportRace(cur_thread());
-}
-#endif
-
-ALWAYS_INLINE
-Shadow LoadShadow(u64 *p) {
- u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
- return Shadow(raw);
-}
-
-ALWAYS_INLINE
-void StoreShadow(u64 *sp, u64 s) {
- atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
-}
-
-ALWAYS_INLINE
-void StoreIfNotYetStored(u64 *sp, u64 *s) {
- StoreShadow(sp, *s);
- *s = 0;
-}
-
-ALWAYS_INLINE
-void HandleRace(ThreadState *thr, u64 *shadow_mem,
- Shadow cur, Shadow old) {
- thr->racy_state[0] = cur.raw();
- thr->racy_state[1] = old.raw();
- thr->racy_shadow_addr = shadow_mem;
-#if !SANITIZER_GO
- HACKY_CALL(__tsan_report_race);
-#else
- ReportRace(thr);
-#endif
-}
-
-static inline bool HappensBefore(Shadow old, ThreadState *thr) {
- return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
-}
-
-ALWAYS_INLINE
-void MemoryAccessImpl1(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur) {
-
- // This potentially can live in an MMX/SSE scratch register.
- // The required intrinsics are:
- // __m128i _mm_move_epi64(__m128i*);
- // _mm_storel_epi64(u64*, __m128i);
- u64 store_word = cur.raw();
- bool stored = false;
-
- // scan all the shadow values and dispatch to 4 categories:
- // same, replace, candidate and race (see comments below).
- // we consider only 3 cases regarding access sizes:
- // equal, intersect and not intersect. initially I considered
- // larger and smaller as well, it allowed to replace some
- // 'candidates' with 'same' or 'replace', but I think
- // it's just not worth it (performance- and complexity-wise).
-
- Shadow old(0);
-
- // It release mode we manually unroll the loop,
- // because empirically gcc generates better code this way.
- // However, we can't afford unrolling in debug mode, because the function
- // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
- // threads, which is not enough for the unrolled loop.
-#if SANITIZER_DEBUG
- for (int idx = 0; idx < 4; idx++) {
-#include "tsan_update_shadow_word_inl.h"
- }
-#else
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
- if (stored) {
-#include "tsan_update_shadow_word_inl.h"
- } else {
-#include "tsan_update_shadow_word_inl.h"
- }
- idx = 2;
- if (stored) {
-#include "tsan_update_shadow_word_inl.h"
- } else {
-#include "tsan_update_shadow_word_inl.h"
- }
- idx = 3;
- if (stored) {
-#include "tsan_update_shadow_word_inl.h"
- } else {
-#include "tsan_update_shadow_word_inl.h"
- }
-#endif
-
- // we did not find any races and had already stored
- // the current access info, so we are done
- if (LIKELY(stored))
- return;
- // choose a random candidate slot and replace it
- StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
- return;
- RACE:
- HandleRace(thr, shadow_mem, cur, old);
- return;
-}
-
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int size, bool kAccessIsWrite, bool kIsAtomic) {
- while (size) {
- int size1 = 1;
- int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
- size1 = 8;
- kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
- size1 = 4;
- kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
- size1 = 2;
- kAccessSizeLog = kSizeLog2;
- }
- MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
- addr += size1;
- size -= size1;
- }
-}
-
-ALWAYS_INLINE
-bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
- Shadow cur(a);
- for (uptr i = 0; i < kShadowCnt; i++) {
- Shadow old(LoadShadow(&s[i]));
- if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
- old.TidWithIgnore() == cur.TidWithIgnore() &&
- old.epoch() > sync_epoch &&
- old.IsAtomic() == cur.IsAtomic() &&
- old.IsRead() <= cur.IsRead())
- return true;
+static bool TraceSkipGap(ThreadState* thr) {
+ Trace *trace = &thr->tctx->trace;
+ Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+ DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
+ auto *part = trace->parts.Back();
+ DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
+ trace, trace->parts.Front(), part, pos);
+ if (!part)
+ return false;
+ // We can get here when we still have space in the current trace part.
+ // The fast-path check in TraceAcquire has false positives in the middle of
+ // the part. Check if we are indeed at the end of the current part or not,
+ // and fill any gaps with NopEvent's.
+ Event* end = &part->events[TracePart::kSize];
+ DCHECK_GE(pos, &part->events[0]);
+ DCHECK_LE(pos, end);
+ if (pos + 1 < end) {
+ if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+ TracePart::kAlignment)
+ *pos++ = NopEvent;
+ *pos++ = NopEvent;
+ DCHECK_LE(pos + 2, end);
+ atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+ return true;
}
+ // We are indeed at the end.
+ for (; pos < end; pos++) *pos = NopEvent;
return false;
}
-#if defined(__SSE3__)
-#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
- _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
- (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
-ALWAYS_INLINE
-bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
- // This is an optimized version of ContainsSameAccessSlow.
- // load current access into access[0:63]
- const m128 access = _mm_cvtsi64_si128(a);
- // duplicate high part of access in addr0:
- // addr0[0:31] = access[32:63]
- // addr0[32:63] = access[32:63]
- // addr0[64:95] = access[32:63]
- // addr0[96:127] = access[32:63]
- const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
- // load 4 shadow slots
- const m128 shadow0 = _mm_load_si128((__m128i*)s);
- const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
- // load high parts of 4 shadow slots into addr_vect:
- // addr_vect[0:31] = shadow0[32:63]
- // addr_vect[32:63] = shadow0[96:127]
- // addr_vect[64:95] = shadow1[32:63]
- // addr_vect[96:127] = shadow1[96:127]
- m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
- if (!is_write) {
- // set IsRead bit in addr_vect
- const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
- const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
- addr_vect = _mm_or_si128(addr_vect, rw_mask);
- }
- // addr0 == addr_vect?
- const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
- // epoch1[0:63] = sync_epoch
- const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
- // epoch[0:31] = sync_epoch[0:31]
- // epoch[32:63] = sync_epoch[0:31]
- // epoch[64:95] = sync_epoch[0:31]
- // epoch[96:127] = sync_epoch[0:31]
- const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
- // load low parts of shadow cell epochs into epoch_vect:
- // epoch_vect[0:31] = shadow0[0:31]
- // epoch_vect[32:63] = shadow0[64:95]
- // epoch_vect[64:95] = shadow1[0:31]
- // epoch_vect[96:127] = shadow1[64:95]
- const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
- // epoch_vect >= sync_epoch?
- const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
- // addr_res & epoch_res
- const m128 res = _mm_and_si128(addr_res, epoch_res);
- // mask[0] = res[7]
- // mask[1] = res[15]
- // ...
- // mask[15] = res[127]
- const int mask = _mm_movemask_epi8(res);
- return mask != 0;
-}
-#endif
-
-ALWAYS_INLINE
-bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
-#if defined(__SSE3__)
- bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
- // NOTE: this check can fail if the shadow is concurrently mutated
- // by other threads. But it still can be useful if you modify
- // ContainsSameAccessFast and want to ensure that it's not completely broken.
- // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
- return res;
-#else
- return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
-#endif
-}
-
-ALWAYS_INLINE USED
-void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
- u64 *shadow_mem = (u64*)MemToShadow(addr);
- DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
- " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
- (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
- (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
- (uptr)shadow_mem[0], (uptr)shadow_mem[1],
- (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
-#if SANITIZER_DEBUG
- if (!IsAppMem(addr)) {
- Printf("Access to non app mem %zx\n", addr);
- DCHECK(IsAppMem(addr));
- }
- if (!IsShadowMem((uptr)shadow_mem)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
- DCHECK(IsShadowMem((uptr)shadow_mem));
- }
-#endif
-
- if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
- // Access to .rodata section, no races here.
- // Measurements show that it can be 10-20% of all memory accesses.
- return;
- }
-
- FastState fast_state = thr->fast_state;
- if (UNLIKELY(fast_state.GetIgnoreBit())) {
- return;
- }
-
- Shadow cur(fast_state);
- cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
- cur.SetWrite(kAccessIsWrite);
- cur.SetAtomic(kIsAtomic);
-
- if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
- thr->fast_synch_epoch, kAccessIsWrite))) {
- return;
- }
-
- if (kCollectHistory) {
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
- cur.IncrementEpoch();
- }
-
- MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
- shadow_mem, cur);
-}
-
-// Called by MemoryAccessRange in tsan_rtl_thread.cpp
-ALWAYS_INLINE USED
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur) {
- if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
- thr->fast_synch_epoch, kAccessIsWrite))) {
- return;
- }
-
- MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
- shadow_mem, cur);
-}
-
-static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
- u64 val) {
- (void)thr;
- (void)pc;
- if (size == 0)
+NOINLINE
+void TraceSwitchPart(ThreadState* thr) {
+ if (TraceSkipGap(thr))
return;
- // FIXME: fix me.
- uptr offset = addr % kShadowCell;
- if (offset) {
- offset = kShadowCell - offset;
- if (size <= offset)
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ // We just need to survive till exec.
+ TracePart* part = thr->tctx->trace.parts.Back();
+ if (part) {
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
return;
- addr += offset;
- size -= offset;
- }
- DCHECK_EQ(addr % 8, 0);
- // If a user passes some insane arguments (memset(0)),
- // let it just crash as usual.
- if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
- return;
- // Don't want to touch lots of shadow memory.
- // If a program maps 10MB stack, there is no need reset the whole range.
- size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
- // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
- if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
- u64 *p = (u64*)MemToShadow(addr);
- CHECK(IsShadowMem((uptr)p));
- CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
- // FIXME: may overwrite a part outside the region
- for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
- p[i++] = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- p[i++] = 0;
- }
- } else {
- // The region is big, reset only beginning and end.
- const uptr kPageSize = GetPageSizeCached();
- u64 *begin = (u64*)MemToShadow(addr);
- u64 *end = begin + size / kShadowCell * kShadowCnt;
- u64 *p = begin;
- // Set at least first kPageSize/2 to page boundary.
- while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
- *p++ = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- *p++ = 0;
- }
- // Reset middle part.
- u64 *p1 = p;
- p = RoundDown(end, kPageSize);
- if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
- Die();
- // Set the ending.
- while (p < end) {
- *p++ = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- *p++ = 0;
}
}
+#endif
+ TraceSwitchPartImpl(thr);
}
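// Note on the after_multithreaded_fork path above: the child of a
// multi-threaded fork only needs to survive until exec, so instead of
// allocating new parts the position is rewound to the start of the existing
// last part and events are simply overwritten in place.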
-void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- MemoryRangeSet(thr, pc, addr, size, 0);
-}
-
-void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- // Processing more than 1k (4k of shadow) is expensive,
- // can cause excessive memory consumption (user does not necessary touch
- // the whole range) and most likely unnecessary.
- if (size > 1024)
- size = 1024;
- CHECK_EQ(thr->is_freeing, false);
- thr->is_freeing = true;
- MemoryAccessRange(thr, pc, addr, size, true);
- thr->is_freeing = false;
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+void TraceSwitchPartImpl(ThreadState* thr) {
+ SlotLocker locker(thr, true);
+ Trace* trace = &thr->tctx->trace;
+ TracePart* part = TracePartAlloc(thr);
+ part->trace = trace;
+ thr->trace_prev_pc = 0;
+ TracePart* recycle = nullptr;
+ // Keep roughly half of parts local to the thread
+ // (not queued into the recycle queue).
+ uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
+ {
+ Lock lock(&trace->mtx);
+ if (trace->parts.Empty())
+ trace->local_head = part;
+ if (trace->parts.Size() >= local_parts) {
+ recycle = trace->local_head;
+ trace->local_head = trace->parts.Next(recycle);
+ }
+ trace->parts.PushBack(part);
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
}
- Shadow s(thr->fast_state);
- s.ClearIgnoreBit();
- s.MarkAsFreed();
- s.SetWrite(true);
- s.SetAddr0AndSizeLog(0, 3);
- MemoryRangeSet(thr, pc, addr, size, s.raw());
-}
-
-void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ // Make this part self-sufficient by restoring the current stack
+ // and mutex set in the beginning of the trace.
+ TraceTime(thr);
+ {
+ // Pathologically large stacks may not fit into the part.
+ // In these cases we log only fixed number of top frames.
+ const uptr kMaxFrames = 1000;
+ // Check that kMaxFrames won't consume the whole part.
+ static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
+ uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
+ for (; pos < thr->shadow_stack_pos; pos++) {
+ if (TryTraceFunc(thr, *pos))
+ continue;
+ CHECK(TraceSkipGap(thr));
+ CHECK(TryTraceFunc(thr, *pos));
+ }
}
- Shadow s(thr->fast_state);
- s.ClearIgnoreBit();
- s.SetWrite(true);
- s.SetAddr0AndSizeLog(0, 3);
- MemoryRangeSet(thr, pc, addr, size, s.raw());
-}
-
-void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, addr, size);
- else
- MemoryResetRange(thr, pc, addr, size);
-}
-
-ALWAYS_INLINE USED
-void FuncEntry(ThreadState *thr, uptr pc) {
- DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ for (uptr i = 0; i < thr->mset.Size(); i++) {
+ MutexSet::Desc d = thr->mset.Get(i);
+ for (uptr i = 0; i < d.count; i++)
+ TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+ d.addr, d.stack_id);
}
-
- // Shadow stack maintenance can be replaced with
- // stack unwinding during trace switch (which presumably must be faster).
- DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#if !SANITIZER_GO
- DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
-#else
- if (thr->shadow_stack_pos == thr->shadow_stack_end)
- GrowShadowStack(thr);
-#endif
- thr->shadow_stack_pos[0] = pc;
- thr->shadow_stack_pos++;
-}
-
-ALWAYS_INLINE USED
-void FuncExit(ThreadState *thr) {
- DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
+ // after the call. It's possible that TryTraceFunc/TraceMutexLock above
+ // filled the trace part exactly up to the TracePart::kAlignment gap
+ // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
+ EventFunc *ev;
+ if (!TraceAcquire(thr, &ev)) {
+ CHECK(TraceSkipGap(thr));
+ CHECK(TraceAcquire(thr, &ev));
}
-
- DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#if !SANITIZER_GO
- DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
-#endif
- thr->shadow_stack_pos--;
+ {
+ Lock lock(&ctx->slot_mtx);
+ // There is a small chance that the slot may not be queued at this point.
+ // This can happen if the slot has kEpochLast epoch and another thread
+ // in FindSlotAndLock discovered that it's exhausted and removed it from
+ // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
+ // was called with the slot locked and epoch already at kEpochLast,
+    // or (2) if we've acquired a new slot in SlotLock at the beginning
+    // of the function and the slot was at kEpochLast - 1, so after the
+    // increment in SlotAttachAndLock it becomes kEpochLast.
+ if (ctx->slot_queue.Queued(thr->slot)) {
+ ctx->slot_queue.Remove(thr->slot);
+ ctx->slot_queue.PushBack(thr->slot);
+ }
+ if (recycle)
+ ctx->trace_part_recycle.PushBack(recycle);
+ }
+ DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
+ trace->parts.Front(), trace->parts.Back(),
+ atomic_load_relaxed(&thr->trace_pos));
}
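For orientation, the invariant stated in the comment above ("TraceAcquire will always succeed after the call") is what lets callers append events with a single retry; a minimal caller-side sketch, mirroring TraceEvent in tsan_rtl.h below (illustrative only, not part of the patch):

// Illustrative sketch of the caller-side contract of TraceSwitchPart:
// a failed TraceAcquire is retried exactly once after switching parts,
// and the retry is guaranteed to succeed.
template <typename EventT>
void AppendEventSketch(ThreadState *thr, EventT ev) {
  EventT *evp;
  if (!TraceAcquire(thr, &evp)) {   // current part is full (or at the gap)
    TraceSwitchPart(thr);           // open a new part, re-log stack/mutexes
    CHECK(TraceAcquire(thr, &evp)); // must succeed per the invariant above
  }
  *evp = ev;
  TraceRelease(thr, evp);
}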
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
+void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
- if (save_stack && !ctx->after_multithreaded_fork)
+ if (pc && !ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
-void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
+void ThreadIgnoreEnd(ThreadState *thr) {
DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->ignore_reads_and_writes--;
@@ -1100,17 +1061,17 @@ uptr __tsan_testonly_shadow_stack_current_size() {
}
#endif
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
- if (save_stack && !ctx->after_multithreaded_fork)
+ if (pc && !ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
-void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
+void ThreadIgnoreSyncEnd(ThreadState *thr) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
CHECK_GT(thr->ignore_sync, 0);
thr->ignore_sync--;
@@ -1129,7 +1090,6 @@ void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
-
} // namespace __tsan
#if SANITIZER_CHECK_DEADLOCKS
@@ -1137,23 +1097,27 @@ namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
{MutexInvalid, "Invalid", {}},
- {MutexThreadRegistry, "ThreadRegistry", {}},
- {MutexTypeTrace, "Trace", {MutexLeaf}},
- {MutexTypeReport, "Report", {MutexTypeSyncVar}},
- {MutexTypeSyncVar, "SyncVar", {}},
+ {MutexThreadRegistry,
+ "ThreadRegistry",
+ {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
+ {MutexTypeReport, "Report", {MutexTypeTrace}},
+ {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
{MutexTypeAnnotations, "Annotations", {}},
- {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
+ {MutexTypeAtExit, "AtExit", {}},
{MutexTypeFired, "Fired", {MutexLeaf}},
{MutexTypeRacy, "Racy", {MutexLeaf}},
- {MutexTypeGlobalProc, "GlobalProc", {}},
+ {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
+ {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
+ {MutexTypeTrace, "Trace", {}},
+ {MutexTypeSlot,
+ "Slot",
+ {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
+ MutexTypeSlots}},
+ {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
{},
};
void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
-} // namespace __sanitizer
-#endif
-#if !SANITIZER_GO
-// Must be included in this file to make sure everything is inlined.
-# include "tsan_interface_inl.h"
+} // namespace __sanitizer
#endif
lib/tsan/tsan_rtl.h
@@ -34,17 +34,19 @@
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
-#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
+#include "tsan_ignoreset.h"
+#include "tsan_ilist.h"
#include "tsan_mman.h"
-#include "tsan_sync.h"
-#include "tsan_trace.h"
-#include "tsan_report.h"
-#include "tsan_platform.h"
#include "tsan_mutexset.h"
-#include "tsan_ignoreset.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+#include "tsan_vector_clock.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -54,7 +56,8 @@ namespace __tsan {
#if !SANITIZER_GO
struct MapUnmapCallback;
-#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
+#if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
+ defined(__powerpc__)
struct AP32 {
static const uptr kSpaceBeg = 0;
@@ -69,6 +72,11 @@ struct AP32 {
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+# if defined(__s390x__)
+ typedef MappingS390x Mapping;
+# else
+ typedef Mapping48AddressSpace Mapping;
+# endif
static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
static const uptr kMetadataSize = 0;
@@ -84,240 +92,6 @@ typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
-const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
-
-// FastState (from most significant bit):
-// ignore : 1
-// tid : kTidBits
-// unused : -
-// history_size : 3
-// epoch : kClkBits
-class FastState {
- public:
- FastState(u64 tid, u64 epoch) {
- x_ = tid << kTidShift;
- x_ |= epoch;
- DCHECK_EQ(tid, this->tid());
- DCHECK_EQ(epoch, this->epoch());
- DCHECK_EQ(GetIgnoreBit(), false);
- }
-
- explicit FastState(u64 x)
- : x_(x) {
- }
-
- u64 raw() const {
- return x_;
- }
-
- u64 tid() const {
- u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
- return res;
- }
-
- u64 TidWithIgnore() const {
- u64 res = x_ >> kTidShift;
- return res;
- }
-
- u64 epoch() const {
- u64 res = x_ & ((1ull << kClkBits) - 1);
- return res;
- }
-
- void IncrementEpoch() {
- u64 old_epoch = epoch();
- x_ += 1;
- DCHECK_EQ(old_epoch + 1, epoch());
- (void)old_epoch;
- }
-
- void SetIgnoreBit() { x_ |= kIgnoreBit; }
- void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
- bool GetIgnoreBit() const { return (s64)x_ < 0; }
-
- void SetHistorySize(int hs) {
- CHECK_GE(hs, 0);
- CHECK_LE(hs, 7);
- x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
- }
-
- ALWAYS_INLINE
- int GetHistorySize() const {
- return (int)((x_ >> kHistoryShift) & kHistoryMask);
- }
-
- void ClearHistorySize() {
- SetHistorySize(0);
- }
-
- ALWAYS_INLINE
- u64 GetTracePos() const {
- const int hs = GetHistorySize();
- // When hs == 0, the trace consists of 2 parts.
- const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
- return epoch() & mask;
- }
-
- private:
- friend class Shadow;
- static const int kTidShift = 64 - kTidBits - 1;
- static const u64 kIgnoreBit = 1ull << 63;
- static const u64 kFreedBit = 1ull << 63;
- static const u64 kHistoryShift = kClkBits;
- static const u64 kHistoryMask = 7;
- u64 x_;
-};
-
-// Shadow (from most significant bit):
-// freed : 1
-// tid : kTidBits
-// is_atomic : 1
-// is_read : 1
-// size_log : 2
-// addr0 : 3
-// epoch : kClkBits
-class Shadow : public FastState {
- public:
- explicit Shadow(u64 x)
- : FastState(x) {
- }
-
- explicit Shadow(const FastState &s)
- : FastState(s.x_) {
- ClearHistorySize();
- }
-
- void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ((x_ >> kClkBits) & 31, 0);
- DCHECK_LE(addr0, 7);
- DCHECK_LE(kAccessSizeLog, 3);
- x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
- DCHECK_EQ(kAccessSizeLog, size_log());
- DCHECK_EQ(addr0, this->addr0());
- }
-
- void SetWrite(unsigned kAccessIsWrite) {
- DCHECK_EQ(x_ & kReadBit, 0);
- if (!kAccessIsWrite)
- x_ |= kReadBit;
- DCHECK_EQ(kAccessIsWrite, IsWrite());
- }
-
- void SetAtomic(bool kIsAtomic) {
- DCHECK(!IsAtomic());
- if (kIsAtomic)
- x_ |= kAtomicBit;
- DCHECK_EQ(IsAtomic(), kIsAtomic);
- }
-
- bool IsAtomic() const {
- return x_ & kAtomicBit;
- }
-
- bool IsZero() const {
- return x_ == 0;
- }
-
- static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
- u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
- DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
- return shifted_xor == 0;
- }
-
- static ALWAYS_INLINE
- bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
- u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
- return masked_xor == 0;
- }
-
- static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
- unsigned kS2AccessSize) {
- bool res = false;
- u64 diff = s1.addr0() - s2.addr0();
- if ((s64)diff < 0) { // s1.addr0 < s2.addr0
- // if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff)
- res = true;
- } else {
- // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff)
- res = true;
- }
- DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
- return res;
- }
-
- u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
- u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
- bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
- bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
-
- // The idea behind the freed bit is as follows.
- // When the memory is freed (or otherwise unaccessible) we write to the shadow
- // values with tid/epoch related to the free and the freed bit set.
- // During memory accesses processing the freed bit is considered
- // as msb of tid. So any access races with shadow with freed bit set
- // (it is as if write from a thread with which we never synchronized before).
- // This allows us to detect accesses to freed memory w/o additional
- // overheads in memory access processing and at the same time restore
- // tid/epoch of free.
- void MarkAsFreed() {
- x_ |= kFreedBit;
- }
-
- bool IsFreed() const {
- return x_ & kFreedBit;
- }
-
- bool GetFreedAndReset() {
- bool res = x_ & kFreedBit;
- x_ &= ~kFreedBit;
- return res;
- }
-
- bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
- | (u64(kIsAtomic) << kAtomicShift));
- DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
- return v;
- }
-
- bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
- bool v = ((x_ >> kReadShift) & 3)
- <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
- DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
- (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
- return v;
- }
-
- bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
- bool v = ((x_ >> kReadShift) & 3)
- >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
- DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
- (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
- return v;
- }
-
- private:
- static const u64 kReadShift = 5 + kClkBits;
- static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6 + kClkBits;
- static const u64 kAtomicBit = 1ull << kAtomicShift;
-
- u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
-
- static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
- if (s1.addr0() == s2.addr0()) return true;
- if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
- return true;
- if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
- return true;
- return false;
- }
-};
-
struct ThreadSignalContext;
struct JmpBuf {
@@ -344,7 +118,6 @@ struct Processor {
#endif
DenseSlabAllocCache block_cache;
DenseSlabAllocCache sync_cache;
- DenseSlabAllocCache clock_cache;
DDPhysicalThread *dd_pt;
};
@@ -358,64 +131,86 @@ struct ScopedGlobalProcessor {
};
#endif
+struct TidEpoch {
+ Tid tid;
+ Epoch epoch;
+};
+
+struct TidSlot {
+ Mutex mtx;
+ Sid sid;
+ atomic_uint32_t raw_epoch;
+ ThreadState *thr;
+ Vector<TidEpoch> journal;
+ INode node;
+
+ Epoch epoch() const {
+ return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
+ }
+
+ void SetEpoch(Epoch v) {
+ atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
+ }
+
+ TidSlot();
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
- // Synch epoch represents the threads's epoch before the last synchronization
- // action. It allows to reduce number of shadow state updates.
- // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
- // if we are processing write to X from the same thread at epoch=200,
- // we do nothing, because both writes happen in the same 'synch epoch'.
- // That is, if another memory access does not race with the former write,
- // it does not race with the latter as well.
- // QUESTION: can we can squeeze this into ThreadState::Fast?
- // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
- // taken by epoch between synchs.
- // This way we can save one load from tls.
- u64 fast_synch_epoch;
+ int ignore_sync;
+#if !SANITIZER_GO
+ int ignore_interceptors;
+#endif
+ uptr *shadow_stack_pos;
+
+ // Current position in tctx->trace.Back()->events (Event*).
+ atomic_uintptr_t trace_pos;
+ // PC of the last memory access, used to compute PC deltas in the trace.
+ uptr trace_prev_pc;
+
// Technically `current` should be a separate THREADLOCAL variable;
// but it is placed here in order to share cache line with previous fields.
ThreadState* current;
+
+ atomic_sint32_t pending_signals;
+
+ VectorClock clock;
+
// This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
// We do not distinguish beteween ignoring reads and writes
// for better performance.
int ignore_reads_and_writes;
- int ignore_sync;
int suppress_reports;
// Go does not support ignores.
#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
- // C/C++ uses fixed size shadow stack embed into Trace.
- // Go uses malloc-allocated shadow stack with dynamic size.
uptr *shadow_stack;
uptr *shadow_stack_end;
- uptr *shadow_stack_pos;
- u64 *racy_shadow_addr;
- u64 racy_state[2];
- MutexSet mset;
- ThreadClock clock;
#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
- int ignore_interceptors;
-#endif
- const u32 tid;
- const int unique_id;
- bool in_symbolizer;
+ int in_symbolizer;
+ atomic_uintptr_t in_blocking_func;
bool in_ignored_lib;
bool is_inited;
+#endif
+ MutexSet mset;
bool is_dead;
- bool is_freeing;
- bool is_vptr_access;
- const uptr stk_addr;
- const uptr stk_size;
- const uptr tls_addr;
- const uptr tls_size;
+ const Tid tid;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
ThreadContext *tctx;
DDLogicalThread *dd_lt;
+ TidSlot *slot;
+ uptr slot_epoch;
+ bool slot_locked;
+
// Current wired Processor, or nullptr. Required to handle any events.
Processor *proc1;
#if !SANITIZER_GO
@@ -425,11 +220,11 @@ struct ThreadState {
#endif
atomic_uintptr_t in_signal_handler;
- ThreadSignalContext *signal_ctx;
+ atomic_uintptr_t signal_ctx;
#if !SANITIZER_GO
- u32 last_sleep_stack_id;
- ThreadClock last_sleep_clock;
+ StackID last_sleep_stack_id;
+ VectorClock last_sleep_clock;
#endif
// Set in regions of runtime that must be signal-safe and fork-safe.
@@ -438,47 +233,43 @@ struct ThreadState {
const ReportDesc *current_report;
- explicit ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
- unsigned reuse_count, uptr stk_addr, uptr stk_size,
- uptr tls_addr, uptr tls_size);
-};
+ explicit ThreadState(Tid tid);
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#if !SANITIZER_GO
-#if SANITIZER_MAC || SANITIZER_ANDROID
+#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
-inline void cur_thread_init() { }
-#else
+inline ThreadState *cur_thread_init() { return cur_thread(); }
+# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
-inline void cur_thread_init() {
+inline ThreadState *cur_thread_init() {
ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
if (UNLIKELY(!thr->current))
thr->current = thr;
+ return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
-#endif // SANITIZER_MAC || SANITIZER_ANDROID
+# endif // SANITIZER_APPLE || SANITIZER_ANDROID
#endif // SANITIZER_GO
class ThreadContext final : public ThreadContextBase {
public:
- explicit ThreadContext(int tid);
+ explicit ThreadContext(Tid tid);
~ThreadContext();
ThreadState *thr;
- u32 creation_stack_id;
- SyncClock sync;
- // Epoch at which the thread had started.
- // If we see an event from the thread stamped by an older epoch,
- // the event is from a dead thread that shared tid with this thread.
- u64 epoch0;
- u64 epoch1;
+ StackID creation_stack_id;
+ VectorClock *sync;
+ uptr sync_epoch;
+ Trace trace;
// Override superclass callbacks.
void OnDead() override;
@@ -492,13 +283,7 @@ class ThreadContext final : public ThreadContextBase {
struct RacyStacks {
MD5Hash hash[2];
- bool operator==(const RacyStacks &other) const {
- if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
- return true;
- if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
- return true;
- return false;
- }
+ bool operator==(const RacyStacks &other) const;
};
struct RacyAddress {
@@ -524,28 +309,75 @@ struct Context {
Mutex report_mtx;
int nreported;
- int nmissed_expected;
atomic_uint64_t last_symbolize_time_ns;
void *background_thread;
atomic_uint32_t stop_background_thread;
- ThreadRegistry *thread_registry;
+ ThreadRegistry thread_registry;
+
+ // This is used to prevent a very unlikely but very pathological behavior.
+ // Since memory access handling is not synchronized with DoReset,
+ // a thread running concurrently with DoReset can leave a bogus shadow value
+ // that will be later falsely detected as a race. For such false races
+ // RestoreStack will return false and we will not report it.
+ // However, consider that a thread leaves a whole lot of such bogus values
+ // and these values are later read by a whole lot of threads.
+ // This will cause massive amounts of ReportRace calls and lots of
+ // serialization. In very pathological cases the resulting slowdown
+ // can be >100x. This is very unlikely, but it was presumably observed
+ // in practice: https://github.com/google/sanitizers/issues/1552
+ // If this happens, previous access sid+epoch will be the same for all of
+ // these false races b/c if the thread will try to increment epoch, it will
+ // notice that DoReset has happened and will stop producing bogus shadow
+ // values. So, last_spurious_race is used to remember the last sid+epoch
+ // for which RestoreStack returned false. Then it is used to filter out
+ // races with the same sid+epoch very early and quickly.
+ // It is of course possible that multiple threads left multiple bogus shadow
+ // values and all of them are read by lots of threads at the same time.
+  // In such a case last_spurious_race will only be able to deduplicate a few
+  // races from one thread, then a few from another, and so on. An alternative
+  // would be to hold an array of such sid+epoch pairs, but we consider such a
+  // scenario even less likely.
+ // Note: this can lead to some rare false negatives as well:
+ // 1. When a legit access with the same sid+epoch participates in a race
+ // as the "previous" memory access, it will be wrongly filtered out.
+ // 2. When RestoreStack returns false for a legit memory access because it
+ // was already evicted from the thread trace, we will still remember it in
+ // last_spurious_race. Then if there is another racing memory access from
+ // the same thread that happened in the same epoch, but was stored in the
+ // next thread trace part (which is still preserved in the thread trace),
+ // we will also wrongly filter it out while RestoreStack would actually
+ // succeed for that second memory access.
+ RawShadow last_spurious_race;
Mutex racy_mtx;
Vector<RacyStacks> racy_stacks;
- Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
Mutex fired_suppressions_mtx;
InternalMmapVector<FiredSuppression> fired_suppressions;
DDetector *dd;
- ClockAlloc clock_alloc;
-
Flags flags;
-
- u64 int_alloc_cnt[MBlockTypeCount];
- u64 int_alloc_siz[MBlockTypeCount];
+ fd_t memprof_fd;
+
+ // The last slot index (kFreeSid) is used to denote freed memory.
+ TidSlot slots[kThreadSlotCount - 1];
+
+ // Protects global_epoch, slot_queue, trace_part_recycle.
+ Mutex slot_mtx;
+ uptr global_epoch; // guarded by slot_mtx and by all slot mutexes
+ bool resetting; // global reset is in progress
+ IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
+ IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
+ SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
+#if SANITIZER_GO
+ uptr mapped_shadow_begin;
+ uptr mapped_shadow_end;
+#endif
};
extern Context *ctx; // The one and the only global runtime context.
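A minimal sketch of the early filtering described in the last_spurious_race comment above; the helper name and call site are assumptions, and only the Shadow accessors used elsewhere in this patch are relied on:

// Illustrative only (hypothetical helper): before reporting, skip a race whose
// previous access has the same sid+epoch for which RestoreStack already failed,
// as remembered in ctx->last_spurious_race.
static bool IsKnownSpuriousRace(Shadow prev) {
  Shadow last(ctx->last_spurious_race);
  return prev.sid() == last.sid() && prev.epoch() == last.epoch();
}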
@@ -574,17 +406,17 @@ uptr TagFromShadowStackFrame(uptr pc);
class ScopedReportBase {
public:
- void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
- const MutexSet *mset);
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
+ StackTrace stack, const MutexSet *mset);
void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false);
- void AddThread(int unique_tid, bool suppressable = false);
- void AddUniqueTid(int unique_tid);
- void AddMutex(const SyncVar *s);
- u64 AddMutex(u64 id);
+ void AddThread(Tid tid, bool suppressable = false);
+ void AddUniqueTid(Tid unique_tid);
+ int AddMutex(uptr addr, StackID creation_stack_id);
void AddLocation(uptr addr, uptr size);
- void AddSleep(u32 stack_id);
+ void AddSleep(StackID stack_id);
void SetCount(int count);
+ void SetSigNum(int sig);
const ReportDesc *GetReport() const;
@@ -598,8 +430,6 @@ class ScopedReportBase {
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore_interceptors_;
- void AddDeadMutex(u64 id);
-
ScopedReportBase(const ScopedReportBase &) = delete;
void operator=(const ScopedReportBase &) = delete;
};
@@ -615,8 +445,6 @@ class ScopedReport : public ScopedReportBase {
bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
-void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset, uptr *tag = nullptr);
// The stack could look like:
// <start> | <main> | <foo> | tag | <bar>
@@ -656,19 +484,20 @@ void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
+void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
-void ForkChildAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
-void ReportRace(ThreadState *thr);
+void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
+ AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
-void PrintMatchedBenignRaces();
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
@@ -682,10 +511,11 @@ void PrintMatchedBenignRaces();
# define DPrintf2(...)
#endif
-u32 CurrentStackId(ThreadState *thr, uptr pc);
-ReportStack *SymbolizeStackId(u32 stack_id);
+StackID CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc); // uses libunwind
+MBlock *JavaHeapBlock(uptr addr, uptr *start);
void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
@@ -694,69 +524,49 @@ int Finalize(ThreadState *thr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
-void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur);
-void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write);
-void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
- uptr size, uptr step, bool is_write);
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int size, bool kAccessIsWrite, bool kIsAtomic);
-
-const int kSizeLog1 = 0;
-const int kSizeLog2 = 1;
-const int kSizeLog4 = 2;
-const int kSizeLog8 = 3;
-
-void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
-}
-
-void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
-}
-
-void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
-}
-
-void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+// This creates 2 non-inlined specialized versions of MemoryAccessRange.
+template <bool is_read>
+void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
+
+ALWAYS_INLINE
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ bool is_write) {
+ if (size == 0)
+ return;
+ if (is_write)
+ MemoryAccessRangeT<false>(thr, pc, addr, size);
+ else
+ MemoryAccessRangeT<true>(thr, pc, addr, size);
}
-void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
uptr size);
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
-void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
-void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
-
-void FuncEntry(ThreadState *thr, uptr pc);
-void FuncExit(ThreadState *thr);
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreEnd(ThreadState *thr);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncEnd(ThreadState *thr);
-int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
-void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
-int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
-void ThreadJoin(ThreadState *thr, uptr pc, int tid);
-void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
-void ProcessPendingSignals(ThreadState *thr);
-void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);
+void ProcessPendingSignalsImpl(ThreadState *thr);
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
Processor *ProcCreate();
void ProcDestroy(Processor *proc);
@@ -785,65 +595,12 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr);
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
-void AcquireGlobal(ThreadState *thr, uptr pc);
+void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
-void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
-
-// The hacky call uses custom calling convention and an assembly thunk.
-// It is considerably faster that a normal call for the caller
-// if it is not executed (it is intended for slow paths from hot functions).
-// The trick is that the call preserves all registers and the compiler
-// does not treat it as a call.
-// If it does not work for you, use normal call.
-#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
-// The caller may not create the stack frame for itself at all,
-// so we create a reserve stack frame for it (1024b must be enough).
-#define HACKY_CALL(f) \
- __asm__ __volatile__("sub $1024, %%rsp;" \
- CFI_INL_ADJUST_CFA_OFFSET(1024) \
- ".hidden " #f "_thunk;" \
- "call " #f "_thunk;" \
- "add $1024, %%rsp;" \
- CFI_INL_ADJUST_CFA_OFFSET(-1024) \
- ::: "memory", "cc");
-#else
-#define HACKY_CALL(f) f()
-#endif
-
-void TraceSwitch(ThreadState *thr);
-uptr TraceTopPC(ThreadState *thr);
-uptr TraceSize();
-uptr TraceParts();
-Trace *ThreadTrace(int tid);
-
-extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
- EventType typ, u64 addr) {
- if (!kCollectHistory)
- return;
- DCHECK_GE((int)typ, 0);
- DCHECK_LE((int)typ, 7);
- DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
- u64 pos = fs.GetTracePos();
- if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#if !SANITIZER_GO
- HACKY_CALL(__tsan_trace_switch);
-#else
- TraceSwitch(thr);
-#endif
- }
- Event *trace = (Event*)GetThreadTrace(fs.tid());
- Event *evp = &trace[pos];
- Event ev = (u64)addr | ((u64)typ << kEventPCBits);
- *evp = ev;
-}
+void IncrementEpoch(ThreadState *thr);
#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
@@ -851,6 +608,13 @@ uptr ALWAYS_INLINE HeapEnd() {
}
#endif
+void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotDetach(ThreadState *thr);
+void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
+void DoReset(ThreadState *thr, uptr epoch);
+void FlushShadowMemory();
+
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
@@ -861,6 +625,189 @@ enum FiberSwitchFlags {
FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
};
+class SlotLocker {
+ public:
+ ALWAYS_INLINE
+ SlotLocker(ThreadState *thr, bool recursive = false)
+ : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
+#if !SANITIZER_GO
+ // We are in trouble if we are here with in_blocking_func set.
+ // If in_blocking_func is set, all signals will be delivered synchronously,
+ // which means we can't lock slots since the signal handler will try
+ // to lock it recursively and deadlock.
+ DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
+#endif
+ if (!locked_)
+ SlotLock(thr_);
+ }
+
+ ALWAYS_INLINE
+ ~SlotLocker() {
+ if (!locked_)
+ SlotUnlock(thr_);
+ }
+
+ private:
+ ThreadState *thr_;
+ bool locked_;
+};
+
+class SlotUnlocker {
+ public:
+ SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
+ if (locked_)
+ SlotUnlock(thr_);
+ }
+
+ ~SlotUnlocker() {
+ if (locked_)
+ SlotLock(thr_);
+ }
+
+ private:
+ ThreadState *thr_;
+ bool locked_;
+};
+
+ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
+ if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
+ ProcessPendingSignalsImpl(thr);
+}
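An illustrative usage sketch for the RAII helpers above; the function name is hypothetical and only SlotLocker as declared here is assumed:

// Illustrative only: a runtime operation that must hold the thread's slot,
// tolerating callers that may already hold it (recursive = true).
void ExampleSlotOperation(ThreadState *thr) {
  SlotLocker locker(thr, /*recursive=*/true);
  // ... mutate state guarded by thr->slot->mtx ...
}  // SlotUnlock runs here unless the slot was already locked on entry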
+
+extern bool is_initialized;
+
+ALWAYS_INLINE
+void LazyInitialize(ThreadState *thr) {
+ // If we can use .preinit_array, assume that __tsan_init
+  // called from .preinit_array initializes the runtime before
+  // any instrumented code runs, except when tsan is used as a
+ // shared library.
+#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
+ if (UNLIKELY(!is_initialized))
+ Initialize(thr);
+#endif
+}
+
+void TraceResetForTesting();
+void TraceSwitchPart(ThreadState *thr);
+void TraceSwitchPartImpl(ThreadState *thr);
+bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
+ AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
+ MutexSet *pmset, uptr *ptag);
+
+template <typename EventT>
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
+ EventT **ev) {
+ // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
+ // so we check it here proactively.
+ DCHECK(thr->shadow_stack);
+ Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+#if SANITIZER_DEBUG
+ // TraceSwitch acquires these mutexes,
+ // so we lock them here to detect deadlocks more reliably.
+ { Lock lock(&ctx->slot_mtx); }
+ { Lock lock(&thr->tctx->trace.mtx); }
+ TracePart *current = thr->tctx->trace.parts.Back();
+ if (current) {
+    DCHECK_GE(pos, &current->events[0]);
+    DCHECK_LE(pos, &current->events[TracePart::kSize]);
+ } else {
+ DCHECK_EQ(pos, nullptr);
+ }
+#endif
+ // TracePart is allocated with mmap and is at least 4K aligned.
+ // So the following check is a faster way to check for part end.
+  // It may have false positives in the middle of the trace;
+  // they are filtered out in TraceSwitch.
+ if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
+ return false;
+ *ev = reinterpret_cast<EventT *>(pos);
+ return true;
+}
+
+template <typename EventT>
+ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
+ DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
+ atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
+}
+
+template <typename EventT>
+void TraceEvent(ThreadState *thr, EventT ev) {
+ EventT *evp;
+ if (!TraceAcquire(thr, &evp)) {
+ TraceSwitchPart(thr);
+ UNUSED bool res = TraceAcquire(thr, &evp);
+ DCHECK(res);
+ }
+ *evp = ev;
+ TraceRelease(thr, evp);
+}
+
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
+ uptr pc = 0) {
+ if (!kCollectHistory)
+ return true;
+ EventFunc *ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ ev->is_access = 0;
+ ev->is_func = 1;
+ ev->pc = pc;
+ TraceRelease(thr, ev);
+ return true;
+}
+
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void TraceFunc(ThreadState *thr, uptr pc = 0);
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+ StackID stk);
+void TraceMutexUnlock(ThreadState *thr, uptr addr);
+void TraceTime(ThreadState *thr);
+
+void TraceRestartFuncExit(ThreadState *thr);
+void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
+
+void GrowShadowStack(ThreadState *thr);
+
+ALWAYS_INLINE
+void FuncEntry(ThreadState *thr, uptr pc) {
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
+ if (UNLIKELY(!TryTraceFunc(thr, pc)))
+ return TraceRestartFuncEntry(thr, pc);
+ DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+ALWAYS_INLINE
+void FuncExit(ThreadState *thr) {
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
+ if (UNLIKELY(!TryTraceFunc(thr, 0)))
+ return TraceRestartFuncExit(thr);
+ DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#endif
+ thr->shadow_stack_pos--;
+}
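For context, a rough sketch of how the compiler-inserted instrumentation callbacks (defined in the tsan interface files, not shown in this hunk) are expected to reduce to the helpers above:

// Illustrative only; the real definitions live in the interface files.
extern "C" void __tsan_func_entry(void *pc) {
  FuncEntry(cur_thread(), (uptr)pc);
}
extern "C" void __tsan_func_exit() {
  FuncExit(cur_thread());
}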
+
+#if !SANITIZER_GO
+extern void (*on_initialize)(void);
+extern int (*on_finalize)(int);
+#endif
} // namespace __tsan
#endif // TSAN_RTL_H
lib/tsan/tsan_rtl_aarch64.S
@@ -3,28 +3,6 @@
#include "sanitizer_common/sanitizer_asm.h"
-#if defined(__APPLE__)
-.align 2
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long _setjmp$non_lazy_ptr
-_setjmp$non_lazy_ptr:
-.indirect_symbol _setjmp
-.long 0
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long __setjmp$non_lazy_ptr
-__setjmp$non_lazy_ptr:
-.indirect_symbol __setjmp
-.long 0
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long _sigsetjmp$non_lazy_ptr
-_sigsetjmp$non_lazy_ptr:
-.indirect_symbol _sigsetjmp
-.long 0
-#endif
-
#if !defined(__APPLE__)
.section .text
#else
@@ -75,9 +53,8 @@ ASM_SYMBOL_INTERCEPTOR(setjmp):
ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
ldr x1, [x1]
#else
- adrp x1, _setjmp$non_lazy_ptr@page
- add x1, x1, _setjmp$non_lazy_ptr@pageoff
- ldr x1, [x1]
+ adrp x1, _setjmp@GOTPAGE
+ ldr x1, [x1, _setjmp@GOTPAGEOFF]
#endif
br x1
@@ -126,9 +103,8 @@ ASM_SYMBOL_INTERCEPTOR(_setjmp):
ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
ldr x1, [x1]
#else
- adrp x1, __setjmp$non_lazy_ptr@page
- add x1, x1, __setjmp$non_lazy_ptr@pageoff
- ldr x1, [x1]
+ adrp x1, __setjmp@GOTPAGE
+ ldr x1, [x1, __setjmp@GOTPAGEOFF]
#endif
br x1
@@ -179,9 +155,8 @@ ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
ldr x2, [x2]
#else
- adrp x2, _sigsetjmp$non_lazy_ptr@page
- add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
- ldr x2, [x2]
+ adrp x2, _sigsetjmp@GOTPAGE
+ ldr x2, [x2, _sigsetjmp@GOTPAGEOFF]
#endif
br x2
CFI_ENDPROC
lib/tsan/tsan_rtl_access.cpp
@@ -0,0 +1,744 @@
+//===-- tsan_rtl_access.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Definitions of memory access and function entry/exit entry points.
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState* thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
+ if (!kCollectHistory)
+ return true;
+ EventAccess* ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
+ uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
+ thr->trace_prev_pc = pc;
+ if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
+ ev->is_access = 1;
+ ev->is_read = !!(typ & kAccessRead);
+ ev->is_atomic = !!(typ & kAccessAtomic);
+ ev->size_log = size_log;
+ ev->pc_delta = pc_delta;
+ DCHECK_EQ(ev->pc_delta, pc_delta);
+ ev->addr = CompressAddr(addr);
+ TraceRelease(thr, ev);
+ return true;
+ }
+ auto* evex = reinterpret_cast<EventAccessExt*>(ev);
+ evex->is_access = 0;
+ evex->is_func = 0;
+ evex->type = EventType::kAccessExt;
+ evex->is_read = !!(typ & kAccessRead);
+ evex->is_atomic = !!(typ & kAccessAtomic);
+ evex->size_log = size_log;
+ // Note: this is important, see comment in EventAccessExt.
+ evex->_ = 0;
+ evex->addr = CompressAddr(addr);
+ evex->pc = pc;
+ TraceRelease(thr, evex);
+ return true;
+}
+
+ALWAYS_INLINE
+bool TryTraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ if (!kCollectHistory)
+ return true;
+ EventAccessRange* ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ thr->trace_prev_pc = pc;
+ ev->is_access = 0;
+ ev->is_func = 0;
+ ev->type = EventType::kAccessRange;
+ ev->is_read = !!(typ & kAccessRead);
+ ev->is_free = !!(typ & kAccessFree);
+ ev->size_lo = size;
+ ev->pc = CompressAddr(pc);
+ ev->addr = CompressAddr(addr);
+ ev->size_hi = size >> EventAccessRange::kSizeLoBits;
+ TraceRelease(thr, ev);
+ return true;
+}
+
+void TraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
+ return;
+ TraceSwitchPart(thr);
+ UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
+ DCHECK(res);
+}
+
+void TraceFunc(ThreadState* thr, uptr pc) {
+ if (LIKELY(TryTraceFunc(thr, pc)))
+ return;
+ TraceSwitchPart(thr);
+ UNUSED bool res = TryTraceFunc(thr, pc);
+ DCHECK(res);
+}
+
+NOINLINE void TraceRestartFuncEntry(ThreadState* thr, uptr pc) {
+ TraceSwitchPart(thr);
+ FuncEntry(thr, pc);
+}
+
+NOINLINE void TraceRestartFuncExit(ThreadState* thr) {
+ TraceSwitchPart(thr);
+ FuncExit(thr);
+}
+
+void TraceMutexLock(ThreadState* thr, EventType type, uptr pc, uptr addr,
+ StackID stk) {
+ DCHECK(type == EventType::kLock || type == EventType::kRLock);
+ if (!kCollectHistory)
+ return;
+ EventLock ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = type;
+ ev.pc = CompressAddr(pc);
+ ev.stack_lo = stk;
+ ev.stack_hi = stk >> EventLock::kStackIDLoBits;
+ ev._ = 0;
+ ev.addr = CompressAddr(addr);
+ TraceEvent(thr, ev);
+}
+
+void TraceMutexUnlock(ThreadState* thr, uptr addr) {
+ if (!kCollectHistory)
+ return;
+ EventUnlock ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = EventType::kUnlock;
+ ev._ = 0;
+ ev.addr = CompressAddr(addr);
+ TraceEvent(thr, ev);
+}
+
+void TraceTime(ThreadState* thr) {
+ if (!kCollectHistory)
+ return;
+ FastState fast_state = thr->fast_state;
+ EventTime ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = EventType::kTime;
+ ev.sid = static_cast<u64>(fast_state.sid());
+ ev.epoch = static_cast<u64>(fast_state.epoch());
+ ev._ = 0;
+ TraceEvent(thr, ev);
+}
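The encoders above all rely on the common is_access/is_func/type header bits; a hedged decoding sketch follows (the generic Event field layout is assumed from how the encoders use it):

// Illustrative only: how a trace consumer is expected to distinguish the
// event kinds produced by the functions above.
void ClassifyEventSketch(Event ev) {
  if (ev.is_access) {
    // Compressed memory access (EventAccess): size, read/atomic bits, PC delta.
  } else if (ev.is_func) {
    // Function entry (pc != 0) or function exit (pc == 0), see EventFunc.
  } else {
    // Extended event dispatched on ev.type: kAccessExt, kAccessRange,
    // kLock/kRLock, kUnlock or kTime.
  }
}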
+
+NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ Shadow old,
+ AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ // For the free shadow markers the first element (that contains kFreeSid)
+ // triggers the race, but the second element contains info about the freeing
+ // thread, take it.
+ if (old.sid() == kFreeSid)
+ old = Shadow(LoadShadow(&shadow_mem[1]));
+  // This prevents trapping on this address in the future.
+ for (uptr i = 0; i < kShadowCnt; i++)
+ StoreShadow(&shadow_mem[i], i == 0 ? Shadow::kRodata : Shadow::kEmpty);
+ // See the comment in MemoryRangeFreed as to why the slot is locked
+ // for free memory accesses. ReportRace must not be called with
+ // the slot locked because of the fork. But MemoryRangeFreed is not
+ // called during fork because fork sets ignore_reads_and_writes,
+ // so simply unlocking the slot should be fine.
+ if (typ & kAccessSlotLocked)
+ SlotUnlock(thr);
+ ReportRace(thr, shadow_mem, cur, Shadow(old), typ);
+ if (typ & kAccessSlotLocked)
+ SlotLock(thr);
+}
+
+#if !TSAN_VECTORIZE
+ALWAYS_INLINE
+bool ContainsSameAccess(RawShadow* s, Shadow cur, int unused0, int unused1,
+ AccessType typ) {
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ auto old = LoadShadow(&s[i]);
+ if (!(typ & kAccessRead)) {
+ if (old == cur.raw())
+ return true;
+ continue;
+ }
+ auto masked = static_cast<RawShadow>(static_cast<u32>(old) |
+ static_cast<u32>(Shadow::kRodata));
+ if (masked == cur.raw())
+ return true;
+ if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
+ if (old == Shadow::kRodata)
+ return true;
+ }
+ }
+ return false;
+}
+
+ALWAYS_INLINE
+bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ int unused0, int unused1, AccessType typ) {
+ bool stored = false;
+ for (uptr idx = 0; idx < kShadowCnt; idx++) {
+ RawShadow* sp = &shadow_mem[idx];
+ Shadow old(LoadShadow(sp));
+ if (LIKELY(old.raw() == Shadow::kEmpty)) {
+ if (!(typ & kAccessCheckOnly) && !stored)
+ StoreShadow(sp, cur.raw());
+ return false;
+ }
+ if (LIKELY(!(cur.access() & old.access())))
+ continue;
+ if (LIKELY(cur.sid() == old.sid())) {
+ if (!(typ & kAccessCheckOnly) &&
+ LIKELY(cur.access() == old.access() && old.IsRWWeakerOrEqual(typ))) {
+ StoreShadow(sp, cur.raw());
+ stored = true;
+ }
+ continue;
+ }
+ if (LIKELY(old.IsBothReadsOrAtomic(typ)))
+ continue;
+ if (LIKELY(thr->clock.Get(old.sid()) >= old.epoch()))
+ continue;
+ DoReportRace(thr, shadow_mem, cur, old, typ);
+ return true;
+ }
+ // We did not find any races and had already stored
+ // the current access info, so we are done.
+ if (LIKELY(stored))
+ return false;
+ // Choose a random candidate slot and replace it.
+ uptr index =
+ atomic_load_relaxed(&thr->trace_pos) / sizeof(Event) % kShadowCnt;
+ StoreShadow(&shadow_mem[index], cur.raw());
+ return false;
+}
+
+# define LOAD_CURRENT_SHADOW(cur, shadow_mem) UNUSED int access = 0, shadow = 0
+
+#else /* !TSAN_VECTORIZE */
+
+ALWAYS_INLINE
+bool ContainsSameAccess(RawShadow* unused0, Shadow unused1, m128 shadow,
+ m128 access, AccessType typ) {
+ // Note: we could check if there is a larger access of the same type,
+ // e.g. we just allocated/memset-ed a block (so it contains 8 byte writes)
+  // and now do smaller reads/writes; these can also be considered as the "same
+  // access". However, it will make the check more expensive, so it's unclear
+  // if it's worth it. But this would conserve trace space, so it's useful
+  // besides the potential speed up.
+ if (!(typ & kAccessRead)) {
+ const m128 same = _mm_cmpeq_epi32(shadow, access);
+ return _mm_movemask_epi8(same);
+ }
+ // For reads we need to reset read bit in the shadow,
+ // because we need to match read with both reads and writes.
+ // Shadow::kRodata has only read bit set, so it does what we want.
+ // We also abuse it for rodata check to save few cycles
+ // since we already loaded Shadow::kRodata into a register.
+ // Reads from rodata can't race.
+ // Measurements show that they can be 10-20% of all memory accesses.
+ // Shadow::kRodata has epoch 0 which cannot appear in shadow normally
+ // (thread epochs start from 1). So the same read bit mask
+  // serves as a rodata indicator.
+ const m128 read_mask = _mm_set1_epi32(static_cast<u32>(Shadow::kRodata));
+ const m128 masked_shadow = _mm_or_si128(shadow, read_mask);
+ m128 same = _mm_cmpeq_epi32(masked_shadow, access);
+ // Range memory accesses check Shadow::kRodata before calling this,
+  // Shadow::kRodata is not possible for a free memory access
+ // and Go does not use Shadow::kRodata.
+ if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
+ const m128 ro = _mm_cmpeq_epi32(shadow, read_mask);
+ same = _mm_or_si128(ro, same);
+ }
+ return _mm_movemask_epi8(same);
+}
+
+NOINLINE void DoReportRaceV(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ u32 race_mask, m128 shadow, AccessType typ) {
+  // race_mask indicates which of the shadow elements raced with the current
+ // access. Extract that element.
+ CHECK_NE(race_mask, 0);
+ u32 old;
+ // Note: _mm_extract_epi32 index must be a constant value.
+ switch (__builtin_ffs(race_mask) / 4) {
+ case 0:
+ old = _mm_extract_epi32(shadow, 0);
+ break;
+ case 1:
+ old = _mm_extract_epi32(shadow, 1);
+ break;
+ case 2:
+ old = _mm_extract_epi32(shadow, 2);
+ break;
+ case 3:
+ old = _mm_extract_epi32(shadow, 3);
+ break;
+ }
+ Shadow prev(static_cast<RawShadow>(old));
+ // For the free shadow markers the first element (that contains kFreeSid)
+ // triggers the race, but the second element contains info about the freeing
+ // thread, take it.
+ if (prev.sid() == kFreeSid)
+ prev = Shadow(static_cast<RawShadow>(_mm_extract_epi32(shadow, 1)));
+ DoReportRace(thr, shadow_mem, cur, prev, typ);
+}
+
+ALWAYS_INLINE
+bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ m128 shadow, m128 access, AccessType typ) {
+ // Note: empty/zero slots don't intersect with any access.
+ const m128 zero = _mm_setzero_si128();
+ const m128 mask_access = _mm_set1_epi32(0x000000ff);
+ const m128 mask_sid = _mm_set1_epi32(0x0000ff00);
+ const m128 mask_read_atomic = _mm_set1_epi32(0xc0000000);
+ const m128 access_and = _mm_and_si128(access, shadow);
+ const m128 access_xor = _mm_xor_si128(access, shadow);
+ const m128 intersect = _mm_and_si128(access_and, mask_access);
+ const m128 not_intersect = _mm_cmpeq_epi32(intersect, zero);
+ const m128 not_same_sid = _mm_and_si128(access_xor, mask_sid);
+ const m128 same_sid = _mm_cmpeq_epi32(not_same_sid, zero);
+ const m128 both_read_or_atomic = _mm_and_si128(access_and, mask_read_atomic);
+ const m128 no_race =
+ _mm_or_si128(_mm_or_si128(not_intersect, same_sid), both_read_or_atomic);
+ const int race_mask = _mm_movemask_epi8(_mm_cmpeq_epi32(no_race, zero));
+ if (UNLIKELY(race_mask))
+ goto SHARED;
+
+STORE : {
+ if (typ & kAccessCheckOnly)
+ return false;
+ // We could also replace different sid's if access is the same,
+ // rw weaker and happens before. However, just checking access below
+ // is not enough because we also need to check that !both_read_or_atomic
+ // (reads from different sids can be concurrent).
+ // Theoretically we could replace smaller accesses with larger accesses,
+ // but it's unclear if it's worth doing.
+ const m128 mask_access_sid = _mm_set1_epi32(0x0000ffff);
+ const m128 not_same_sid_access = _mm_and_si128(access_xor, mask_access_sid);
+ const m128 same_sid_access = _mm_cmpeq_epi32(not_same_sid_access, zero);
+ const m128 access_read_atomic =
+ _mm_set1_epi32((typ & (kAccessRead | kAccessAtomic)) << 30);
+ const m128 rw_weaker =
+ _mm_cmpeq_epi32(_mm_max_epu32(shadow, access_read_atomic), shadow);
+ const m128 rewrite = _mm_and_si128(same_sid_access, rw_weaker);
+ const int rewrite_mask = _mm_movemask_epi8(rewrite);
+ int index = __builtin_ffs(rewrite_mask);
+ if (UNLIKELY(index == 0)) {
+ const m128 empty = _mm_cmpeq_epi32(shadow, zero);
+ const int empty_mask = _mm_movemask_epi8(empty);
+ index = __builtin_ffs(empty_mask);
+ if (UNLIKELY(index == 0))
+ index = (atomic_load_relaxed(&thr->trace_pos) / 2) % 16;
+ }
+ StoreShadow(&shadow_mem[index / 4], cur.raw());
+ // We could zero other slots determined by rewrite_mask.
+ // That would help other threads to evict better slots,
+ // but it's unclear if it's worth it.
+ return false;
+}
+
+SHARED:
+ m128 thread_epochs = _mm_set1_epi32(0x7fffffff);
+  // Need to unroll this because _mm_extract_epi8/_mm_insert_epi32
+ // indexes must be constants.
+# define LOAD_EPOCH(idx) \
+ if (LIKELY(race_mask & (1 << (idx * 4)))) { \
+ u8 sid = _mm_extract_epi8(shadow, idx * 4 + 1); \
+ u16 epoch = static_cast<u16>(thr->clock.Get(static_cast<Sid>(sid))); \
+ thread_epochs = _mm_insert_epi32(thread_epochs, u32(epoch) << 16, idx); \
+ }
+ LOAD_EPOCH(0);
+ LOAD_EPOCH(1);
+ LOAD_EPOCH(2);
+ LOAD_EPOCH(3);
+# undef LOAD_EPOCH
+ const m128 mask_epoch = _mm_set1_epi32(0x3fff0000);
+ const m128 shadow_epochs = _mm_and_si128(shadow, mask_epoch);
+ const m128 concurrent = _mm_cmplt_epi32(thread_epochs, shadow_epochs);
+ const int concurrent_mask = _mm_movemask_epi8(concurrent);
+ if (LIKELY(concurrent_mask == 0))
+ goto STORE;
+
+ DoReportRaceV(thr, shadow_mem, cur, concurrent_mask, shadow, typ);
+ return true;
+}
+
+# define LOAD_CURRENT_SHADOW(cur, shadow_mem) \
+ const m128 access = _mm_set1_epi32(static_cast<u32>((cur).raw())); \
+ const m128 shadow = _mm_load_si128(reinterpret_cast<m128*>(shadow_mem))
+#endif
+
+char* DumpShadow(char* buf, RawShadow raw) {
+ if (raw == Shadow::kEmpty) {
+ internal_snprintf(buf, 64, "0");
+ return buf;
+ }
+ Shadow s(raw);
+ AccessType typ;
+ s.GetAccess(nullptr, nullptr, &typ);
+ internal_snprintf(buf, 64, "{tid=%u@%u access=0x%x typ=%x}",
+ static_cast<u32>(s.sid()), static_cast<u32>(s.epoch()),
+ s.access(), static_cast<u32>(typ));
+ return buf;
+}
+
+// TryTrace* and TraceRestart* functions allow turning memory access and func
+// entry/exit callbacks into leaf functions with all associated performance
+// benefits. These hottest callbacks do only 2 slow path calls: report a race
+// and trace part switching. Race reporting is easy to turn into a tail call: we
+// just always return from the runtime after reporting a race. But trace part
+// switching is harder because it needs to be in the middle of callbacks. To
+// turn it into a tail call we immediately return after TraceRestart* functions,
+// but TraceRestart* functions themselves recurse into the callback after
+// switching the trace part. As a result the hottest callbacks contain only tail
+// calls, which effectively makes them leaf functions (can use all registers,
+// no frame setup, etc).
+NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ TraceSwitchPart(thr);
+ MemoryAccess(thr, pc, addr, size, typ);
+}
+
+ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ RawShadow* shadow_mem = MemToShadow(addr);
+ UNUSED char memBuf[4][64];
+ DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
+ static_cast<int>(thr->fast_state.sid()),
+ static_cast<int>(thr->fast_state.epoch()), (void*)addr, size,
+ static_cast<int>(typ), DumpShadow(memBuf[0], shadow_mem[0]),
+ DumpShadow(memBuf[1], shadow_mem[1]),
+ DumpShadow(memBuf[2], shadow_mem[2]),
+ DumpShadow(memBuf[3], shadow_mem[3]));
+
+ FastState fast_state = thr->fast_state;
+ Shadow cur(fast_state, addr, size, typ);
+
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ if (!TryTraceMemoryAccess(thr, pc, addr, size, typ))
+ return TraceRestartMemoryAccess(thr, pc, addr, size, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr, AccessType typ);
+
+NOINLINE
+void RestartMemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
+ AccessType typ) {
+ TraceSwitchPart(thr);
+ MemoryAccess16(thr, pc, addr, typ);
+}
+
+ALWAYS_INLINE USED void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
+ AccessType typ) {
+ const uptr size = 16;
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ Shadow cur(fast_state, 0, 8, typ);
+ RawShadow* shadow_mem = MemToShadow(addr);
+ bool traced = false;
+ {
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ goto SECOND;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccess16(thr, pc, addr, typ);
+ traced = true;
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ }
+SECOND:
+ shadow_mem += kShadowCnt;
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (!traced && !TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccess16(thr, pc, addr, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+NOINLINE
+void RestartUnalignedMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ TraceSwitchPart(thr);
+ UnalignedMemoryAccess(thr, pc, addr, size, typ);
+}
+
+ALWAYS_INLINE USED void UnalignedMemoryAccess(ThreadState* thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK_LE(size, 8);
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ RawShadow* shadow_mem = MemToShadow(addr);
+ bool traced = false;
+ uptr size1 = Min<uptr>(size, RoundUp(addr + 1, kShadowCell) - addr);
+ {
+ Shadow cur(fast_state, addr, size1, typ);
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ goto SECOND;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartUnalignedMemoryAccess(thr, pc, addr, size, typ);
+ traced = true;
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ }
+SECOND:
+ uptr size2 = size - size1;
+ if (LIKELY(size2 == 0))
+ return;
+ shadow_mem += kShadowCnt;
+ Shadow cur(fast_state, 0, size2, typ);
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (!traced && !TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartUnalignedMemoryAccess(thr, pc, addr, size, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
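For a concrete feel of the size1/size2 split above, here is a tiny standalone sketch assuming the 8-byte shadow cell used in this commit; the example address and the RoundUp arithmetic are illustrative only.

// split_access.cc -- how an unaligned access of size <= 8 is divided
// between two adjacent 8-byte shadow cells (illustrative only).
#include <cstdio>
#include <cstdint>

int main() {
  const uint64_t kShadowCell = 8;
  uint64_t addr = 0x1006, size = 8;  // crosses the 0x1008 cell boundary
  // Round addr+1 up to the next cell boundary (addr+1 guarantees at least
  // one byte stays in the first cell), as in UnalignedMemoryAccess.
  uint64_t round_up = (addr + 1 + kShadowCell - 1) / kShadowCell * kShadowCell;
  uint64_t size1 = size < round_up - addr ? size : round_up - addr;
  uint64_t size2 = size - size1;
  std::printf("first cell: %llu bytes, second cell: %llu bytes\n",
              (unsigned long long)size1, (unsigned long long)size2);
  // Prints: first cell: 2 bytes, second cell: 6 bytes
}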
+
+void ShadowSet(RawShadow* p, RawShadow* end, RawShadow v) {
+ DCHECK_LE(p, end);
+ DCHECK(IsShadowMem(p));
+ DCHECK(IsShadowMem(end));
+ UNUSED const uptr kAlign = kShadowCnt * kShadowSize;
+ DCHECK_EQ(reinterpret_cast<uptr>(p) % kAlign, 0);
+ DCHECK_EQ(reinterpret_cast<uptr>(end) % kAlign, 0);
+#if !TSAN_VECTORIZE
+ for (; p < end; p += kShadowCnt) {
+ p[0] = v;
+ for (uptr i = 1; i < kShadowCnt; i++) p[i] = Shadow::kEmpty;
+ }
+#else
+ m128 vv = _mm_setr_epi32(
+ static_cast<u32>(v), static_cast<u32>(Shadow::kEmpty),
+ static_cast<u32>(Shadow::kEmpty), static_cast<u32>(Shadow::kEmpty));
+ m128* vp = reinterpret_cast<m128*>(p);
+ m128* vend = reinterpret_cast<m128*>(end);
+ for (; vp < vend; vp++) _mm_store_si128(vp, vv);
+#endif
+}
+
+static void MemoryRangeSet(uptr addr, uptr size, RawShadow val) {
+ if (size == 0)
+ return;
+ DCHECK_EQ(addr % kShadowCell, 0);
+ DCHECK_EQ(size % kShadowCell, 0);
+ // If a user passes some insane arguments (memset(0)),
+ // let it just crash as usual.
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return;
+ RawShadow* begin = MemToShadow(addr);
+ RawShadow* end = begin + size / kShadowCell * kShadowCnt;
+ // Don't want to touch lots of shadow memory.
+  // If a program maps a 10MB stack, there is no need to reset the whole range.
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
+ if (SANITIZER_WINDOWS ||
+ size <= common_flags()->clear_shadow_mmap_threshold) {
+ ShadowSet(begin, end, val);
+ return;
+ }
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = GetPageSizeCached();
+  // Set at least the first kPageSize/2 bytes, rounded up to a page boundary.
+ RawShadow* mid1 =
+ Min(end, reinterpret_cast<RawShadow*>(RoundUp(
+ reinterpret_cast<uptr>(begin) + kPageSize / 2, kPageSize)));
+ ShadowSet(begin, mid1, val);
+ // Reset middle part.
+ RawShadow* mid2 = RoundDown(end, kPageSize);
+ if (mid2 > mid1) {
+ if (!MmapFixedSuperNoReserve((uptr)mid1, (uptr)mid2 - (uptr)mid1))
+ Die();
+ }
+ // Set the ending.
+ ShadowSet(mid2, end, val);
+}
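The edge-write/middle-remap strategy of MemoryRangeSet can be sketched independently of the runtime. The simplified standalone version below uses plain POSIX mmap in place of MmapFixedSuperNoReserve and assumes a 4096-byte page size; error handling and the Windows special case are omitted.

// range_reset.cc -- "write the edges, remap the middle" (POSIX-only sketch).
#include <sys/mman.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static const size_t kPageSize = 4096;

static uintptr_t RoundUp(uintptr_t x, uintptr_t a) { return (x + a - 1) / a * a; }
static uintptr_t RoundDown(uintptr_t x, uintptr_t a) { return x / a * a; }

int main() {
  const size_t kSize = 64 << 20;  // pretend this is 64MB of shadow
  char *begin = (char *)mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (begin == MAP_FAILED) { perror("mmap"); return 1; }
  char *end = begin + kSize;

  // Clear the beginning (at least kPageSize/2, up to a page boundary) by storing.
  char *mid1 = (char *)RoundUp((uintptr_t)begin + kPageSize / 2, kPageSize);
  memset(begin, 0, mid1 - begin);
  // Return the big middle part to the kernel instead of touching it.
  char *mid2 = (char *)RoundDown((uintptr_t)end, kPageSize);
  if (mid2 > mid1 &&
      mmap(mid1, mid2 - mid1, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    abort();
  // Clear the tail by storing.
  memset(mid2, 0, end - mid2);
  printf("reset %zu bytes, touched only %zu\n", kSize,
         (size_t)((mid1 - begin) + (end - mid2)));
}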
+
+void MemoryResetRange(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ uptr addr1 = RoundDown(addr, kShadowCell);
+ uptr size1 = RoundUp(size + addr - addr1, kShadowCell);
+ MemoryRangeSet(addr1, size1, Shadow::kEmpty);
+}
+
+void MemoryRangeFreed(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ // Callers must lock the slot to ensure synchronization with the reset.
+ // The problem with "freed" memory is that it's not "monotonic"
+ // with respect to bug detection: freed memory is bad to access,
+ // but then if the heap block is reallocated later, it's good to access.
+  // As a result, a garbage "freed" shadow can lead to a false positive
+  // if it happens to match a real free in the thread trace,
+  // but the heap block was reallocated before the current memory access,
+  // so it's still good to access. This is not the case with data races.
+ DCHECK(thr->slot_locked);
+ DCHECK_EQ(addr % kShadowCell, 0);
+ size = RoundUp(size, kShadowCell);
+  // Processing more than 1k (2k of shadow) is expensive,
+  // can cause excessive memory consumption (the user does not necessarily
+  // touch the whole range) and is most likely unnecessary.
+ size = Min<uptr>(size, 1024);
+ const AccessType typ = kAccessWrite | kAccessFree | kAccessSlotLocked |
+ kAccessCheckOnly | kAccessNoRodata;
+ TraceMemoryAccessRange(thr, pc, addr, size, typ);
+ RawShadow* shadow_mem = MemToShadow(addr);
+ Shadow cur(thr->fast_state, 0, kShadowCell, typ);
+#if TSAN_VECTORIZE
+ const m128 access = _mm_set1_epi32(static_cast<u32>(cur.raw()));
+ const m128 freed = _mm_setr_epi32(
+ static_cast<u32>(Shadow::FreedMarker()),
+ static_cast<u32>(Shadow::FreedInfo(cur.sid(), cur.epoch())), 0, 0);
+ for (; size; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ const m128 shadow = _mm_load_si128((m128*)shadow_mem);
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ _mm_store_si128((m128*)shadow_mem, freed);
+ }
+#else
+ for (; size; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, 0, 0, typ)))
+ return;
+ StoreShadow(&shadow_mem[0], Shadow::FreedMarker());
+ StoreShadow(&shadow_mem[1], Shadow::FreedInfo(cur.sid(), cur.epoch()));
+ StoreShadow(&shadow_mem[2], Shadow::kEmpty);
+ StoreShadow(&shadow_mem[3], Shadow::kEmpty);
+ }
+#endif
+}
+
+void MemoryRangeImitateWrite(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ DCHECK_EQ(addr % kShadowCell, 0);
+ size = RoundUp(size, kShadowCell);
+ TraceMemoryAccessRange(thr, pc, addr, size, kAccessWrite);
+ Shadow cur(thr->fast_state, 0, 8, kAccessWrite);
+ MemoryRangeSet(addr, size, cur.raw());
+}
+
+void MemoryRangeImitateWriteOrResetRange(ThreadState* thr, uptr pc, uptr addr,
+ uptr size) {
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, addr, size);
+ else
+ MemoryResetRange(thr, pc, addr, size);
+}
+
+ALWAYS_INLINE
+bool MemoryAccessRangeOne(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ AccessType typ) {
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return false;
+ return CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+template <bool is_read>
+NOINLINE void RestartMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr,
+ uptr size) {
+ TraceSwitchPart(thr);
+ MemoryAccessRangeT<is_read>(thr, pc, addr, size);
+}
+
+template <bool is_read>
+void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ const AccessType typ =
+ (is_read ? kAccessRead : kAccessWrite) | kAccessNoRodata;
+ RawShadow* shadow_mem = MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_read=%d\n", thr->tid,
+ (void*)pc, (void*)addr, (int)size, is_read);
+
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ Printf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem(shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", static_cast<void*>(shadow_mem), addr);
+ DCHECK(IsShadowMem(shadow_mem));
+ }
+ if (!IsShadowMem(shadow_mem + size * kShadowCnt - 1)) {
+ Printf("Bad shadow addr %p (%zx)\n",
+ static_cast<void*>(shadow_mem + size * kShadowCnt - 1),
+ addr + size - 1);
+ DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt - 1));
+ }
+#endif
+
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+  // Check once here so we don't have to check every access separately.
+ // Note: we could (and should) do this only for the is_read case
+ // (writes shouldn't go to .rodata). But it happens in Chromium tests:
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=1275581#c19
+ // Details are unknown since it happens only on CI machines.
+ if (*shadow_mem == Shadow::kRodata)
+ return;
+
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccessRange<is_read>(thr, pc, addr, size);
+
+ if (UNLIKELY(addr % kShadowCell)) {
+ // Handle unaligned beginning, if any.
+ uptr size1 = Min(size, RoundUp(addr, kShadowCell) - addr);
+ size -= size1;
+ Shadow cur(fast_state, addr, size1, typ);
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
+ shadow_mem += kShadowCnt;
+ }
+ // Handle middle part, if any.
+ Shadow cur(fast_state, 0, kShadowCell, typ);
+ for (; size >= kShadowCell; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
+ }
+ // Handle ending, if any.
+ if (UNLIKELY(size)) {
+ Shadow cur(fast_state, 0, size, typ);
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
+ }
+}
+
+template void MemoryAccessRangeT<true>(ThreadState* thr, uptr pc, uptr addr,
+ uptr size);
+template void MemoryAccessRangeT<false>(ThreadState* thr, uptr pc, uptr addr,
+ uptr size);
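To make the head/middle/tail decomposition in MemoryAccessRangeT concrete, a small standalone sketch follows; the 8-byte cell size and the example address/size are assumptions for illustration.

// range_cells.cc -- walking a byte range as 8-byte shadow cells:
// unaligned head, full cells, short tail (sketch only).
#include <cstdio>
#include <cstdint>

int main() {
  const uint64_t kCell = 8;
  uint64_t addr = 0x1003, size = 22;  // spans cells 0x1000..0x1018
  if (addr % kCell) {
    uint64_t head = kCell - addr % kCell;
    if (head > size) head = size;
    std::printf("head: %llu bytes at 0x%llx\n", (unsigned long long)head,
                (unsigned long long)addr);
    addr += head;
    size -= head;
  }
  for (; size >= kCell; addr += kCell, size -= kCell)
    std::printf("full cell at 0x%llx\n", (unsigned long long)addr);
  if (size)
    std::printf("tail: %llu bytes at 0x%llx\n", (unsigned long long)size,
                (unsigned long long)addr);
}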
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+// Must be included in this file to make sure everything is inlined.
+# include "tsan_interface.inc"
+#endif
lib/tsan/tsan_rtl_amd64.S
@@ -9,166 +9,6 @@
.section __TEXT,__text
#endif
-ASM_HIDDEN(__tsan_trace_switch)
-.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
-ASM_SYMBOL(__tsan_trace_switch_thunk):
- CFI_STARTPROC
- # Save scratch registers.
- push %rax
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rax, 0)
- push %rcx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rcx, 0)
- push %rdx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdx, 0)
- push %rsi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rsi, 0)
- push %rdi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdi, 0)
- push %r8
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r8, 0)
- push %r9
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r9, 0)
- push %r10
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r10, 0)
- push %r11
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r11, 0)
- # Align stack frame.
- push %rbx # non-scratch
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rbx, 0)
- mov %rsp, %rbx # save current rsp
- CFI_DEF_CFA_REGISTER(%rbx)
- shr $4, %rsp # clear 4 lsb, align to 16
- shl $4, %rsp
-
- call ASM_SYMBOL(__tsan_trace_switch)
-
- # Unalign stack frame back.
- mov %rbx, %rsp # restore the original rsp
- CFI_DEF_CFA_REGISTER(%rsp)
- pop %rbx
- CFI_ADJUST_CFA_OFFSET(-8)
- # Restore scratch registers.
- pop %r11
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r10
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r9
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r8
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rsi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rcx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rax
- CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(%rax)
- CFI_RESTORE(%rbx)
- CFI_RESTORE(%rcx)
- CFI_RESTORE(%rdx)
- CFI_RESTORE(%rsi)
- CFI_RESTORE(%rdi)
- CFI_RESTORE(%r8)
- CFI_RESTORE(%r9)
- CFI_RESTORE(%r10)
- CFI_RESTORE(%r11)
- ret
- CFI_ENDPROC
-
-ASM_HIDDEN(__tsan_report_race)
-.globl ASM_SYMBOL(__tsan_report_race_thunk)
-ASM_SYMBOL(__tsan_report_race_thunk):
- CFI_STARTPROC
- # Save scratch registers.
- push %rax
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rax, 0)
- push %rcx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rcx, 0)
- push %rdx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdx, 0)
- push %rsi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rsi, 0)
- push %rdi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdi, 0)
- push %r8
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r8, 0)
- push %r9
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r9, 0)
- push %r10
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r10, 0)
- push %r11
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r11, 0)
- # Align stack frame.
- push %rbx # non-scratch
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rbx, 0)
- mov %rsp, %rbx # save current rsp
- CFI_DEF_CFA_REGISTER(%rbx)
- shr $4, %rsp # clear 4 lsb, align to 16
- shl $4, %rsp
-
- call ASM_SYMBOL(__tsan_report_race)
-
- # Unalign stack frame back.
- mov %rbx, %rsp # restore the original rsp
- CFI_DEF_CFA_REGISTER(%rsp)
- pop %rbx
- CFI_ADJUST_CFA_OFFSET(-8)
- # Restore scratch registers.
- pop %r11
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r10
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r9
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r8
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rsi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rcx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rax
- CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(%rax)
- CFI_RESTORE(%rbx)
- CFI_RESTORE(%rcx)
- CFI_RESTORE(%rdx)
- CFI_RESTORE(%rsi)
- CFI_RESTORE(%rdi)
- CFI_RESTORE(%r8)
- CFI_RESTORE(%r9)
- CFI_RESTORE(%r10)
- CFI_RESTORE(%r11)
- ret
- CFI_ENDPROC
-
ASM_HIDDEN(__tsan_setjmp)
#if defined(__NetBSD__)
.comm _ZN14__interception15real___setjmp14E,8,8
@@ -185,6 +25,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_SYMBOL_INTERCEPTOR(setjmp):
#endif
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -226,6 +67,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
ASM_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -267,6 +109,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
#endif
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -323,6 +166,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
lib/tsan/tsan_rtl_mutex.cpp
@@ -23,6 +23,8 @@
namespace __tsan {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
+ FastState last_lock, StackID creation_stack_id);
struct Callback final : public DDCallback {
ThreadState *thr;
@@ -35,27 +37,27 @@ struct Callback final : public DDCallback {
DDCallback::lt = thr->dd_lt;
}
- u32 Unwind() override { return CurrentStackId(thr, pc); }
- int UniqueTid() override { return thr->unique_id; }
+ StackID Unwind() override { return CurrentStackId(thr, pc); }
+ int UniqueTid() override { return thr->tid; }
};
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
Callback cb(thr, pc);
ctx->dd->MutexInit(&cb, &s->dd);
- s->dd.ctx = s->GetId();
+ s->dd.ctx = s->addr;
}
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
- uptr addr, u64 mid) {
+ uptr addr, StackID creation_stack_id) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
if (SANITIZER_GO)
return;
if (!ShouldReport(thr, typ))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(typ);
- rep.AddMutex(mid);
+ rep.AddMutex(addr, creation_stack_id);
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
rep.AddStack(trace, true);
@@ -63,185 +65,197 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
OutputReport(thr, rep);
}
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
+ StackID stack_id, bool write) {
+ auto typ = write ? EventType::kLock : EventType::kRLock;
+  // Note: it's important to trace before modifying the mutex set,
+  // because tracing can switch the trace part and we write the current
+  // mutex set at the beginning of each part.
+  // If we did it in the opposite order, we would write the already reduced
+  // mutex set at the beginning of the part and then trace the unlock again.
+ TraceMutexLock(thr, typ, pc, addr, stack_id);
+ thr->mset.AddAddr(addr, stack_id, write);
+}
+
+static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
+ // See the comment in RecordMutexLock re order of operations.
+ TraceMutexUnlock(thr, addr);
+ thr->mset.DelAddr(addr);
+}
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
- MemoryWrite(thr, pc, addr, kSizeLog1);
- thr->is_freeing = false;
- }
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessWrite);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
s->SetFlags(flagz & MutexCreationFlagMask);
- if (!SANITIZER_GO && s->creation_stack_id == 0)
+  // Save the stack in case the sync object was created earlier as an atomic.
+ if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
s->creation_stack_id = CurrentStackId(thr, pc);
- s->mtx.Unlock();
}
-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
- if (s == 0)
- return;
- if ((flagz & MutexFlagLinkerInit)
- || s->IsFlagSet(MutexFlagLinkerInit)
- || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
- // Destroy is no-op for linker-initialized mutexes.
- s->mtx.Unlock();
- return;
- }
- if (common_flags()->detect_deadlocks) {
- Callback cb(thr, pc);
- ctx->dd->MutexDestroy(&cb, &s->dd);
- ctx->dd->MutexInit(&cb, &s->dd);
- }
bool unlock_locked = false;
- if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
- !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- unlock_locked = true;
- }
- u64 mid = s->GetId();
- u64 last_lock = s->last_lock;
- if (!unlock_locked)
- s->Reset(thr->proc()); // must not reset it before the report is printed
- s->mtx.Unlock();
- if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(mid);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- FastState last(last_lock);
- RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
-
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
- if (s != 0) {
- s->Reset(thr->proc());
- s->mtx.Unlock();
+ StackID creation_stack_id;
+ FastState last_lock;
+ {
+ auto s = ctx->metamap.GetSyncIfExists(addr);
+ if (!s)
+ return;
+ SlotLocker locker(thr);
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ last_lock = s->last_lock;
+ if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
+ ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+ // Destroy is no-op for linker-initialized mutexes.
+ return;
+ }
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+ !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ unlock_locked = true;
+ }
+ s->Reset();
}
+ // Imitate a memory write to catch unlock-destroy races.
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1,
+ kAccessWrite | kAccessFree | kAccessSlotLocked);
}
- thr->mset.Remove(mid);
- // Imitate a memory write to catch unlock-destroy races.
- // Do this outside of sync mutex, because it can report a race which locks
- // sync mutexes.
- if (IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
- MemoryWrite(thr, pc, addr, kSizeLog1);
- thr->is_freeing = false;
- }
+ if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
+ ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
+ thr->mset.DelAddr(addr, true);
// s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ if (flagz & MutexFlagTryLock)
+ return;
+ if (!common_flags()->detect_deadlocks)
+ return;
+ Callback cb(thr, pc);
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock lock(&s->mtx);
s->UpdateFlags(flagz);
- if (s->owner_tid != thr->tid) {
- Callback cb(thr, pc);
+ if (s->owner_tid != thr->tid)
ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- s->mtx.ReadUnlock();
- ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
- } else {
- s->mtx.ReadUnlock();
- }
}
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
-void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
- int rec) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
thr->tid, addr, flagz, rec);
if (flagz & MutexFlagRecursiveLock)
CHECK_GT(rec, 0);
else
rec = 1;
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- s->UpdateFlags(flagz);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
bool report_double_lock = false;
- if (s->owner_tid == kInvalidTid) {
- CHECK_EQ(s->recursion, 0);
- s->owner_tid = thr->tid;
- s->last_lock = thr->fast_state.raw();
- } else if (s->owner_tid == thr->tid) {
- CHECK_GT(s->recursion, 0);
- } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_double_lock = true;
- }
- const bool first = s->recursion == 0;
- s->recursion += rec;
- if (first) {
- AcquireImpl(thr, pc, &s->clock);
- AcquireImpl(thr, pc, &s->read_clock);
- } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
- }
- thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
bool pre_lock = false;
- if (first && common_flags()->detect_deadlocks) {
- pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
- !(flagz & MutexFlagTryLock);
- Callback cb(thr, pc);
- if (pre_lock)
- ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ bool first = false;
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ creation_stack_id = s->creation_stack_id;
+ RecordMutexLock(thr, pc, addr, creation_stack_id, true);
+ {
+ Lock lock(&s->mtx);
+ first = s->recursion == 0;
+ s->UpdateFlags(flagz);
+ if (s->owner_tid == kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state;
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_double_lock = true;
+ }
+ s->recursion += rec;
+ if (first) {
+ if (!thr->ignore_sync) {
+ thr->clock.Acquire(s->clock);
+ thr->clock.Acquire(s->read_clock);
+ }
+ }
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ }
+ }
}
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
- s = 0;
if (report_double_lock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
+ creation_stack_id);
if (first && pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
- int rec = 0;
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ StackID creation_stack_id;
+ RecordMutexUnlock(thr, addr);
bool report_bad_unlock = false;
- if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
- }
- } else {
- rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
- s->recursion -= rec;
- if (s->recursion == 0) {
- s->owner_tid = kInvalidTid;
- ReleaseStoreImpl(thr, pc, &s->clock);
- } else {
+ int rec = 0;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ } else {
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ if (!thr->ignore_sync) {
+ thr->clock.ReleaseStore(&s->clock);
+ released = true;
+ }
+ }
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
}
+ if (released)
+ IncrementEpoch(thr);
}
- thr->mset.Del(s->GetId(), true);
- if (common_flags()->detect_deadlocks && s->recursion == 0 &&
- !report_bad_unlock) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
- }
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks && !report_bad_unlock) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -249,282 +263,275 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFET
return rec;
}
-void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
+ return;
+ Callback cb(thr, pc);
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock lock(&s->mtx);
s->UpdateFlags(flagz);
- Callback cb(thr, pc);
ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- s->mtx.ReadUnlock();
- ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
-void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
- s->UpdateFlags(flagz);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
bool report_bad_lock = false;
- if (s->owner_tid != kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_lock = true;
- }
- }
- AcquireImpl(thr, pc, &s->clock);
- s->last_lock = thr->fast_state.raw();
- thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
bool pre_lock = false;
- if (common_flags()->detect_deadlocks) {
- pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
- !(flagz & MutexFlagTryLock);
- Callback cb(thr, pc);
- if (pre_lock)
- ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ creation_stack_id = s->creation_stack_id;
+ RecordMutexLock(thr, pc, addr, creation_stack_id, false);
+ {
+ ReadLock lock(&s->mtx);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_lock = true;
+ }
+ }
+ if (!thr->ignore_sync)
+ thr->clock.Acquire(s->clock);
+ s->last_lock = thr->fast_state;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ }
+ }
}
- u64 mid = s->GetId();
- s->mtx.ReadUnlock();
- // Can't touch s after this point.
- s = 0;
if (report_bad_lock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
+ creation_stack_id);
if (pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ RecordMutexUnlock(thr, addr);
+ StackID creation_stack_id;
bool report_bad_unlock = false;
- if (s->owner_tid != kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ }
+ if (!thr->ignore_sync) {
+ thr->clock.Release(&s->read_clock);
+ released = true;
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
}
+ if (released)
+ IncrementEpoch(thr);
}
- ReleaseImpl(thr, pc, &s->read_clock);
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
- }
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
- thr->mset.Del(mid, false);
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- bool write = true;
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ RecordMutexUnlock(thr, addr);
+ StackID creation_stack_id;
bool report_bad_unlock = false;
- if (s->owner_tid == kInvalidTid) {
- // Seems to be read unlock.
- write = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
- ReleaseImpl(thr, pc, &s->read_clock);
- } else if (s->owner_tid == thr->tid) {
- // Seems to be write unlock.
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
- CHECK_GT(s->recursion, 0);
- s->recursion--;
- if (s->recursion == 0) {
- s->owner_tid = kInvalidTid;
- ReleaseStoreImpl(thr, pc, &s->clock);
- } else {
+ bool write = true;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (s->owner_tid == kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ if (!thr->ignore_sync) {
+ thr->clock.Release(&s->read_clock);
+ released = true;
+ }
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ if (!thr->ignore_sync) {
+ thr->clock.ReleaseStore(&s->clock);
+ released = true;
+ }
+ }
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
}
- } else if (!s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
+ if (released)
+ IncrementEpoch(thr);
}
- thr->mset.Del(s->GetId(), write);
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
- }
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-void MutexRepair(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock lock(&s->mtx);
s->owner_tid = kInvalidTid;
s->recursion = 0;
- s->mtx.Unlock();
}
-void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- u64 mid = s->GetId();
- s->mtx.Unlock();
- ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ if (s)
+ creation_stack_id = s->creation_stack_id;
+ }
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
+ creation_stack_id);
}
-void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+ auto s = ctx->metamap.GetSyncIfExists(addr);
if (!s)
return;
- AcquireImpl(thr, pc, &s->clock);
- s->mtx.ReadUnlock();
-}
-
-static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
- ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
- u64 epoch = tctx->epoch1;
- if (tctx->status == ThreadStatusRunning) {
- epoch = tctx->thr->fast_state.epoch();
- tctx->thr->clock.NoteGlobalAcquire(epoch);
- }
- thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+ SlotLocker locker(thr);
+ if (!s->clock)
+ return;
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
}
-void AcquireGlobal(ThreadState *thr, uptr pc) {
+void AcquireGlobal(ThreadState *thr) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(ctx->thread_registry);
- ctx->thread_registry->RunCallbackForEachThreadLocked(
- UpdateClockCallback, thr);
+ SlotLocker locker(thr);
+ for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}
-void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
- DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreAcquireImpl(thr, pc, &s->clock);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.Release(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
- DPrintf("#%d: Release %zx\n", thr->tid, addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, pc, &s->clock);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
- DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreImpl(thr, pc, &s->clock);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStoreAcquire(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-#if !SANITIZER_GO
-static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
- ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
- u64 epoch = tctx->epoch1;
- if (tctx->status == ThreadStatusRunning)
- epoch = tctx->thr->fast_state.epoch();
- thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+void IncrementEpoch(ThreadState *thr) {
+ DCHECK(!thr->ignore_sync);
+ DCHECK(thr->slot_locked);
+ Epoch epoch = EpochInc(thr->fast_state.epoch());
+ if (!EpochOverflow(epoch)) {
+ Sid sid = thr->fast_state.sid();
+ thr->clock.Set(sid, epoch);
+ thr->fast_state.SetEpoch(epoch);
+ thr->slot->SetEpoch(epoch);
+ TraceTime(thr);
+ }
}
+#if !SANITIZER_GO
void AfterSleep(ThreadState *thr, uptr pc) {
- DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+ DPrintf("#%d: AfterSleep\n", thr->tid);
if (thr->ignore_sync)
return;
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(ctx->thread_registry);
- ctx->thread_registry->RunCallbackForEachThreadLocked(
- UpdateSleepClockCallback, thr);
+ thr->last_sleep_clock.Reset();
+ SlotLocker locker(thr);
+ for (auto &slot : ctx->slots)
+ thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
#endif
-void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
-}
-
-void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(&thr->proc()->clock_cache, c);
-}
-
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(ReportTypeDeadlock);
for (int i = 0; i < r->n; i++) {
- rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
rep.AddUniqueTid((int)r->loop[i].thr_ctx);
rep.AddThread((int)r->loop[i].thr_ctx);
}
@@ -532,7 +539,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
for (int i = 0; i < r->n; i++) {
for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
u32 stk = r->loop[i].stk[j];
- if (stk && stk != 0xffffffff) {
+ if (stk && stk != kInvalidStackID) {
rep.AddStack(StackDepotGet(stk), true);
} else {
// Sometimes we fail to extract the stack trace (FIXME: investigate),
@@ -544,4 +551,28 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
OutputReport(thr, rep);
}
+void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
+ FastState last_lock, StackID creation_stack_id) {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+
+ Tid tid;
+ DynamicMutexSet mset;
+ uptr tag;
+ if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
+ 0, kAccessWrite, &tid, &trace, mset, &tag))
+ return;
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
} // namespace __tsan
lib/tsan/tsan_rtl_proc.cpp
@@ -35,7 +35,6 @@ void ProcDestroy(Processor *proc) {
#if !SANITIZER_GO
AllocatorProcFinish(proc);
#endif
- ctx->clock_alloc.FlushCache(&proc->clock_cache);
ctx->metamap.OnProcIdle(proc);
if (common_flags()->detect_deadlocks)
ctx->dd->DestroyPhysicalThread(proc->dd_pt);
lib/tsan/tsan_rtl_report.cpp
@@ -10,20 +10,20 @@
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_fd.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
#include "tsan_platform.h"
+#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
-#include "tsan_report.h"
#include "tsan_sync.h"
-#include "tsan_mman.h"
-#include "tsan_flags.h"
-#include "tsan_fd.h"
namespace __tsan {
@@ -68,8 +68,10 @@ static void StackStripMain(SymbolizedStack *frames) {
} else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
last_frame->ClearAll();
last_frame2->next = nullptr;
- // Strip global ctors init.
- } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ // Strip global ctors init, .preinit_array and main caller.
+ } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
+ 0 == internal_strcmp(last, "__libc_csu_init") ||
+ 0 == internal_strcmp(last, "__libc_start_main"))) {
last_frame->ClearAll();
last_frame2->next = nullptr;
// If both are 0, then we probably just failed to symbolize.
@@ -120,7 +122,7 @@ static ReportStack *SymbolizeStack(StackTrace trace) {
}
StackStripMain(top);
- ReportStack *stack = ReportStack::New();
+ auto *stack = New<ReportStack>();
stack->frames = top;
return stack;
}
@@ -132,7 +134,7 @@ bool ShouldReport(ThreadState *thr, ReportType typ) {
CheckedMutex::CheckNoLocks();
// For the same reason check we didn't lock thread_registry yet.
if (SANITIZER_DEBUG)
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
if (!flags()->report_bugs || thr->suppress_reports)
return false;
switch (typ) {
@@ -154,9 +156,8 @@ bool ShouldReport(ThreadState *thr, ReportType typ) {
}
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
- ctx->thread_registry->CheckLocked();
- void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
- rep_ = new(mem) ReportDesc;
+ ctx->thread_registry.CheckLocked();
+ rep_ = New<ReportDesc>();
rep_->typ = typ;
rep_->tag = tag;
ctx->report_mtx.Lock();
@@ -165,7 +166,6 @@ ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
ScopedReportBase::~ScopedReportBase() {
ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
- rep_ = nullptr;
}
void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
@@ -175,28 +175,31 @@ void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
}
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
- StackTrace stack, const MutexSet *mset) {
- void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
- ReportMop *mop = new(mem) ReportMop;
+ Tid tid, StackTrace stack,
+ const MutexSet *mset) {
+ uptr addr0, size;
+ AccessType typ;
+ s.GetAccess(&addr0, &size, &typ);
+ auto *mop = New<ReportMop>();
rep_->mops.PushBack(mop);
- mop->tid = s.tid();
- mop->addr = addr + s.addr0();
- mop->size = s.size();
- mop->write = s.IsWrite();
- mop->atomic = s.IsAtomic();
+ mop->tid = tid;
+ mop->addr = addr + addr0;
+ mop->size = size;
+ mop->write = !(typ & kAccessRead);
+ mop->atomic = typ & kAccessAtomic;
mop->stack = SymbolizeStack(stack);
mop->external_tag = external_tag;
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 mid = this->AddMutex(d.id);
- ReportMopMutex mtx = {mid, d.write};
+ int id = this->AddMutex(d.addr, d.stack_id);
+ ReportMopMutex mtx = {id, d.write};
mop->mset.PushBack(mtx);
}
}
-void ScopedReportBase::AddUniqueTid(int unique_tid) {
+void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
rep_->unique_tids.PushBack(unique_tid);
}
@@ -205,8 +208,7 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
}
- void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
- ReportThread *rt = new(mem) ReportThread;
+ auto *rt = New<ReportThread>();
rep_->threads.PushBack(rt);
rt->id = tctx->tid;
rt->os_id = tctx->os_id;
@@ -221,22 +223,10 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
}
#if !SANITIZER_GO
-static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
- int unique_id = *(int *)arg;
- return tctx->unique_id == (u32)unique_id;
-}
-
-static ThreadContext *FindThreadByUidLocked(int unique_id) {
- ctx->thread_registry->CheckLocked();
+static ThreadContext *FindThreadByTidLocked(Tid tid) {
+ ctx->thread_registry.CheckLocked();
return static_cast<ThreadContext *>(
- ctx->thread_registry->FindThreadContextLocked(
- FindThreadByUidLockedCallback, &unique_id));
-}
-
-static ThreadContext *FindThreadByTidLocked(int tid) {
- ctx->thread_registry->CheckLocked();
- return static_cast<ThreadContext*>(
- ctx->thread_registry->GetThreadLocked(tid));
+ ctx->thread_registry.GetThreadLocked(tid));
}
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
@@ -251,10 +241,10 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
- ctx->thread_registry->CheckLocked();
- ThreadContext *tctx = static_cast<ThreadContext*>(
- ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
- (void*)addr));
+ ctx->thread_registry.CheckLocked();
+ ThreadContext *tctx =
+ static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
+ IsInStackOrTls, (void *)addr));
if (!tctx)
return 0;
ThreadState *thr = tctx->thr;
@@ -264,58 +254,24 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
-void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
+void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
- if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
AddThread(tctx, suppressable);
#endif
}
-void ScopedReportBase::AddMutex(const SyncVar *s) {
- for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
- if (rep_->mutexes[i]->id == s->uid)
- return;
- }
- void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
- ReportMutex *rm = new(mem) ReportMutex;
- rep_->mutexes.PushBack(rm);
- rm->id = s->uid;
- rm->addr = s->addr;
- rm->destroyed = false;
- rm->stack = SymbolizeStackId(s->creation_stack_id);
-}
-
-u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS {
- u64 uid = 0;
- u64 mid = id;
- uptr addr = SyncVar::SplitId(id, &uid);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- mid = s->uid;
- AddMutex(s);
- } else {
- AddDeadMutex(id);
- }
- if (s)
- s->mtx.Unlock();
- return mid;
-}
-
-void ScopedReportBase::AddDeadMutex(u64 id) {
+int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
- if (rep_->mutexes[i]->id == id)
- return;
+ if (rep_->mutexes[i]->addr == addr)
+ return rep_->mutexes[i]->id;
}
- void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
- ReportMutex *rm = new(mem) ReportMutex;
+ auto *rm = New<ReportMutex>();
rep_->mutexes.PushBack(rm);
- rm->id = id;
- rm->addr = 0;
- rm->destroyed = true;
- rm->stack = 0;
+ rm->id = rep_->mutexes.Size() - 1;
+ rm->addr = addr;
+ rm->stack = SymbolizeStackId(creation_stack_id);
+ return rm->id;
}
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
@@ -323,43 +279,46 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
return;
#if !SANITIZER_GO
int fd = -1;
- int creat_tid = kInvalidTid;
- u32 creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
- ReportLocation *loc = ReportLocation::New(ReportLocationFD);
+ Tid creat_tid = kInvalidTid;
+ StackID creat_stack = 0;
+ bool closed = false;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationFD;
+ loc->fd_closed = closed;
loc->fd = fd;
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
rep_->locs.PushBack(loc);
- ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
- if (tctx)
- AddThread(tctx);
+ AddThread(creat_tid);
return;
}
MBlock *b = 0;
+ uptr block_begin = 0;
Allocator *a = allocator();
if (a->PointerIsMine((void*)addr)) {
- void *block_begin = a->GetBlockBegin((void*)addr);
+ block_begin = (uptr)a->GetBlockBegin((void *)addr);
if (block_begin)
- b = ctx->metamap.GetBlock((uptr)block_begin);
+ b = ctx->metamap.GetBlock(block_begin);
}
+ if (!b)
+ b = JavaHeapBlock(addr, &block_begin);
if (b != 0) {
- ThreadContext *tctx = FindThreadByTidLocked(b->tid);
- ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
- loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationHeap;
+ loc->heap_chunk_start = block_begin;
loc->heap_chunk_size = b->siz;
loc->external_tag = b->tag;
- loc->tid = tctx ? tctx->tid : b->tid;
+ loc->tid = b->tid;
loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- if (tctx)
- AddThread(tctx);
+ AddThread(b->tid);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
- ReportLocation *loc =
- ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
+ auto *loc = New<ReportLocation>();
+ loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
loc->tid = tctx->tid;
rep_->locs.PushBack(loc);
AddThread(tctx);
@@ -373,13 +332,15 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
}
#if !SANITIZER_GO
-void ScopedReportBase::AddSleep(u32 stack_id) {
+void ScopedReportBase::AddSleep(StackID stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
#endif
void ScopedReportBase::SetCount(int count) { rep_->count = count; }
+void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }
+
const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
ScopedReport::ScopedReport(ReportType typ, uptr tag)
@@ -387,67 +348,256 @@ ScopedReport::ScopedReport(ReportType typ, uptr tag)
ScopedReport::~ScopedReport() {}
-void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset, uptr *tag) {
+// Replays the trace up to the last_pos position in the last part
+// or up to the provided epoch/sid (whichever is earlier)
+// and calls the provided function f for each event.
+template <typename Func>
+void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
+ Epoch epoch, Func f) {
+ TracePart *part = trace->parts.Front();
+ Sid ev_sid = kFreeSid;
+ Epoch ev_epoch = kEpochOver;
+ for (;;) {
+ DCHECK_EQ(part->trace, trace);
+ // Note: an event can't start in the last element.
+ // Since an event can take up to 2 elements,
+ // we ensure we have at least 2 before adding an event.
+ Event *end = &part->events[TracePart::kSize - 1];
+ if (part == last)
+ end = last_pos;
+ f(kFreeSid, kEpochOver, nullptr); // notify about part start
+ for (Event *evp = &part->events[0]; evp < end; evp++) {
+ Event *evp0 = evp;
+ if (!evp->is_access && !evp->is_func) {
+ switch (evp->type) {
+ case EventType::kTime: {
+ auto *ev = reinterpret_cast<EventTime *>(evp);
+ ev_sid = static_cast<Sid>(ev->sid);
+ ev_epoch = static_cast<Epoch>(ev->epoch);
+ if (ev_sid == sid && ev_epoch > epoch)
+ return;
+ break;
+ }
+ case EventType::kAccessExt:
+ FALLTHROUGH;
+ case EventType::kAccessRange:
+ FALLTHROUGH;
+ case EventType::kLock:
+ FALLTHROUGH;
+ case EventType::kRLock:
+ // These take 2 Event elements.
+ evp++;
+ break;
+ case EventType::kUnlock:
+ // This takes 1 Event element.
+ break;
+ }
+ }
+ CHECK_NE(ev_sid, kFreeSid);
+ CHECK_NE(ev_epoch, kEpochOver);
+ f(ev_sid, ev_epoch, evp0);
+ }
+ if (part == last)
+ return;
+ part = trace->parts.Next(part);
+ CHECK(part);
+ }
+ CHECK(0);
+}
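TraceReplay walks a stream in which some events occupy one Event element and others two. A standalone sketch of that kind of variable-size decoding follows; the event kinds and sizes here are made up for illustration and do not match the real encoding.

// replay.cc -- sketch of iterating a trace of variable-size events.
#include <cstddef>
#include <cstdio>
#include <vector>

enum class Ev { kTime, kAccess, kLock, kUnlock };

struct Event {
  Ev type;
  unsigned payload;
};

// kAccess and kLock take two consecutive elements (header + extra word),
// kTime and kUnlock take one -- analogous to the real 1/2-element events.
int EventSize(Ev type) {
  return (type == Ev::kAccess || type == Ev::kLock) ? 2 : 1;
}

int main() {
  // Indices 2 and 4 hold the second halves of the preceding two-element
  // events; the loop skips them via EventSize.
  std::vector<Event> trace = {
      {Ev::kTime, 1},    {Ev::kAccess, 0x10}, {Ev::kTime, 0},
      {Ev::kLock, 0x20}, {Ev::kTime, 0},      {Ev::kUnlock, 0x20}};
  for (std::size_t i = 0; i < trace.size(); i += EventSize(trace[i].type))
    std::printf("event %d at index %zu\n", (int)trace[i].type, i);
}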
+
+static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
+ Vector<uptr> *stack, MutexSet *mset, uptr pc,
+ bool *found) {
+ DPrintf2(" MATCHED\n");
+ *pmset = *mset;
+ stack->PushBack(pc);
+ pstk->Init(&(*stack)[0], stack->Size());
+ stack->PopBack();
+ *found = true;
+}
+
+// Checks if addr1|size1 is fully contained in addr2|size2.
+// We check for fully contained instead of just overlapping
+// because a memory access is always traced once, but can be
+// split into multiple accesses in the shadow.
+static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
+ uptr size2) {
+ return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
+}
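
A tiny standalone sketch (illustrative only, not part of the patch) of why full containment is the right predicate here: a single traced access can show up in the shadow as several smaller pieces, and each piece must still match the traced event.

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Mirrors IsWithinAccess above: addr1|size1 must lie fully inside addr2|size2.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

int main() {
  // An 8-byte access at 0x1000 is traced once...
  const uptr traced_addr = 0x1000, traced_size = 8;
  // ...but may show up in the shadow as two 4-byte halves.
  assert(IsWithinAccess(0x1000, 4, traced_addr, traced_size));
  assert(IsWithinAccess(0x1004, 4, traced_addr, traced_size));
  // A merely overlapping access is not considered a match.
  assert(!IsWithinAccess(0x0FFC, 8, traced_addr, traced_size));
}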
+
+// Replays the trace of slot sid up to the target event identified
+// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
+// and tag for that event. If there are multiple such events, it returns
+// the last one. Returns false if the event is not present in the trace.
+bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
+ AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
+ MutexSet *pmset, uptr *ptag) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- Trace* trace = ThreadTrace(tid);
- ReadLock l(&trace->mtx);
- const int partidx = (epoch / kTracePartSize) % TraceParts();
- TraceHeader* hdr = &trace->headers[partidx];
- if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
- return;
- CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
- const u64 epoch0 = RoundDown(epoch, TraceSize());
- const u64 eend = epoch % TraceSize();
- const u64 ebegin = RoundDown(eend, kTracePartSize);
- DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
- tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
- Vector<uptr> stack;
- stack.Resize(hdr->stack0.size + 64);
- for (uptr i = 0; i < hdr->stack0.size; i++) {
- stack[i] = hdr->stack0.trace[i];
- DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
- }
- if (mset)
- *mset = hdr->mset0;
- uptr pos = hdr->stack0.size;
- Event *events = (Event*)GetThreadTrace(tid);
- for (uptr i = ebegin; i <= eend; i++) {
- Event ev = events[i];
- EventType typ = (EventType)(ev >> kEventPCBits);
- uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
- DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
- if (typ == EventTypeMop) {
- stack[pos] = pc;
- } else if (typ == EventTypeFuncEnter) {
- if (stack.Size() < pos + 2)
- stack.Resize(pos + 2);
- stack[pos++] = pc;
- } else if (typ == EventTypeFuncExit) {
- if (pos > 0)
- pos--;
+ DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
+ static_cast<int>(sid), static_cast<int>(epoch), addr, size,
+ static_cast<int>(typ));
+ ctx->slot_mtx.CheckLocked(); // needed to prevent trace part recycling
+ ctx->thread_registry.CheckLocked();
+ TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
+ Tid tid = kInvalidTid;
+ // Need to lock the slot mutex as it protects slot->journal.
+ slot->mtx.CheckLocked();
+ for (uptr i = 0; i < slot->journal.Size(); i++) {
+ DPrintf2(" journal: epoch=%d tid=%d\n",
+ static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
+ if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
+ tid = slot->journal[i].tid;
+ break;
}
- if (mset) {
- if (typ == EventTypeLock) {
- mset->Add(pc, true, epoch0 + i);
- } else if (typ == EventTypeUnlock) {
- mset->Del(pc, true);
- } else if (typ == EventTypeRLock) {
- mset->Add(pc, false, epoch0 + i);
- } else if (typ == EventTypeRUnlock) {
- mset->Del(pc, false);
- }
+ }
+ if (tid == kInvalidTid)
+ return false;
+ *ptid = tid;
+ ThreadContext *tctx =
+ static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
+ Trace *trace = &tctx->trace;
+ // Snapshot first/last parts and the current position in the last part.
+ TracePart *first_part;
+ TracePart *last_part;
+ Event *last_pos;
+ {
+ Lock lock(&trace->mtx);
+ first_part = trace->parts.Front();
+ if (!first_part) {
+ DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
+ return false;
}
- for (uptr j = 0; j <= pos; j++)
- DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ last_part = trace->parts.Back();
+ last_pos = trace->final_pos;
+ if (tctx->thr)
+ last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
}
- if (pos == 0 && stack[0] == 0)
- return;
- pos++;
- stk->Init(&stack[0], pos);
- ExtractTagFromStack(stk, tag);
+ DynamicMutexSet mset;
+ Vector<uptr> stack;
+ uptr prev_pc = 0;
+ bool found = false;
+ bool is_read = typ & kAccessRead;
+ bool is_atomic = typ & kAccessAtomic;
+ bool is_free = typ & kAccessFree;
+ DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
+ trace->parts.Front(), last_part, last_pos);
+ TraceReplay(
+ trace, last_part, last_pos, sid, epoch,
+ [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
+ if (evp == nullptr) {
+ // Each trace part is self-consistent, so we reset state.
+ stack.Resize(0);
+ mset->Reset();
+ prev_pc = 0;
+ return;
+ }
+ bool match = ev_sid == sid && ev_epoch == epoch;
+ if (evp->is_access) {
+ if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
+ evp->_ == 0) // NopEvent
+ return;
+ auto *ev = reinterpret_cast<EventAccess *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size = 1 << ev->size_log;
+ uptr ev_pc =
+ prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
+ prev_pc = ev_pc;
+ DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+ ev_addr, ev_size, ev->is_read, ev->is_atomic);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ return;
+ }
+ if (evp->is_func) {
+ auto *ev = reinterpret_cast<EventFunc *>(evp);
+ if (ev->pc) {
+ DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
+ stack.PushBack(ev->pc);
+ } else {
+ DPrintf2(" FuncExit\n");
+                // We don't log pathologically large stacks in each part,
+                // so if the stack was truncated we can have more func exits
+                // than entries.
+ if (stack.Size())
+ stack.PopBack();
+ }
+ return;
+ }
+ switch (evp->type) {
+ case EventType::kAccessExt: {
+ auto *ev = reinterpret_cast<EventAccessExt *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size = 1 << ev->size_log;
+ prev_pc = ev->pc;
+ DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
+ ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && is_atomic == ev->is_atomic &&
+ !is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
+ break;
+ }
+ case EventType::kAccessRange: {
+ auto *ev = reinterpret_cast<EventAccessRange *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size =
+ (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
+ uptr ev_pc = RestoreAddr(ev->pc);
+ prev_pc = ev_pc;
+ DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+ ev_addr, ev_size, ev->is_read, ev->is_free);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ break;
+ }
+ case EventType::kLock:
+ FALLTHROUGH;
+ case EventType::kRLock: {
+ auto *ev = reinterpret_cast<EventLock *>(evp);
+ bool is_write = ev->type == EventType::kLock;
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_pc = RestoreAddr(ev->pc);
+ StackID stack_id =
+ (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
+ DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
+ ev_addr, stack_id, is_write);
+ mset->AddAddr(ev_addr, stack_id, is_write);
+              // Events with ev_pc == 0 are written to the beginning of a
+              // trace part as the initial mutex set (they are not real locks).
+ if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ break;
+ }
+ case EventType::kUnlock: {
+ auto *ev = reinterpret_cast<EventUnlock *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
+ mset->DelAddr(ev_addr);
+ break;
+ }
+ case EventType::kTime:
+ // TraceReplay already extracted sid/epoch from it,
+ // nothing else to do here.
+ break;
+ }
+ });
+ ExtractTagFromStack(pstk, ptag);
+ return found;
+}
+
+bool RacyStacks::operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
}
static bool FindRacyStacks(const RacyStacks &hash) {
@@ -478,35 +628,6 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
return false;
}
-static bool FindRacyAddress(const RacyAddress &ra0) {
- for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
- RacyAddress ra2 = ctx->racy_addresses[i];
- uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
- uptr minend = min(ra0.addr_max, ra2.addr_max);
- if (maxbeg < minend) {
- VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
- return true;
- }
- }
- return false;
-}
-
-static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
- if (!flags()->suppress_equal_addresses)
- return false;
- RacyAddress ra0 = {addr_min, addr_max};
- {
- ReadLock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- }
- Lock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- ctx->racy_addresses.PushBack(ra0);
- return false;
-}
-
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
// These should have been checked in ShouldReport.
// It's too late to check them here, we have already taken locks.
@@ -532,10 +653,7 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
ctx->fired_suppressions.push_back(s);
}
{
- bool old_is_freeing = thr->is_freeing;
- thr->is_freeing = false;
bool suppressed = OnReport(rep, pc_or_addr != 0);
- thr->is_freeing = old_is_freeing;
if (suppressed) {
thr->current_report = nullptr;
return false;
@@ -582,101 +700,81 @@ static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
return false;
}
-static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
- Shadow s0(thr->racy_state[0]);
- Shadow s1(thr->racy_state[1]);
- CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
- if (!s0.IsAtomic() && !s1.IsAtomic())
- return true;
- if (s0.IsAtomic() && s1.IsFreed())
- return true;
- if (s1.IsAtomic() && thr->is_freeing)
- return true;
- return false;
+static bool SpuriousRace(Shadow old) {
+ Shadow last(LoadShadow(&ctx->last_spurious_race));
+ return last.sid() == old.sid() && last.epoch() == old.epoch();
}
-void ReportRace(ThreadState *thr) {
+void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
+ AccessType typ0) {
CheckedMutex::CheckNoLocks();
// Symbolizer makes lots of intercepted calls. If we try to process them,
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
+ uptr addr = ShadowToMem(shadow_mem);
+ DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
if (!ShouldReport(thr, ReportTypeRace))
return;
- if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ uptr addr_off0, size0;
+ cur.GetAccess(&addr_off0, &size0, nullptr);
+ uptr addr_off1, size1, typ1;
+ old.GetAccess(&addr_off1, &size1, &typ1);
+ if (!flags()->report_atomic_races &&
+ ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
+ !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
+ return;
+ if (SpuriousRace(old))
return;
- bool freed = false;
- {
- Shadow s(thr->racy_state[1]);
- freed = s.GetFreedAndReset();
- thr->racy_state[1] = s.raw();
- }
-
- uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
- uptr addr_min = 0;
- uptr addr_max = 0;
- {
- uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
- uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
- uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
- uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
- addr_min = min(a0, a1);
- addr_max = max(e0, e1);
- if (IsExpectedReport(addr_min, addr_max - addr_min))
- return;
- }
- if (HandleRacyAddress(thr, addr_min, addr_max))
+ const uptr kMop = 2;
+ Shadow s[kMop] = {cur, old};
+ uptr addr0 = addr + addr_off0;
+ uptr addr1 = addr + addr_off1;
+ uptr end0 = addr0 + size0;
+ uptr end1 = addr1 + size1;
+ uptr addr_min = min(addr0, addr1);
+ uptr addr_max = max(end0, end1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
return;
- ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access && freed)
- typ = ReportTypeVptrUseAfterFree;
- else if (thr->is_vptr_access)
- typ = ReportTypeVptrRace;
- else if (freed)
- typ = ReportTypeUseAfterFree;
+ ReportType rep_typ = ReportTypeRace;
+ if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
+ rep_typ = ReportTypeVptrUseAfterFree;
+ else if (typ0 & kAccessVptr)
+ rep_typ = ReportTypeVptrRace;
+ else if (typ1 & kAccessFree)
+ rep_typ = ReportTypeUseAfterFree;
- if (IsFiredSuppression(ctx, typ, addr))
+ if (IsFiredSuppression(ctx, rep_typ, addr))
return;
- const uptr kMop = 2;
VarSizeStackTrace traces[kMop];
- uptr tags[kMop] = {kExternalTagNone};
- uptr toppc = TraceTopPC(thr);
- if (toppc >> kEventPCBits) {
- // This is a work-around for a known issue.
- // The scenario where this happens is rather elaborate and requires
- // an instrumented __sanitizer_report_error_summary callback and
- // a __tsan_symbolize_external callback and a race during a range memory
- // access larger than 8 bytes. MemoryAccessRange adds the current PC to
- // the trace and starts processing memory accesses. A first memory access
- // triggers a race, we report it and call the instrumented
- // __sanitizer_report_error_summary, which adds more stuff to the trace
- // since it is intrumented. Then a second memory access in MemoryAccessRange
- // also triggers a race and we get here and call TraceTopPC to get the
- // current PC, however now it contains some unrelated events from the
- // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
- // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
- // and the resulting PC has kExternalPCBit set, so we pass it to
- // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
- // rights to crash since the PC is completely bogus.
- // test/tsan/double_race.cpp contains a test case for this.
- toppc = 0;
- }
- ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
- if (IsFiredSuppression(ctx, typ, traces[0]))
+ Tid tids[kMop] = {thr->tid, kInvalidTid};
+ uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};
+
+ ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
+ if (IsFiredSuppression(ctx, rep_typ, traces[0]))
return;
- // MutexSet is too large to live on stack.
- Vector<u64> mset_buffer;
- mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
- MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+ DynamicMutexSet mset1;
+ MutexSet *mset[kMop] = {&thr->mset, mset1};
- Shadow s2(thr->racy_state[1]);
- RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
- if (IsFiredSuppression(ctx, typ, traces[1]))
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ if (SpuriousRace(old))
+ return;
+ if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+ StoreShadow(&ctx->last_spurious_race, old.raw());
+ return;
+ }
+
+ if (IsFiredSuppression(ctx, rep_typ, traces[1]))
return;
if (HandleRacyStacks(thr, traces))
@@ -686,39 +784,41 @@ void ReportRace(ThreadState *thr) {
uptr tag = kExternalTagNone;
for (uptr i = 0; i < kMop; i++) {
if (tags[i] != kExternalTagNone) {
- typ = ReportTypeExternalRace;
+ rep_typ = ReportTypeExternalRace;
tag = tags[i];
break;
}
}
- ThreadRegistryLock l0(ctx->thread_registry);
- ScopedReport rep(typ, tag);
- for (uptr i = 0; i < kMop; i++) {
- Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, tags[i], s, traces[i],
- i == 0 ? &thr->mset : mset2);
- }
+ ScopedReport rep(rep_typ, tag);
+ for (uptr i = 0; i < kMop; i++)
+ rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
for (uptr i = 0; i < kMop; i++) {
- FastState s(thr->racy_state[i]);
- ThreadContext *tctx = static_cast<ThreadContext*>(
- ctx->thread_registry->GetThreadLocked(s.tid()));
- if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
- continue;
+ ThreadContext *tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(tids[i]));
rep.AddThread(tctx);
}
rep.AddLocation(addr_min, addr_max - addr_min);
-#if !SANITIZER_GO
- {
- Shadow s(thr->racy_state[1]);
- if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
- rep.AddSleep(thr->last_sleep_stack_id);
+ if (flags()->print_full_thread_history) {
+ const ReportDesc *rep_desc = rep.GetReport();
+ for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+ Tid parent_tid = rep_desc->threads[i]->parent_tid;
+ if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+ continue;
+ ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(parent_tid));
+ rep.AddThread(parent_tctx);
+ }
}
-#endif
+#if !SANITIZER_GO
+ if (!((typ0 | typ1) & kAccessFree) &&
+ s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
+#endif
OutputReport(thr, rep);
}
@@ -738,9 +838,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
uptr bp = GET_CURRENT_FRAME();
- BufferedStackTrace *ptrace =
- new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
- BufferedStackTrace();
+ auto *ptrace = New<BufferedStackTrace>();
ptrace->Unwind(pc, bp, nullptr, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
lib/tsan/tsan_rtl_thread.cpp
@@ -21,133 +21,14 @@ namespace __tsan {
// ThreadContext implementation.
-ThreadContext::ThreadContext(int tid)
- : ThreadContextBase(tid)
- , thr()
- , sync()
- , epoch0()
- , epoch1() {
-}
+ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}
#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
-void ThreadContext::OnDead() {
- CHECK_EQ(sync.size(), 0);
-}
-
-void ThreadContext::OnJoined(void *arg) {
- ThreadState *caller_thr = static_cast<ThreadState *>(arg);
- AcquireImpl(caller_thr, 0, &sync);
- sync.Reset(&caller_thr->proc()->clock_cache);
-}
-
-struct OnCreatedArgs {
- ThreadState *thr;
- uptr pc;
-};
-
-void ThreadContext::OnCreated(void *arg) {
- thr = 0;
- if (tid == kMainTid)
- return;
- OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
- if (!args->thr) // GCD workers don't have a parent thread.
- return;
- args->thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(args->thr, 0, &sync);
- creation_stack_id = CurrentStackId(args->thr, args->pc);
-}
-
-void ThreadContext::OnReset() {
- CHECK_EQ(sync.size(), 0);
- uptr trace_p = GetThreadTrace(tid);
- ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
- //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
-}
-
-void ThreadContext::OnDetached(void *arg) {
- ThreadState *thr1 = static_cast<ThreadState*>(arg);
- sync.Reset(&thr1->proc()->clock_cache);
-}
-
-struct OnStartedArgs {
- ThreadState *thr;
- uptr stk_addr;
- uptr stk_size;
- uptr tls_addr;
- uptr tls_size;
-};
-
-void ThreadContext::OnStarted(void *arg) {
- OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
- thr = args->thr;
- // RoundUp so that one trace part does not contain events
- // from different threads.
- epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
- epoch1 = (u64)-1;
- new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
- args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#if !SANITIZER_GO
- thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
- thr->shadow_stack_pos = thr->shadow_stack;
- thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
-#else
- // Setup dynamic shadow stack.
- const int kInitStackSize = 8;
- thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
- kInitStackSize * sizeof(uptr));
- thr->shadow_stack_pos = thr->shadow_stack;
- thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
-#endif
- if (common_flags()->detect_deadlocks)
- thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
- thr->fast_state.SetHistorySize(flags()->history_size);
- // Commit switch to the new part of the trace.
- // TraceAddEvent will reset stack0/mset0 in the new part for us.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
- thr->fast_synch_epoch = epoch0;
- AcquireImpl(thr, 0, &sync);
- sync.Reset(&thr->proc()->clock_cache);
- thr->is_inited = true;
- DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
- "tls_addr=%zx tls_size=%zx\n",
- tid, (uptr)epoch0, args->stk_addr, args->stk_size,
- args->tls_addr, args->tls_size);
-}
-
-void ThreadContext::OnFinished() {
-#if SANITIZER_GO
- internal_free(thr->shadow_stack);
- thr->shadow_stack = nullptr;
- thr->shadow_stack_pos = nullptr;
- thr->shadow_stack_end = nullptr;
-#endif
- if (!detached) {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, 0, &sync);
- }
- epoch1 = thr->fast_state.epoch();
-
- if (common_flags()->detect_deadlocks)
- ctx->dd->DestroyLogicalThread(thr->dd_lt);
- thr->clock.ResetCached(&thr->proc()->clock_cache);
-#if !SANITIZER_GO
- thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
-#endif
-#if !SANITIZER_GO
- PlatformCleanUpThreadState(thr);
-#endif
- thr->~ThreadState();
- thr = 0;
-}
+void ThreadContext::OnReset() { CHECK(!sync); }
#if !SANITIZER_GO
struct ThreadLeak {
@@ -155,9 +36,9 @@ struct ThreadLeak {
int count;
};
-static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
- Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
+ auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
+ auto *tctx = static_cast<ThreadContext *>(tctx_base);
if (tctx->detached || tctx->status != ThreadStatusFinished)
return;
for (uptr i = 0; i < leaks.Size(); i++) {
@@ -166,12 +47,13 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
return;
}
}
- ThreadLeak leak = {tctx, 1};
- leaks.PushBack(leak);
+ leaks.PushBack({tctx, 1});
}
#endif
-#if !SANITIZER_GO
+// Disabled on Mac because lldb test TestTsanBasic fails:
+// https://reviews.llvm.org/D112603#3163158
+#if !SANITIZER_GO && !SANITIZER_APPLE
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == kMainTid) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -206,10 +88,10 @@ void ThreadFinalize(ThreadState *thr) {
#if !SANITIZER_GO
if (!ShouldReport(thr, ReportTypeThreadLeak))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
Vector<ThreadLeak> leaks;
- ctx->thread_registry->RunCallbackForEachThreadLocked(
- MaybeReportThreadLeak, &leaks);
+ ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+ &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
rep.AddThread(leaks[i].tctx, true);
@@ -221,21 +103,63 @@ void ThreadFinalize(ThreadState *thr) {
int ThreadCount(ThreadState *thr) {
uptr result;
- ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+ ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
return (int)result;
}
-int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- OnCreatedArgs args = { thr, pc };
- u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
- int tid =
- ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
- DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
+struct OnCreatedArgs {
+ VectorClock *sync;
+ uptr sync_epoch;
+ StackID stack;
+};
+
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ // The main thread and GCD workers don't have a parent thread.
+ Tid parent = kInvalidTid;
+ OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
+ if (thr) {
+ parent = thr->tid;
+ arg.stack = CurrentStackId(thr, pc);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ thr->clock.ReleaseStore(&arg.sync);
+ arg.sync_epoch = ctx->global_epoch;
+ IncrementEpoch(thr);
+ }
+ }
+ Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
return tid;
}
-void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+void ThreadContext::OnCreated(void *arg) {
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ sync = args->sync;
+ sync_epoch = args->sync_epoch;
+ creation_stack_id = args->stack;
+}
+
+extern "C" void __tsan_stack_initialization() {}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type) {
+ ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
+ if (!thr->ignore_sync) {
+ SlotAttachAndLock(thr);
+ if (thr->tctx->sync_epoch == ctx->global_epoch)
+ thr->clock.Acquire(thr->tctx->sync);
+ SlotUnlock(thr);
+ }
+ Free(thr->tctx->sync);
+
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -244,22 +168,11 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
if (thread_type != ThreadType::Fiber)
GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
&tls_size);
-
- if (tid != kMainTid) {
- if (stk_addr && stk_size)
- MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
-
- if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
- }
#endif
-
- ThreadRegistry *tr = ctx->thread_registry;
- OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
- tr->StartThread(tid, os_id, thread_type, &args);
-
- tr->Lock();
- thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
- tr->Unlock();
+ thr->stk_addr = stk_addr;
+ thr->stk_size = stk_size;
+ thr->tls_addr = tls_addr;
+ thr->tls_size = tls_size;
#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
@@ -268,16 +181,99 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
ThreadIgnoreSyncBegin(thr, 0);
}
#endif
+
+#if !SANITIZER_GO
+ // Don't imitate stack/TLS writes for the main thread,
+ // because its initialization is synchronized with all
+ // subsequent threads anyway.
+ if (tid != kMainTid) {
+ if (stk_addr && stk_size) {
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_stack_initialization));
+ MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
+ }
+
+ if (tls_addr && tls_size)
+ ImitateTlsWrite(thr, tls_addr, tls_size);
+ }
+#endif
+}
+
+void ThreadContext::OnStarted(void *arg) {
+ thr = static_cast<ThreadState *>(arg);
+ DPrintf("#%d: ThreadStart\n", tid);
+ new (thr) ThreadState(tid);
+ if (common_flags()->detect_deadlocks)
+ thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
+ thr->tctx = this;
+#if !SANITIZER_GO
+ thr->is_inited = true;
+#endif
}
void ThreadFinish(ThreadState *thr) {
+ DPrintf("#%d: ThreadFinish\n", thr->tid);
ThreadCheckIgnore(thr);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
thr->is_dead = true;
- ctx->thread_registry->FinishThread(thr->tid);
+#if !SANITIZER_GO
+ thr->is_inited = false;
+ thr->ignore_interceptors++;
+ PlatformCleanUpThreadState(thr);
+#endif
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ ThreadRegistryLock lock(&ctx->thread_registry);
+    // Note: detached is protected by the thread registry mutex;
+    // the thread may be detached concurrently by another thread.
+ if (!thr->tctx->detached) {
+ thr->clock.ReleaseStore(&thr->tctx->sync);
+ thr->tctx->sync_epoch = ctx->global_epoch;
+ IncrementEpoch(thr);
+ }
+ }
+#if !SANITIZER_GO
+ UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
+#else
+ Free(thr->shadow_stack);
+#endif
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ SlotDetach(thr);
+ ctx->thread_registry.FinishThread(thr->tid);
+ thr->~ThreadState();
+}
+
+void ThreadContext::OnFinished() {
+ Lock lock(&ctx->slot_mtx);
+ Lock lock1(&trace.mtx);
+ // Queue all trace parts into the global recycle queue.
+ auto parts = &trace.parts;
+ while (trace.local_head) {
+ CHECK(parts->Queued(trace.local_head));
+ ctx->trace_part_recycle.PushBack(trace.local_head);
+ trace.local_head = parts->Next(trace.local_head);
+ }
+ ctx->trace_part_recycle_finished += parts->Size();
+ if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
+ ctx->trace_part_finished_excess += parts->Size();
+ trace.parts_allocated = 0;
+ } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
+ parts->Size() > 1) {
+ ctx->trace_part_finished_excess += parts->Size() - 1;
+ trace.parts_allocated = 1;
+ }
+ // From now on replay will use trace->final_pos.
+ trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
+ atomic_store_relaxed(&thr->trace_pos, 0);
+ thr->tctx = nullptr;
+ thr = nullptr;
}
struct ConsumeThreadContext {
@@ -285,131 +281,52 @@ struct ConsumeThreadContext {
ThreadContextBase *tctx;
};
-static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
- ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg;
- if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
- if (findCtx->tctx) {
- // Ensure that user_id is unique. If it's not the case we are screwed.
- // Something went wrong before, but now there is no way to recover.
- // Returning a wrong thread is not an option, it may lead to very hard
- // to debug false positives (e.g. if we join a wrong thread).
- Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
- Die();
- }
- findCtx->tctx = tctx;
- tctx->user_id = 0;
- }
- return false;
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
+ return ctx->thread_registry.ConsumeThreadUserId(uid);
}
-int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
- ConsumeThreadContext findCtx = {uid, nullptr};
- ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
- int tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
- DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
- return tid;
-}
+struct JoinArg {
+ VectorClock *sync;
+ uptr sync_epoch;
+};
-void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- ctx->thread_registry->JoinThread(tid, thr);
+ JoinArg arg = {};
+ ctx->thread_registry.JoinThread(tid, &arg);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ if (arg.sync_epoch == ctx->global_epoch)
+ thr->clock.Acquire(arg.sync);
+ }
+ Free(arg.sync);
}
-void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
- ctx->thread_registry->DetachThread(tid, thr);
+void ThreadContext::OnJoined(void *ptr) {
+ auto arg = static_cast<JoinArg *>(ptr);
+ arg->sync = sync;
+ arg->sync_epoch = sync_epoch;
+ sync = nullptr;
+ sync_epoch = 0;
}
-void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
- CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
- ctx->thread_registry->SetThreadUserId(tid, uid);
-}
+void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }
-void ThreadSetName(ThreadState *thr, const char *name) {
- ctx->thread_registry->SetThreadName(thr->tid, name);
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
+ CHECK_GT(tid, 0);
+ ctx->thread_registry.DetachThread(tid, thr);
}
-void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write) {
- if (size == 0)
- return;
-
- u64 *shadow_mem = (u64*)MemToShadow(addr);
- DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
- thr->tid, (void*)pc, (void*)addr,
- (int)size, is_write);
+void ThreadContext::OnDetached(void *arg) { Free(sync); }
-#if SANITIZER_DEBUG
- if (!IsAppMem(addr)) {
- Printf("Access to non app mem %zx\n", addr);
- DCHECK(IsAppMem(addr));
- }
- if (!IsAppMem(addr + size - 1)) {
- Printf("Access to non app mem %zx\n", addr + size - 1);
- DCHECK(IsAppMem(addr + size - 1));
- }
- if (!IsShadowMem((uptr)shadow_mem)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
- DCHECK(IsShadowMem((uptr)shadow_mem));
- }
- if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
- Printf("Bad shadow addr %p (%zx)\n",
- shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
- DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
- }
-#endif
-
- if (*shadow_mem == kShadowRodata) {
- DCHECK(!is_write);
- // Access to .rodata section, no races here.
- // Measurements show that it can be 10-20% of all memory accesses.
- return;
- }
-
- FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
- return;
-
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
-
- bool unaligned = (addr % kShadowCell) != 0;
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
+ CHECK_GT(tid, 0);
+ ctx->thread_registry.SetThreadUserId(tid, uid);
+}
- // Handle unaligned beginning, if any.
- for (; addr % kShadowCell && size; addr++, size--) {
- int const kAccessSizeLog = 0;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
- shadow_mem, cur);
- }
- if (unaligned)
- shadow_mem += kShadowCnt;
- // Handle middle part, if any.
- for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
- int const kAccessSizeLog = 3;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
- shadow_mem, cur);
- shadow_mem += kShadowCnt;
- }
- // Handle ending, if any.
- for (; size; addr++, size--) {
- int const kAccessSizeLog = 0;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
- shadow_mem, cur);
- }
+void ThreadSetName(ThreadState *thr, const char *name) {
+ ctx->thread_registry.SetThreadName(thr->tid, name);
}
#if !SANITIZER_GO
@@ -421,10 +338,10 @@ void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
}
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
- void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
+ void *mem = Alloc(sizeof(ThreadState));
ThreadState *fiber = static_cast<ThreadState *>(mem);
internal_memset(fiber, 0, sizeof(*fiber));
- int tid = ThreadCreate(thr, pc, 0, true);
+ Tid tid = ThreadCreate(thr, pc, 0, true);
FiberSwitchImpl(thr, fiber);
ThreadStart(fiber, tid, 0, ThreadType::Fiber);
FiberSwitchImpl(fiber, thr);
@@ -435,7 +352,7 @@ void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
FiberSwitchImpl(thr, fiber);
ThreadFinish(fiber);
FiberSwitchImpl(fiber, thr);
- internal_free(fiber);
+ Free(fiber);
}
void FiberSwitch(ThreadState *thr, uptr pc,
lib/tsan/tsan_shadow.h
@@ -0,0 +1,193 @@
+//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_SHADOW_H
+#define TSAN_SHADOW_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class FastState {
+ public:
+ FastState() { Reset(); }
+
+ void Reset() {
+ part_.unused0_ = 0;
+ part_.sid_ = static_cast<u8>(kFreeSid);
+ part_.epoch_ = static_cast<u16>(kEpochLast);
+ part_.unused1_ = 0;
+ part_.ignore_accesses_ = false;
+ }
+
+ void SetSid(Sid sid) { part_.sid_ = static_cast<u8>(sid); }
+
+ Sid sid() const { return static_cast<Sid>(part_.sid_); }
+
+ Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
+
+ void SetEpoch(Epoch epoch) { part_.epoch_ = static_cast<u16>(epoch); }
+
+ void SetIgnoreBit() { part_.ignore_accesses_ = 1; }
+ void ClearIgnoreBit() { part_.ignore_accesses_ = 0; }
+ bool GetIgnoreBit() const { return part_.ignore_accesses_; }
+
+ private:
+ friend class Shadow;
+ struct Parts {
+ u32 unused0_ : 8;
+ u32 sid_ : 8;
+ u32 epoch_ : kEpochBits;
+ u32 unused1_ : 1;
+ u32 ignore_accesses_ : 1;
+ };
+ union {
+ Parts part_;
+ u32 raw_;
+ };
+};
+
+static_assert(sizeof(FastState) == kShadowSize, "bad FastState size");
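
For orientation, a minimal standalone mock of the FastState layout (not from the patch). The field widths are taken from the struct above; kEpochBits must be 14 for the union to fit in 4 bytes, and the bit positions assume the LSB-first bitfield layout used by GCC/Clang on little-endian targets.

#include <cassert>
#include <cstdint>

// kEpochBits = 14 so that 8 + 8 + 14 + 1 + 1 == 32 bits.
constexpr unsigned kEpochBits = 14;

union FastStateMock {
  struct {
    uint32_t unused0 : 8;
    uint32_t sid : 8;
    uint32_t epoch : kEpochBits;
    uint32_t unused1 : 1;
    uint32_t ignore_accesses : 1;
  } part;
  uint32_t raw;  // same union punning the runtime relies on
};

int main() {
  static_assert(sizeof(FastStateMock) == 4, "must fit in one shadow word");
  FastStateMock s{};
  s.part.sid = 5;
  s.part.epoch = 42;
  s.part.ignore_accesses = 1;
  // On LSB-first bitfield layout, sid lands in bits 8..15, epoch in 16..29.
  assert(((s.raw >> 8) & 0xff) == 5);
  assert(((s.raw >> 16) & ((1u << kEpochBits) - 1)) == 42);
}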
+
+class Shadow {
+ public:
+ static constexpr RawShadow kEmpty = static_cast<RawShadow>(0);
+
+ Shadow(FastState state, u32 addr, u32 size, AccessType typ) {
+ raw_ = state.raw_;
+ DCHECK_GT(size, 0);
+ DCHECK_LE(size, 8);
+ UNUSED Sid sid0 = part_.sid_;
+ UNUSED u16 epoch0 = part_.epoch_;
+ raw_ |= (!!(typ & kAccessAtomic) << kIsAtomicShift) |
+ (!!(typ & kAccessRead) << kIsReadShift) |
+ (((((1u << size) - 1) << (addr & 0x7)) & 0xff) << kAccessShift);
+ // Note: we don't check kAccessAtomic because it overlaps with
+ // FastState::ignore_accesses_ and it may be set spuriously.
+ DCHECK_EQ(part_.is_read_, !!(typ & kAccessRead));
+ DCHECK_EQ(sid(), sid0);
+ DCHECK_EQ(epoch(), epoch0);
+ }
+
+ explicit Shadow(RawShadow x = Shadow::kEmpty) { raw_ = static_cast<u32>(x); }
+
+ RawShadow raw() const { return static_cast<RawShadow>(raw_); }
+ Sid sid() const { return part_.sid_; }
+ Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
+ u8 access() const { return part_.access_; }
+
+ void GetAccess(uptr *addr, uptr *size, AccessType *typ) const {
+ DCHECK(part_.access_ != 0 || raw_ == static_cast<u32>(Shadow::kRodata));
+ if (addr)
+ *addr = part_.access_ ? __builtin_ffs(part_.access_) - 1 : 0;
+ if (size)
+ *size = part_.access_ == kFreeAccess ? kShadowCell
+ : __builtin_popcount(part_.access_);
+ if (typ) {
+ *typ = part_.is_read_ ? kAccessRead : kAccessWrite;
+ if (part_.is_atomic_)
+ *typ |= kAccessAtomic;
+ if (part_.access_ == kFreeAccess)
+ *typ |= kAccessFree;
+ }
+ }
+
+ ALWAYS_INLINE
+ bool IsBothReadsOrAtomic(AccessType typ) const {
+ u32 is_read = !!(typ & kAccessRead);
+ u32 is_atomic = !!(typ & kAccessAtomic);
+ bool res =
+ raw_ & ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
+ DCHECK_EQ(res,
+ (part_.is_read_ && is_read) || (part_.is_atomic_ && is_atomic));
+ return res;
+ }
+
+ ALWAYS_INLINE
+ bool IsRWWeakerOrEqual(AccessType typ) const {
+ u32 is_read = !!(typ & kAccessRead);
+ u32 is_atomic = !!(typ & kAccessAtomic);
+ UNUSED u32 res0 =
+ (part_.is_atomic_ > is_atomic) ||
+ (part_.is_atomic_ == is_atomic && part_.is_read_ >= is_read);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const u32 kAtomicReadMask = (1 << kIsAtomicShift) | (1 << kIsReadShift);
+ bool res = (raw_ & kAtomicReadMask) >=
+ ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
+
+ DCHECK_EQ(res, res0);
+ return res;
+#else
+ return res0;
+#endif
+ }
+
+ // The FreedMarker must not pass "the same access check" so that we don't
+ // return from the race detection algorithm early.
+ static RawShadow FreedMarker() {
+ FastState fs;
+ fs.SetSid(kFreeSid);
+ fs.SetEpoch(kEpochLast);
+ Shadow s(fs, 0, 8, kAccessWrite);
+ return s.raw();
+ }
+
+ static RawShadow FreedInfo(Sid sid, Epoch epoch) {
+ Shadow s;
+ s.part_.sid_ = sid;
+ s.part_.epoch_ = static_cast<u16>(epoch);
+ s.part_.access_ = kFreeAccess;
+ return s.raw();
+ }
+
+ private:
+ struct Parts {
+ u8 access_;
+ Sid sid_;
+ u16 epoch_ : kEpochBits;
+ u16 is_read_ : 1;
+ u16 is_atomic_ : 1;
+ };
+ union {
+ Parts part_;
+ u32 raw_;
+ };
+
+ static constexpr u8 kFreeAccess = 0x81;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ static constexpr uptr kAccessShift = 0;
+ static constexpr uptr kIsReadShift = 30;
+ static constexpr uptr kIsAtomicShift = 31;
+#else
+ static constexpr uptr kAccessShift = 24;
+ static constexpr uptr kIsReadShift = 1;
+ static constexpr uptr kIsAtomicShift = 0;
+#endif
+
+ public:
+ // .rodata shadow marker, see MapRodata and ContainsSameAccessFast.
+ static constexpr RawShadow kRodata =
+ static_cast<RawShadow>(1 << kIsReadShift);
+};
+
+static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");
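
As a standalone illustration (not part of the patch): the access byte packs one bit per byte of the 8-byte shadow cell, using the same expression as the Shadow constructor above, and GetAccess() recovers the offset and size with ffs/popcount.

#include <cassert>
#include <cstdint>

// Bit i of the mask is set iff byte i of the 8-byte shadow cell is touched.
static uint8_t EncodeAccess(unsigned offset_in_cell, unsigned size) {
  return (((1u << size) - 1) << (offset_in_cell & 0x7)) & 0xff;
}

int main() {
  // A 4-byte access at cell offset 2 -> mask 0b00111100.
  uint8_t access = EncodeAccess(2, 4);
  assert(access == 0x3c);
  // GetAccess() recovers offset and size from the mask:
  unsigned offset = __builtin_ffs(access) - 1;  // lowest set bit
  unsigned size = __builtin_popcount(access);   // number of set bits
  assert(offset == 2 && size == 4);
}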
+
+ALWAYS_INLINE RawShadow LoadShadow(RawShadow *p) {
+ return static_cast<RawShadow>(
+ atomic_load((atomic_uint32_t *)p, memory_order_relaxed));
+}
+
+ALWAYS_INLINE void StoreShadow(RawShadow *sp, RawShadow s) {
+ atomic_store((atomic_uint32_t *)sp, static_cast<u32>(s),
+ memory_order_relaxed);
+}
+
+} // namespace __tsan
+
+#endif
lib/tsan/tsan_spinlock_defs_mac.h
@@ -0,0 +1,45 @@
+//===-- tsan_spinlock_defs_mac.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific forward declarations of functions that may be
+// deprecated in later versions of the OS.
+// These are needed for interceptors.
+//
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_APPLE
+
+#ifndef TSAN_SPINLOCK_DEFS_MAC_H
+#define TSAN_SPINLOCK_DEFS_MAC_H
+
+#include <stdint.h>
+
+extern "C" {
+
+/*
+Provides forward declarations related to OSSpinLocks on Darwin. These
+functions are deprecated on macOS version 10.12 and later, and are no longer
+included in the system headers.
+
+However, the symbols are still available on the system, so we provide these
+forward declarations to prevent compilation errors in
+tsan_interceptors_mac.cpp, which references these functions when defining
+TSAN interceptor functions.
+*/
+
+typedef int32_t OSSpinLock;
+
+void OSSpinLockLock(volatile OSSpinLock *__lock);
+void OSSpinLockUnlock(volatile OSSpinLock *__lock);
+bool OSSpinLockTry(volatile OSSpinLock *__lock);
+
+}
+
+#endif //TSAN_SPINLOCK_DEFS_MAC_H
+#endif // SANITIZER_APPLE
lib/tsan/tsan_stack_trace.cpp
@@ -23,14 +23,10 @@ VarSizeStackTrace::~VarSizeStackTrace() {
}
void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
- if (trace_buffer) {
- internal_free(trace_buffer);
- }
- trace_buffer =
- (new_size > 0)
- ? (uptr *)internal_alloc(MBlockStackTrace,
- new_size * sizeof(trace_buffer[0]))
- : nullptr;
+ Free(trace_buffer);
+ trace_buffer = (new_size > 0)
+ ? (uptr *)Alloc(new_size * sizeof(trace_buffer[0]))
+ : nullptr;
trace = trace_buffer;
size = new_size;
}
lib/tsan/tsan_suppressions.cpp
@@ -10,15 +10,16 @@
//
//===----------------------------------------------------------------------===//
+#include "tsan_suppressions.h"
+
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
-#include "tsan_suppressions.h"
-#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
+#include "tsan_rtl.h"
#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
lib/tsan/tsan_symbolize.cpp
@@ -110,7 +110,8 @@ ReportLocation *SymbolizeData(uptr addr) {
DataInfo info;
if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
- ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ auto *ent = New<ReportLocation>();
+ ent->type = ReportLocationGlobal;
internal_memcpy(&ent->global, &info, sizeof(info));
return ent;
}
lib/tsan/tsan_sync.cpp
@@ -18,42 +18,31 @@ namespace __tsan {
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
+SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(); }
-void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack) {
+ Reset();
this->addr = addr;
- this->uid = uid;
- this->next = 0;
-
- creation_stack_id = 0;
- if (!SANITIZER_GO) // Go does not use them
+ next = 0;
+ if (save_stack && !SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(Processor *proc) {
- uid = 0;
- creation_stack_id = 0;
+void SyncVar::Reset() {
+ CHECK(!ctx->resetting);
+ creation_stack_id = kInvalidStackID;
owner_tid = kInvalidTid;
- last_lock = 0;
+ last_lock.Reset();
recursion = 0;
atomic_store_relaxed(&flags, 0);
-
- if (proc == 0) {
- CHECK_EQ(clock.size(), 0);
- CHECK_EQ(read_clock.size(), 0);
- } else {
- clock.Reset(&proc->clock_cache);
- read_clock.Reset(&proc->clock_cache);
- }
+ Free(clock);
+ Free(read_clock);
}
MetaMap::MetaMap()
- : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
- sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
- atomic_store(&uid_gen_, 0, memory_order_relaxed);
-}
+ : block_alloc_("heap block allocator"), sync_alloc_("sync allocator") {}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
@@ -67,16 +56,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p, bool reset) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return sz;
}
-bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz, bool reset) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -98,7 +87,8 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(proc);
+ if (reset)
+ s->Reset();
sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
@@ -115,30 +105,30 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects; at that point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from the end as well.
-void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz, bool reset) {
if (SANITIZER_GO) {
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do the optimization only for C/C++.
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return;
}
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(proc, p, diff);
+ FreeRange(proc, p, diff, reset);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(proc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff, reset);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -149,7 +139,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
const uptr sz0 = sz;
// Probe start of the range.
for (uptr checked = 0; sz > 0; checked += kPageSize) {
- bool has_something = FreeRange(proc, p, kPageSize);
+ bool has_something = FreeRange(proc, p, kPageSize, reset);
p += kPageSize;
sz -= kPageSize;
if (!has_something && checked > (128 << 10))
@@ -157,7 +147,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
}
// Probe end of the range.
for (uptr checked = 0; sz > 0; checked += kPageSize) {
- bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize, reset);
sz -= kPageSize;
// Stacks grow down, so sync object are most likely at the end of the region
// (if it is a stack). The very end of the stack is TLS and tsan increases
@@ -176,6 +166,27 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
Die();
}
+void MetaMap::ResetClocks() {
+  // This can be called from the background thread,
+  // which does not have a proc/cache.
+  // The cache is too large to live on the stack.
+ static InternalAllocatorCache cache;
+ internal_memset(&cache, 0, sizeof(cache));
+ internal_allocator()->InitCache(&cache);
+ sync_alloc_.ForEach([&](SyncVar *s) {
+ if (s->clock) {
+ InternalFree(s->clock, &cache);
+ s->clock = nullptr;
+ }
+ if (s->read_clock) {
+ InternalFree(s->read_clock, &cache);
+ s->read_clock = nullptr;
+ }
+ s->last_lock.Reset();
+ });
+ internal_allocator()->DestroyCache(&cache);
+}
+
MBlock* MetaMap::GetBlock(uptr p) {
u32 *meta = MemToMeta(p);
u32 idx = *meta;
@@ -190,63 +201,41 @@ MBlock* MetaMap::GetBlock(uptr p) {
}
}
-SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock) {
- return GetAndLock(thr, pc, addr, write_lock, true);
-}
-
-SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
- return GetAndLock(0, 0, addr, write_lock, false);
-}
-
-SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
- bool create) NO_THREAD_SAFETY_ANALYSIS {
+SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+ bool save_stack) {
+ DCHECK(!create || thr->slot_locked);
u32 *meta = MemToMeta(addr);
u32 idx0 = *meta;
u32 myidx = 0;
- SyncVar *mys = 0;
+ SyncVar *mys = nullptr;
for (;;) {
- u32 idx = idx0;
- for (;;) {
- if (idx == 0)
- break;
- if (idx & kFlagBlock)
- break;
+ for (u32 idx = idx0; idx && !(idx & kFlagBlock);) {
DCHECK(idx & kFlagSync);
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
- if (s->addr == addr) {
- if (myidx != 0) {
- mys->Reset(thr->proc());
+ if (LIKELY(s->addr == addr)) {
+ if (UNLIKELY(myidx != 0)) {
+ mys->Reset();
sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
}
- if (write_lock)
- s->mtx.Lock();
- else
- s->mtx.ReadLock();
return s;
}
idx = s->next;
}
if (!create)
- return 0;
- if (*meta != idx0) {
+ return nullptr;
+ if (UNLIKELY(*meta != idx0)) {
idx0 = *meta;
continue;
}
- if (myidx == 0) {
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ if (LIKELY(myidx == 0)) {
myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
mys = sync_alloc_.Map(myidx);
- mys->Init(thr, pc, addr, uid);
+ mys->Init(thr, pc, addr, save_stack);
}
mys->next = idx0;
if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
myidx | kFlagSync, memory_order_release)) {
- if (write_lock)
- mys->mtx.Lock();
- else
- mys->mtx.ReadLock();
return mys;
}
}
@@ -290,4 +279,11 @@ void MetaMap::OnProcIdle(Processor *proc) {
sync_alloc_.FlushCache(&proc->sync_cache);
}
+MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
+ MemoryStats stats;
+ stats.mem_block = block_alloc_.AllocatedMemory();
+ stats.sync_obj = sync_alloc_.AllocatedMemory();
+ return stats;
+}
+
} // namespace __tsan
lib/tsan/tsan_sync.h
@@ -16,8 +16,9 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
-#include "tsan_clock.h"
#include "tsan_dense_alloc.h"
+#include "tsan_shadow.h"
+#include "tsan_vector_clock.h"
namespace __tsan {
@@ -46,39 +47,25 @@ enum MutexFlags {
MutexFlagNotStatic,
};
+// SyncVar is a descriptor of a user synchronization object
+// (mutex or an atomic variable).
struct SyncVar {
SyncVar();
uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- u64 uid; // Globally unique id.
- u32 creation_stack_id;
- u32 owner_tid; // Set only by exclusive owners.
- u64 last_lock;
+ StackID creation_stack_id;
+ Tid owner_tid; // Set only by exclusive owners.
+ FastState last_lock;
int recursion;
atomic_uint32_t flags;
u32 next; // in MetaMap
DDMutex dd;
- SyncClock read_clock; // Used for rw mutexes only.
- // The clock is placed last, so that it is situated on a different cache line
- // with the mtx. This reduces contention for hot sync objects.
- SyncClock clock;
+ VectorClock *read_clock; // Used for rw mutexes only.
+ VectorClock *clock;
- void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
- void Reset(Processor *proc);
-
- u64 GetId() const {
- // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
- return GetLsb((u64)addr | (uid << 48), 60);
- }
- bool CheckId(u64 uid) const {
- CHECK_EQ(uid, GetLsb(uid, 14));
- return GetLsb(this->uid, 14) == uid;
- }
- static uptr SplitId(u64 id, u64 *uid) {
- *uid = id >> 48;
- return (uptr)GetLsb(id, 48);
- }
+ void Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack);
+ void Reset();
bool IsFlagSet(u32 f) const {
return atomic_load_relaxed(&flags) & f;
@@ -101,28 +88,48 @@ struct SyncVar {
}
};
-/* MetaMap allows to map arbitrary user pointers onto various descriptors.
- Currently it maps pointers to heap block descriptors and sync var descs.
- It uses 1/2 direct shadow, see tsan_platform.h.
-*/
+// MetaMap maps app addresses to heap block (MBlock) and sync var (SyncVar)
+// descriptors. It uses 1/2 direct shadow, see tsan_platform.h for the mapping.
class MetaMap {
public:
MetaMap();
void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
- uptr FreeBlock(Processor *proc, uptr p);
- bool FreeRange(Processor *proc, uptr p, uptr sz);
- void ResetRange(Processor *proc, uptr p, uptr sz);
+
+  // FreeBlock resets all sync objects in the range if reset=true and must not
+  // run concurrently with ResetClocks, which resets all sync objects
+  // w/o any synchronization (as part of DoReset).
+  // If we don't have a thread slot (very early/late in thread lifetime or
+  // Go/Java callbacks) or the slot is not locked, then reset must be set to
+  // false. In that case sync object clocks will be reset later (when the
+  // object is reused or during the next ResetClocks).
+ uptr FreeBlock(Processor *proc, uptr p, bool reset);
+ bool FreeRange(Processor *proc, uptr p, uptr sz, bool reset);
+ void ResetRange(Processor *proc, uptr p, uptr sz, bool reset);
+ // Reset vector clocks of all sync objects.
+ // Must be called when no other threads access sync objects.
+ void ResetClocks();
MBlock* GetBlock(uptr p);
- SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+ SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
+ bool save_stack) {
+ return GetSync(thr, pc, addr, true, save_stack);
+ }
+ SyncVar *GetSyncIfExists(uptr addr) {
+ return GetSync(nullptr, 0, addr, false, false);
+ }
void MoveMemory(uptr src, uptr dst, uptr sz);
void OnProcIdle(Processor *proc);
+ struct MemoryStats {
+ uptr mem_block;
+ uptr sync_obj;
+ };
+
+ MemoryStats GetMemoryStats() const;
+
private:
static const u32 kFlagMask = 3u << 30;
static const u32 kFlagBlock = 1u << 30;
@@ -131,10 +138,9 @@ class MetaMap {
typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
BlockAlloc block_alloc_;
SyncAlloc sync_alloc_;
- atomic_uint64_t uid_gen_;
- SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
- bool create);
+ SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+ bool save_stack);
};
} // namespace __tsan
lib/tsan/tsan_trace.h
@@ -13,58 +13,201 @@
#define TSAN_TRACE_H
#include "tsan_defs.h"
-#include "tsan_stack_trace.h"
+#include "tsan_ilist.h"
#include "tsan_mutexset.h"
+#include "tsan_stack_trace.h"
namespace __tsan {
-const int kTracePartSizeBits = 13;
-const int kTracePartSize = 1 << kTracePartSizeBits;
-const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
-const int kTraceSize = kTracePartSize * kTraceParts;
-
-// Must fit into 3 bits.
-enum EventType {
- EventTypeMop,
- EventTypeFuncEnter,
- EventTypeFuncExit,
- EventTypeLock,
- EventTypeUnlock,
- EventTypeRLock,
- EventTypeRUnlock
+enum class EventType : u64 {
+ kAccessExt,
+ kAccessRange,
+ kLock,
+ kRLock,
+ kUnlock,
+ kTime,
+};
+
+// "Base" type for all events for type dispatch.
+struct Event {
+ // We use variable-length type encoding to give more bits to some event
+ // types that need them. If is_access is set, this is EventAccess.
+ // Otherwise, if is_func is set, this is EventFunc.
+ // Otherwise the type field denotes the concrete event type.
+ u64 is_access : 1;
+ u64 is_func : 1;
+ EventType type : 3;
+ u64 _ : 59;
+};
+static_assert(sizeof(Event) == 8, "bad Event size");
+
+// No-op event used as padding; it does not affect state during replay.
+static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
+
+// A compressed memory access event can represent only accesses whose PCs are
+// close enough to each other; otherwise we fall back to EventAccessExt.
+struct EventAccess {
+ static constexpr uptr kPCBits = 15;
+ static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
+ "unused bits in EventAccess");
+
+ u64 is_access : 1; // = 1
+ u64 is_read : 1;
+ u64 is_atomic : 1;
+ u64 size_log : 2;
+ u64 pc_delta : kPCBits; // signed delta from the previous memory access PC
+ u64 addr : kCompressedAddrBits;
};
+static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
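As a hedged sketch of the delta scheme (my reading, not code from this commit): the tracer remembers the PC of the previous access and stores a biased 15-bit difference, falling back to the 2-word EventAccessExt whenever the delta does not fit.

// Assumed helpers illustrating the biased 15-bit PC delta.
bool EncodePCDelta(uptr prev_pc, uptr pc, u64 *pc_delta) {
  u64 delta = pc - prev_pc + (1ull << (EventAccess::kPCBits - 1));
  if (delta >= (1ull << EventAccess::kPCBits))
    return false;  // caller must emit EventAccessExt with the full PC
  *pc_delta = delta;
  return true;
}

uptr DecodePC(uptr prev_pc, u64 pc_delta) {
  // Inverse of the encoding above; modular uptr arithmetic makes this exact.
  return prev_pc + pc_delta - (1ull << (EventAccess::kPCBits - 1));
}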
-// Represents a thread event (from most significant bit):
-// u64 typ : 3; // EventType.
-// u64 addr : 61; // Associated pc.
-typedef u64 Event;
+// Function entry (pc != 0) or exit (pc == 0).
+struct EventFunc {
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 1
+ u64 pc : 62;
+};
+static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
+
+// Extended memory access with full PC.
+struct EventAccessExt {
+ // Note: precisely specifying the unused parts of the bitfield is critical
+ // for performance. If we don't specify them, the compiler will generate
+ // code that loads the old value and shuffles it to extract the unused bits
+ // and apply them to the new value. If we specify the unused part and store
+ // 0 in there, all that unnecessary code goes away (the store of the 0
+ // constant is combined with the other constant parts).
+ static constexpr uptr kUnusedBits = 11;
+ static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
+ "unused bits in EventAccessExt");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kAccessExt
+ u64 is_read : 1;
+ u64 is_atomic : 1;
+ u64 size_log : 2;
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+ u64 pc;
+};
+static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
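To make the note above concrete, here is an assumed writer sketch (not code from this commit): every field, including the unused `_` bits, gets an explicit store, so the first word is assembled purely from constants and shifts with no load of the previous trace slot.

void WriteAccessExt(EventAccessExt *ev, uptr compressed_addr, uptr pc,
                    bool is_read, bool is_atomic, uptr size_log) {
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessExt;
  ev->is_read = is_read;
  ev->is_atomic = is_atomic;
  ev->size_log = size_log;
  ev->_ = 0;  // explicit zero: keeps the store a pure constant write
  ev->addr = compressed_addr;  // assumed already compressed to kCompressedAddrBits
  ev->pc = pc;
}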
+
+// Access to a memory range.
+struct EventAccessRange {
+ static constexpr uptr kSizeLoBits = 13;
+ static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
+ "unused bits in EventAccessRange");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kAccessRange
+ u64 is_read : 1;
+ u64 is_free : 1;
+ u64 size_lo : kSizeLoBits;
+ u64 pc : kCompressedAddrBits;
+ u64 addr : kCompressedAddrBits;
+ u64 size_hi : 64 - kCompressedAddrBits;
+};
+static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
-const uptr kEventPCBits = 61;
+// Mutex lock.
+struct EventLock {
+ static constexpr uptr kStackIDLoBits = 15;
+ static constexpr uptr kStackIDHiBits =
+ sizeof(StackID) * kByteBits - kStackIDLoBits;
+ static constexpr uptr kUnusedBits = 3;
+ static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
+ "unused bits in EventLock");
+ static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
+ "unused bits in EventLock");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kLock or EventType::kRLock
+ u64 pc : kCompressedAddrBits;
+ u64 stack_lo : kStackIDLoBits;
+ u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventLock) == 16, "bad EventLock size");
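A full StackID does not fit next to the compressed PC in the first word, so it is split across the two words; a hedged sketch of the round trip (assumed helper names):

void SplitStackID(StackID stack, EventLock *ev) {
  ev->stack_lo = static_cast<u64>(stack) & ((1ull << EventLock::kStackIDLoBits) - 1);
  ev->stack_hi = static_cast<u64>(stack) >> EventLock::kStackIDLoBits;
}

StackID JoinStackID(const EventLock *ev) {
  return static_cast<StackID>(ev->stack_lo |
                              (ev->stack_hi << EventLock::kStackIDLoBits));
}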
+
+// Mutex unlock.
+struct EventUnlock {
+ static constexpr uptr kUnusedBits = 15;
+ static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
+ "unused bits in EventUnlock");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kUnlock
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");
+
+// Time change event.
+struct EventTime {
+ static constexpr uptr kUnusedBits = 37;
+ static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
+ "unused bits in EventTime");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kTime
+ u64 sid : sizeof(Sid) * kByteBits;
+ u64 epoch : kEpochBits;
+ u64 _ : kUnusedBits;
+};
+static_assert(sizeof(EventTime) == 8, "bad EventTime size");
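A hedged replay-side sketch of the variable-length dispatch described at the top of these event definitions (the branch bodies are placeholders, not code from this commit):

void ReplayEvent(const Event *ev) {
  if (ev->is_access) {
    auto *access = reinterpret_cast<const EventAccess *>(ev);  // 1 word
    // ... decode the PC delta and compressed address ...
    (void)access;
  } else if (ev->is_func) {
    auto *func = reinterpret_cast<const EventFunc *>(ev);      // 1 word
    // ... func->pc != 0 is an entry, func->pc == 0 is an exit ...
    (void)func;
  } else {
    switch (ev->type) {
      case EventType::kAccessExt:   /* 2-word access with full PC */ break;
      case EventType::kAccessRange: /* range access (e.g. memset/free) */ break;
      case EventType::kLock:
      case EventType::kRLock:       /* 2-word mutex (r)lock */ break;
      case EventType::kUnlock:      /* 1-word mutex unlock */ break;
      case EventType::kTime:        /* sid/epoch change */ break;
    }
  }
}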
+
+struct Trace;
struct TraceHeader {
-#if !SANITIZER_GO
- BufferedStackTrace stack0; // Start stack for the trace.
-#else
- VarSizeStackTrace stack0;
-#endif
- u64 epoch0; // Start epoch for the trace.
- MutexSet mset0;
-
- TraceHeader() : stack0(), epoch0() {}
+ Trace* trace = nullptr; // back-pointer to Trace containing this part
+ INode trace_parts; // in Trace::parts
+ INode global; // in Context::trace_part_recycle
};
+struct TracePart : TraceHeader {
+ // There are a lot of goroutines in Go, so we use smaller parts.
+ static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
+ static constexpr uptr kSize =
+ (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
+ // TraceAcquire does a fast event-pointer overflow check by masking the
+ // pointer into TracePart::events with kAlignment. Since TraceParts are
+ // allocated page-aligned, this check detects the end of the array
+ // (it also has false positives in the middle that are filtered out
+ // separately). This also requires events to be the last field.
+ static constexpr uptr kAlignment = 0xff0;
+ Event events[kSize];
+
+ TracePart() {}
+};
+static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
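A simplified sketch of the check that comment describes (my reading of TraceAcquire, with an assumed helper name): because each part is page-aligned and `events` ends the part, the pointer just past the final slot has its low bits clear, so a single mask test catches the end; the occasional mid-part false positive is resolved by the slow path that actually switches parts.

bool TraceHasSpaceForOneMoreEvent(Event *pos) {
  // At the true end of a part, pos + 1 lands on a page boundary and the
  // masked bits are all zero, so the fast path reports "no space".
  return (reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment) != 0;
}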
+
struct Trace {
Mutex mtx;
-#if !SANITIZER_GO
- // Must be last to catch overflow as paging fault.
- // Go shadow stack is dynamically allocated.
- uptr shadow_stack[kShadowStackSize];
-#endif
- // Must be the last field, because we unmap the unused part in
- // CreateThreadContext.
- TraceHeader headers[kTraceParts];
+ IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
+ // First node that is not queued into ctx->trace_part_recycle.
+ TracePart* local_head;
+ // Final position in the last part for finished threads.
+ Event* final_pos = nullptr;
+ // Number of trace parts allocated specifically on behalf of this trace.
+ // The total number of parts in this trace can be larger if we retake some
+ // parts from other traces.
+ uptr parts_allocated = 0;
Trace() : mtx(MutexTypeTrace) {}
+
+ // We need at least 3 parts per thread, because we want to keep at least
+ // 2 parts per thread that are not queued into ctx->trace_part_recycle
+ // (the current one being filled and one full part that ensures that
+ // we always have at least one part's worth of previous memory accesses).
+ static constexpr uptr kMinParts = 3;
+
+ static constexpr uptr kFinishedThreadLo = 16;
+ static constexpr uptr kFinishedThreadHi = 64;
};
} // namespace __tsan
lib/tsan/tsan_update_shadow_word_inl.h
@@ -1,59 +0,0 @@
-//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-// Body of the hottest inner loop.
-// If we wrap this body into a function, compilers (both gcc and clang)
-// produce sligtly less efficient code.
-//===----------------------------------------------------------------------===//
-do {
- const unsigned kAccessSize = 1 << kAccessSizeLog;
- u64 *sp = &shadow_mem[idx];
- old = LoadShadow(sp);
- if (LIKELY(old.IsZero())) {
- if (!stored) {
- StoreIfNotYetStored(sp, &store_word);
- stored = true;
- }
- break;
- }
- // is the memory access equal to the previous?
- if (LIKELY(Shadow::Addr0AndSizeAreEqual(cur, old))) {
- // same thread?
- if (LIKELY(Shadow::TidsAreEqual(old, cur))) {
- if (LIKELY(old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))) {
- StoreIfNotYetStored(sp, &store_word);
- stored = true;
- }
- break;
- }
- if (HappensBefore(old, thr)) {
- if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) {
- StoreIfNotYetStored(sp, &store_word);
- stored = true;
- }
- break;
- }
- if (LIKELY(old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)))
- break;
- goto RACE;
- }
- // Do the memory access intersect?
- if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
- if (Shadow::TidsAreEqual(old, cur))
- break;
- if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
- break;
- if (LIKELY(HappensBefore(old, thr)))
- break;
- goto RACE;
- }
- // The accesses do not intersect.
- break;
-} while (0);
lib/tsan/tsan_vector_clock.cpp
@@ -0,0 +1,126 @@
+//===-- tsan_vector_clock.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_vector_clock.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+#if TSAN_VECTORIZE
+const uptr kVectorClockSize = kThreadSlotCount * sizeof(Epoch) / sizeof(m128);
+#endif
+
+VectorClock::VectorClock() { Reset(); }
+
+void VectorClock::Reset() {
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = kEpochZero;
+#else
+ m128 z = _mm_setzero_si128();
+ m128* vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) _mm_store_si128(&vclk[i], z);
+#endif
+}
+
+void VectorClock::Acquire(const VectorClock* src) {
+ if (!src)
+ return;
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = max(clk_[i], src->clk_[i]);
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+ m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(src->clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 s = _mm_load_si128(&vsrc[i]);
+ m128 d = _mm_load_si128(&vdst[i]);
+ m128 m = _mm_max_epu16(s, d);
+ _mm_store_si128(&vdst[i], m);
+ }
+#endif
+}
+
+static VectorClock* AllocClock(VectorClock** dstp) {
+ if (UNLIKELY(!*dstp))
+ *dstp = New<VectorClock>();
+ return *dstp;
+}
+
+void VectorClock::Release(VectorClock** dstp) const {
+ VectorClock* dst = AllocClock(dstp);
+ dst->Acquire(this);
+}
+
+void VectorClock::ReleaseStore(VectorClock** dstp) const {
+ VectorClock* dst = AllocClock(dstp);
+ *dst = *this;
+}
+
+VectorClock& VectorClock::operator=(const VectorClock& other) {
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = other.clk_[i];
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+ m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(other.clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 s = _mm_load_si128(&vsrc[i]);
+ _mm_store_si128(&vdst[i], s);
+ }
+#endif
+ return *this;
+}
+
+void VectorClock::ReleaseStoreAcquire(VectorClock** dstp) {
+ VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++) {
+ Epoch tmp = dst->clk_[i];
+ dst->clk_[i] = clk_[i];
+ clk_[i] = max(clk_[i], tmp);
+ }
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+ m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 t = _mm_load_si128(&vdst[i]);
+ m128 c = _mm_load_si128(&vclk[i]);
+ m128 m = _mm_max_epu16(c, t);
+ _mm_store_si128(&vdst[i], c);
+ _mm_store_si128(&vclk[i], m);
+ }
+#endif
+}
+
+void VectorClock::ReleaseAcquire(VectorClock** dstp) {
+ VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++) {
+ dst->clk_[i] = max(dst->clk_[i], clk_[i]);
+ clk_[i] = dst->clk_[i];
+ }
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+ m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 c = _mm_load_si128(&vclk[i]);
+ m128 d = _mm_load_si128(&vdst[i]);
+ m128 m = _mm_max_epu16(c, d);
+ _mm_store_si128(&vdst[i], m);
+ _mm_store_si128(&vclk[i], m);
+ }
+#endif
+}
+
+} // namespace __tsan
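A simplified usage sketch of the acquire/release pairing these operations support (hypothetical caller code; `SyncObject` and the field names are assumptions, not part of this commit):

struct SyncObject {
  VectorClock *clock = nullptr;  // lazily allocated by Release()/ReleaseStore()
};

void OnRelease(VectorClock &thread_clock, SyncObject *s) {
  // Publish (merge) everything the releasing thread has observed so far.
  thread_clock.Release(&s->clock);
}

void OnAcquire(VectorClock &thread_clock, const SyncObject *s) {
  // Pull in everything published by previous releasers (no-op if never released).
  thread_clock.Acquire(s->clock);
}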
lib/tsan/tsan_vector_clock.h
@@ -0,0 +1,51 @@
+//===-- tsan_vector_clock.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_VECTOR_CLOCK_H
+#define TSAN_VECTOR_CLOCK_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// Fixed-size vector clock, used both for threads and sync objects.
+class VectorClock {
+ public:
+ VectorClock();
+
+ Epoch Get(Sid sid) const;
+ void Set(Sid sid, Epoch v);
+
+ void Reset();
+ void Acquire(const VectorClock* src);
+ void Release(VectorClock** dstp) const;
+ void ReleaseStore(VectorClock** dstp) const;
+ void ReleaseStoreAcquire(VectorClock** dstp);
+ void ReleaseAcquire(VectorClock** dstp);
+
+ VectorClock& operator=(const VectorClock& other);
+
+ private:
+ Epoch clk_[kThreadSlotCount] VECTOR_ALIGNED;
+};
+
+ALWAYS_INLINE Epoch VectorClock::Get(Sid sid) const {
+ return clk_[static_cast<u8>(sid)];
+}
+
+ALWAYS_INLINE void VectorClock::Set(Sid sid, Epoch v) {
+ DCHECK_GE(v, clk_[static_cast<u8>(sid)]);
+ clk_[static_cast<u8>(sid)] = v;
+}
+
+} // namespace __tsan
+
+#endif // TSAN_VECTOR_CLOCK_H