const std = @import("std.zig");
const Io = std.Io;
const Writer = std.Io.Writer;
const tty = std.Io.tty;
const math = std.math;
const mem = std.mem;
const posix = std.posix;
const fs = std.fs;
const testing = std.testing;
const Allocator = mem.Allocator;
const File = std.fs.File;
const windows = std.os.windows;

const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const native_os = builtin.os.tag;
const StackTrace = std.builtin.StackTrace;

const root = @import("root");

pub const Dwarf = @import("debug/Dwarf.zig");
pub const Pdb = @import("debug/Pdb.zig");
pub const ElfFile = @import("debug/ElfFile.zig");
pub const MachOFile = @import("debug/MachOFile.zig");
pub const Info = @import("debug/Info.zig");
pub const Coverage = @import("debug/Coverage.zig");
pub const cpu_context = @import("debug/cpu_context.zig");

/// This type abstracts the target-specific implementation of accessing this process' own debug
/// information behind a generic interface which supports looking up source locations associated
/// with addresses, as well as unwinding the stack where a safe mechanism to do so exists.
///
/// The Zig Standard Library provides default implementations of `SelfInfo` for common targets, but
/// the implementation can be overridden by exposing `root.debug.SelfInfo`. Setting `SelfInfo` to
/// `void` indicates that the `SelfInfo` API is not supported.
///
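/// For example, a root source file could opt out of the `SelfInfo` API entirely
/// (an illustrative sketch; a custom implementation satisfying the interface
/// below works the same way):
///
/// ```
/// pub const debug = struct {
///     pub const SelfInfo = void;
/// };
/// ```
///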
/// This type must expose the following declarations:
///
/// ```
/// pub const init: SelfInfo;
/// pub fn deinit(si: *SelfInfo, gpa: Allocator) void;
///
/// /// Returns the symbol and source location of the instruction at `address`.
/// pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) SelfInfoError!Symbol;
/// /// Returns a name for the "module" (e.g. shared library or executable image) containing `address`.
/// pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) SelfInfoError![]const u8;
///
/// /// Whether a reliable stack unwinding strategy, such as DWARF unwinding, is available.
/// pub const can_unwind: bool;
/// /// Only required if `can_unwind == true`.
/// pub const UnwindContext = struct {
///     /// An address representing the instruction pointer in the last frame.
///     pc: usize,
///
///     pub fn init(ctx: *const cpu_context.Native) UnwindContext;
///     pub fn deinit(ctx: *UnwindContext, gpa: Allocator) void;
///     /// Returns the frame pointer associated with the last unwound stack frame.
///     /// If the frame pointer is unknown, 0 may be returned instead.
///     pub fn getFp(uc: *UnwindContext) usize;
/// };
/// /// Only required if `can_unwind == true`. Unwinds a single stack frame, returning the frame's
/// /// return address, or 0 if the end of the stack has been reached.
/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) SelfInfoError!usize;
/// ```
pub const SelfInfo = if (@hasDecl(root, "debug") and @hasDecl(root.debug, "SelfInfo"))
    root.debug.SelfInfo
else switch (std.Target.ObjectFormat.default(native_os, native_arch)) {
    .coff => if (native_os == .windows) @import("debug/SelfInfo/Windows.zig") else void,
    .elf => switch (native_os) {
        .freestanding, .other => void,
        else => @import("debug/SelfInfo/Elf.zig"),
    },
    .macho => @import("debug/SelfInfo/MachO.zig"),
    .plan9, .spirv, .wasm => void,
    .c, .hex, .raw => unreachable,
};

pub const SelfInfoError = error{
    /// The required debug info is invalid or corrupted.
    InvalidDebugInfo,
    /// The required debug info could not be found.
    MissingDebugInfo,
    /// The required debug info was found, and may be valid, but is not supported by this implementation.
    UnsupportedDebugInfo,
    /// The required debug info could not be read from disk due to some IO error.
    ReadFailed,
    OutOfMemory,
    Canceled,
    Unexpected,
};

pub const simple_panic = @import("debug/simple_panic.zig");
pub const no_panic = @import("debug/no_panic.zig");

/// A fully-featured panic handler namespace which lowers all panics to calls to `panicFn`.
/// Safety panics will use formatted printing to provide a meaningful error message.
/// The signature of `panicFn` should match that of `defaultPanic`.
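///
/// For example, a root source file might install this namespace with the
/// default handler (an illustrative sketch; any function matching
/// `defaultPanic`'s signature can be substituted):
///
/// ```
/// pub const panic = std.debug.FullPanic(std.debug.defaultPanic);
/// ```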
pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
    return struct {
        pub const call = panicFn;
        pub fn sentinelMismatch(expected: anytype, found: @TypeOf(expected)) noreturn {
            @branchHint(.cold);
            std.debug.panicExtra(@returnAddress(), "sentinel mismatch: expected {any}, found {any}", .{
                expected, found,
            });
        }
        pub fn unwrapError(err: anyerror) noreturn {
            @branchHint(.cold);
            std.debug.panicExtra(@returnAddress(), "attempt to unwrap error: {s}", .{@errorName(err)});
        }
        pub fn outOfBounds(index: usize, len: usize) noreturn {
            @branchHint(.cold);
            std.debug.panicExtra(@returnAddress(), "index out of bounds: index {d}, len {d}", .{ index, len });
        }
        pub fn startGreaterThanEnd(start: usize, end: usize) noreturn {
            @branchHint(.cold);
            std.debug.panicExtra(@returnAddress(), "start index {d} is larger than end index {d}", .{ start, end });
        }
        pub fn inactiveUnionField(active: anytype, accessed: @TypeOf(active)) noreturn {
            @branchHint(.cold);
            std.debug.panicExtra(@returnAddress(), "access of union field '{s}' while field '{s}' is active", .{
                @tagName(accessed), @tagName(active),
            });
        }
        pub fn sliceCastLenRemainder(src_len: usize) noreturn {
            @branchHint(.cold);
            std.debug.panicExtra(@returnAddress(), "slice length '{d}' does not divide exactly into destination elements", .{src_len});
        }
        pub fn reachedUnreachable() noreturn {
            @branchHint(.cold);
            call("reached unreachable code", @returnAddress());
        }
        pub fn unwrapNull() noreturn {
            @branchHint(.cold);
            call("attempt to use null value", @returnAddress());
        }
        pub fn castToNull() noreturn {
            @branchHint(.cold);
            call("cast causes pointer to be null", @returnAddress());
        }
        pub fn incorrectAlignment() noreturn {
            @branchHint(.cold);
            call("incorrect alignment", @returnAddress());
        }
        pub fn invalidErrorCode() noreturn {
            @branchHint(.cold);
            call("invalid error code", @returnAddress());
        }
        pub fn integerOutOfBounds() noreturn {
            @branchHint(.cold);
            call("integer does not fit in destination type", @returnAddress());
        }
        pub fn integerOverflow() noreturn {
            @branchHint(.cold);
            call("integer overflow", @returnAddress());
        }
        pub fn shlOverflow() noreturn {
            @branchHint(.cold);
            call("left shift overflowed bits", @returnAddress());
        }
        pub fn shrOverflow() noreturn {
            @branchHint(.cold);
            call("right shift overflowed bits", @returnAddress());
        }
        pub fn divideByZero() noreturn {
            @branchHint(.cold);
            call("division by zero", @returnAddress());
        }
        pub fn exactDivisionRemainder() noreturn {
            @branchHint(.cold);
            call("exact division produced remainder", @returnAddress());
        }
        pub fn integerPartOutOfBounds() noreturn {
            @branchHint(.cold);
            call("integer part of floating point value out of bounds", @returnAddress());
        }
        pub fn corruptSwitch() noreturn {
            @branchHint(.cold);
            call("switch on corrupt value", @returnAddress());
        }
        pub fn shiftRhsTooBig() noreturn {
            @branchHint(.cold);
            call("shift amount is greater than the type size", @returnAddress());
        }
        pub fn invalidEnumValue() noreturn {
            @branchHint(.cold);
            call("invalid enum value", @returnAddress());
        }
        pub fn forLenMismatch() noreturn {
            @branchHint(.cold);
            call("for loop over objects with non-equal lengths", @returnAddress());
        }
        pub fn copyLenMismatch() noreturn {
            @branchHint(.cold);
            call("source and destination arguments have non-equal lengths", @returnAddress());
        }
        pub fn memcpyAlias() noreturn {
            @branchHint(.cold);
            call("@memcpy arguments alias", @returnAddress());
        }
        pub fn noreturnReturned() noreturn {
            @branchHint(.cold);
            call("'noreturn' function returned", @returnAddress());
        }
    };
}

/// Unresolved source locations can be represented with a single `usize` that
/// corresponds to a virtual memory address of the program counter. Combined
/// with debug information, those values can be converted into a resolved
/// source location, including file, line, and column.
pub const SourceLocation = struct {
    line: u64,
    column: u64,
    file_name: []const u8,

    pub const invalid: SourceLocation = .{
        .line = 0,
        .column = 0,
        .file_name = &.{},
    };
};

pub const Symbol = struct {
    name: ?[]const u8,
    compile_unit_name: ?[]const u8,
    source_location: ?SourceLocation,
    pub const unknown: Symbol = .{
        .name = null,
        .compile_unit_name = null,
        .source_location = null,
    };
};

/// Deprecated because it returns the optimization mode of the standard
/// library, when the caller probably wants to use the optimization mode of
/// their own module.
pub const runtime_safety = switch (builtin.mode) {
    .Debug, .ReleaseSafe => true,
    .ReleaseFast, .ReleaseSmall => false,
};

/// Whether we can unwind the stack on this target, allowing capturing and/or printing the current
/// stack trace. It is still legal to call `captureCurrentStackTrace`, `writeCurrentStackTrace`, and
/// `dumpCurrentStackTrace` if this is `false`; it will just print an error / capture an empty
/// trace due to missing functionality. This value is just intended as a heuristic to avoid
/// pointless work e.g. capturing always-empty stack traces.
pub const sys_can_stack_trace = switch (builtin.cpu.arch) {
    // `@returnAddress()` in LLVM 10 gives
    // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
    // On Emscripten, Zig only supports `@returnAddress()` in debug builds
    // because Emscripten's implementation is very slow.
    .wasm32,
    .wasm64,
    => native_os == .emscripten and builtin.mode == .Debug,

    // `@returnAddress()` is unsupported in LLVM 21.
    .bpfel,
    .bpfeb,
    => false,

    else => true,
};

/// Allows the caller to freely write to stderr until `unlockStdErr` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
pub fn lockStdErr() void {
    std.Progress.lockStdErr();
}

pub fn unlockStdErr() void {
    std.Progress.unlockStdErr();
}

/// Allows the caller to freely write to stderr until `unlockStderrWriter` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
///
/// The lock is recursive, so it is valid for the same thread to call `lockStderrWriter` multiple
/// times. The primary motivation is that this allows the panic handler to safely dump the stack
/// trace and panic message even if the mutex was held at the panic site.
///
/// The returned `Writer` does not need to be manually flushed: flushing is performed automatically
/// when the matching `unlockStderrWriter` call occurs.
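///
/// For example (an illustrative sketch):
///
/// ```
/// var buf: [64]u8 = undefined;
/// const stderr, const ttyconf = std.debug.lockStderrWriter(&buf);
/// defer std.debug.unlockStderrWriter();
/// ttyconf.setColor(stderr, .bold) catch {};
/// stderr.writeAll("about to do the thing\n") catch {};
/// ```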
pub fn lockStderrWriter(buffer: []u8) struct { *Writer, tty.Config } {
    const global = struct {
        var conf: ?tty.Config = null;
    };
    const w = std.Progress.lockStderrWriter(buffer);
    // The stderr lock also locks access to `global.conf`.
    if (global.conf == null) {
        global.conf = .detect(.stderr());
    }
    return .{ w, global.conf.? };
}

pub fn unlockStderrWriter() void {
    std.Progress.unlockStderrWriter();
}

/// Print to stderr, silently returning on failure. Intended for use in "printf
/// debugging". Use `std.log` functions for proper logging.
///
/// Uses a 64-byte buffer for formatted printing which is flushed before this
/// function returns.
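///
/// For example (illustrative; `i` stands in for any formatted argument):
///
/// ```
/// std.debug.print("iteration {d}\n", .{i});
/// ```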
pub fn print(comptime fmt: []const u8, args: anytype) void {
    var buffer: [64]u8 = undefined;
    const bw, _ = lockStderrWriter(&buffer);
    defer unlockStderrWriter();
    nosuspend bw.print(fmt, args) catch return;
}

/// Marked `inline` to propagate a comptime-known error to callers.
pub inline fn getSelfDebugInfo() !*SelfInfo {
    if (SelfInfo == void) return error.UnsupportedTarget;
    const S = struct {
        var self_info: SelfInfo = .init;
    };
    return &S.self_info;
}

/// Tries to print a hexadecimal view of the bytes, unbuffered, and ignores any error returned.
/// Obtains the stderr mutex while dumping.
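///
/// For example (illustrative):
///
/// ```
/// std.debug.dumpHex(&[_]u8{ 0xDE, 0xAD, 0xBE, 0xEF });
/// ```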
pub fn dumpHex(bytes: []const u8) void {
    const bw, const ttyconf = lockStderrWriter(&.{});
    defer unlockStderrWriter();
    dumpHexFallible(bw, ttyconf, bytes) catch {};
}

/// Prints a hexadecimal view of the bytes, returning any error that occurs.
pub fn dumpHexFallible(bw: *Writer, ttyconf: tty.Config, bytes: []const u8) !void {
    var chunks = mem.window(u8, bytes, 16, 16);
    while (chunks.next()) |window| {
        // 1. Print the address.
        const address = (@intFromPtr(bytes.ptr) + 0x10 * (std.math.divCeil(usize, chunks.index orelse bytes.len, 16) catch unreachable)) - 0x10;
        try ttyconf.setColor(bw, .dim);
        // We print the address in lowercase and the bytes in uppercase hexadecimal to distinguish them more.
        // Also, make sure all lines are aligned by padding the address.
        try bw.print("{x:0>[1]}  ", .{ address, @sizeOf(usize) * 2 });
        try ttyconf.setColor(bw, .reset);

        // 2. Print the bytes.
        for (window, 0..) |byte, index| {
            try bw.print("{X:0>2} ", .{byte});
            if (index == 7) try bw.writeByte(' ');
        }
        try bw.writeByte(' ');
        if (window.len < 16) {
            var missing_columns = (16 - window.len) * 3;
            if (window.len < 8) missing_columns += 1;
            try bw.splatByteAll(' ', missing_columns);
        }

        // 3. Print the characters.
        for (window) |byte| {
            if (std.ascii.isPrint(byte)) {
                try bw.writeByte(byte);
            } else {
                // Related: https://github.com/ziglang/zig/issues/7600
                if (ttyconf == .windows_api) {
                    try bw.writeByte('.');
                    continue;
                }

                // Let's print some common control codes as graphical Unicode symbols.
                // We don't want to do this for all control codes because most control codes apart from
                // the ones that Zig has escape sequences for are likely not very useful to print as symbols.
                switch (byte) {
                    '\n' => try bw.writeAll("␊"),
                    '\r' => try bw.writeAll("␍"),
                    '\t' => try bw.writeAll("␉"),
                    else => try bw.writeByte('.'),
                }
            }
        }
        try bw.writeByte('\n');
    }
}

test dumpHexFallible {
    const bytes: []const u8 = &.{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x01, 0x12, 0x13 };
    var aw: Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();

    try dumpHexFallible(&aw.writer, .no_color, bytes);
    const expected = try std.fmt.allocPrint(std.testing.allocator,
        \\{x:0>[2]}  00 11 22 33 44 55 66 77  88 99 AA BB CC DD EE FF  .."3DUfw........
        \\{x:0>[2]}  01 12 13                                          ...
        \\
    , .{
        @intFromPtr(bytes.ptr),
        @intFromPtr(bytes.ptr) + 16,
        @sizeOf(usize) * 2,
    });
    defer std.testing.allocator.free(expected);
    try std.testing.expectEqualStrings(expected, aw.written());
}

/// The pointer through which a `cpu_context.Native` is received from callers of stack tracing logic.
pub const CpuContextPtr = if (cpu_context.Native == noreturn) noreturn else *const cpu_context.Native;

/// Invokes detectable illegal behavior when `ok` is `false`.
///
/// In Debug and ReleaseSafe modes, calls to this function are always
/// generated, and the `unreachable` statement triggers a panic.
///
/// In ReleaseFast and ReleaseSmall modes, calls to this function are optimized
/// away, and in fact the optimizer is able to use the assertion in its
/// heuristics.
///
/// Inside a test block, it is best to use the `std.testing` module rather than
/// this function, because this function may not detect a test failure in
/// ReleaseFast and ReleaseSmall mode. Outside of a test block, this assert
/// function is the correct function to use.
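///
/// For example, the optimizer may assume the asserted condition holds
/// (an illustrative sketch):
///
/// ```
/// fn divide(a: u32, b: u32) u32 {
///     // In ReleaseFast/ReleaseSmall, the optimizer may assume `b != 0` here.
///     std.debug.assert(b != 0);
///     return a / b;
/// }
/// ```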
pub fn assert(ok: bool) void {
    if (!ok) unreachable; // assertion failure
}

/// Invokes detectable illegal behavior when the provided slice is not mapped
/// or lacks read permissions.
pub fn assertReadable(slice: []const volatile u8) void {
    if (!runtime_safety) return;
    for (slice) |*byte| _ = byte.*;
}

/// Invokes detectable illegal behavior when the provided pointer is not aligned
/// to the provided amount.
pub fn assertAligned(ptr: anytype, comptime alignment: std.mem.Alignment) void {
    const aligned_ptr: *align(alignment.toByteUnits()) const anyopaque = @ptrCast(@alignCast(ptr));
    _ = aligned_ptr;
}

/// Equivalent to `@panic` but with a formatted message.
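///
/// For example (illustrative; `state` stands in for any formatted argument):
///
/// ```
/// std.debug.panic("invalid state: {d}", .{state});
/// ```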
pub fn panic(comptime format: []const u8, args: anytype) noreturn {
    @branchHint(.cold);
    panicExtra(@returnAddress(), format, args);
}

/// Equivalent to `@panic` but with a formatted message and an explicitly provided return address
/// which will be the first address in the stack trace.
pub fn panicExtra(
    ret_addr: ?usize,
    comptime format: []const u8,
    args: anytype,
) noreturn {
    @branchHint(.cold);

    const size = 0x1000;
    const trunc_msg = "(msg truncated)";
    var buf: [size + trunc_msg.len]u8 = undefined;
    var bw: Writer = .fixed(buf[0..size]);
    // a minor annoyance with this is that it will result in the NoSpaceLeft
    // error being part of the @panic stack trace (but that error should
    // only happen rarely)
    const msg = if (bw.print(format, args)) |_| bw.buffered() else |_| blk: {
        @memcpy(buf[size..], trunc_msg);
        break :blk &buf;
    };
    std.builtin.panic.call(msg, ret_addr);
}

/// Non-zero whenever the program triggered a panic.
/// The counter is incremented/decremented atomically.
var panicking = std.atomic.Value(u8).init(0);

/// Counts how many times the panic handler is invoked by this thread.
/// This is used to catch and handle panics triggered by the panic handler.
threadlocal var panic_stage: usize = 0;

/// For backends that cannot handle the language features depended on by the
/// default panic handler, we will use a simpler implementation.
const use_trap_panic = switch (builtin.zig_backend) {
    .stage2_aarch64,
    .stage2_arm,
    .stage2_powerpc,
    .stage2_riscv64,
    .stage2_spirv,
    .stage2_wasm,
    .stage2_x86,
    => true,
    else => false,
};

/// Dumps a stack trace to standard error, then aborts.
pub fn defaultPanic(
    msg: []const u8,
    first_trace_addr: ?usize,
) noreturn {
    @branchHint(.cold);

    if (use_trap_panic) @trap();

    switch (builtin.os.tag) {
        .freestanding, .other, .@"3ds", .vita => {
            @trap();
        },
        .uefi => {
            const uefi = std.os.uefi;

            var utf16_buffer: [1000]u16 = undefined;
            const len_minus_3 = std.unicode.utf8ToUtf16Le(&utf16_buffer, msg) catch 0;
            utf16_buffer[len_minus_3..][0..3].* = .{ '\r', '\n', 0 };
            const len = len_minus_3 + 3;
            const exit_msg = utf16_buffer[0 .. len - 1 :0];

            // Output to both std_err and con_out, as std_err is easier
            // to read in stuff like QEMU at times, but, unlike con_out,
            // isn't visible on actual hardware if directly booted into
            inline for ([_]?*uefi.protocol.SimpleTextOutput{ uefi.system_table.std_err, uefi.system_table.con_out }) |o| {
                if (o) |out| {
                    out.setAttribute(.{ .foreground = .red }) catch {};
                    _ = out.outputString(exit_msg) catch {};
                    out.setAttribute(.{ .foreground = .white }) catch {};
                }
            }

            if (uefi.system_table.boot_services) |bs| {
                // ExitData buffer must be allocated using boot_services.allocatePool (spec: page 220)
                const exit_data = uefi.raw_pool_allocator.dupeZ(u16, exit_msg) catch @trap();
                bs.exit(uefi.handle, .aborted, exit_data) catch {};
            }
            @trap();
        },
        .cuda, .amdhsa => std.posix.abort(),
        .plan9 => {
            var status: [std.os.plan9.ERRMAX]u8 = undefined;
            const len = @min(msg.len, status.len - 1);
            @memcpy(status[0..len], msg[0..len]);
            status[len] = 0;
            std.os.plan9.exits(status[0..len :0]);
        },
        else => {},
    }

    if (enable_segfault_handler) {
        // If a segfault happens while panicking, we want it to actually segfault, not trigger
        // the handler.
        resetSegfaultHandler();
    }

    // There is very similar logic to the following in `handleSegfault`.
    switch (panic_stage) {
        0 => {
            panic_stage = 1;
            _ = panicking.fetchAdd(1, .seq_cst);

            trace: {
                const stderr, const tty_config = lockStderrWriter(&.{});
                defer unlockStderrWriter();

                if (builtin.single_threaded) {
                    stderr.print("panic: ", .{}) catch break :trace;
                } else {
                    const current_thread_id = std.Thread.getCurrentId();
                    stderr.print("thread {d} panic: ", .{current_thread_id}) catch break :trace;
                }
                stderr.print("{s}\n", .{msg}) catch break :trace;

                if (@errorReturnTrace()) |t| if (t.index > 0) {
                    stderr.writeAll("error return context:\n") catch break :trace;
                    writeStackTrace(t, stderr, tty_config) catch break :trace;
                    stderr.writeAll("\nstack trace:\n") catch break :trace;
                };
                writeCurrentStackTrace(.{
                    .first_address = first_trace_addr orelse @returnAddress(),
                    .allow_unsafe_unwind = true, // we're crashing anyway, give it our all!
                }, stderr, tty_config) catch break :trace;
            }

            waitForOtherThreadToFinishPanicking();
        },
        1 => {
            panic_stage = 2;
            // A panic happened while trying to print a previous panic message.
            // We're still holding the mutex but that's fine as we're going to
            // call abort().
            fs.File.stderr().writeAll("aborting due to recursive panic\n") catch {};
        },
        else => {}, // Panicked while printing the recursive panic message.
    }

    posix.abort();
}

/// Must be called only after adding 1 to `panicking`. There are three callsites.
fn waitForOtherThreadToFinishPanicking() void {
    if (panicking.fetchSub(1, .seq_cst) != 1) {
        // Another thread is panicking, wait for the last one to finish
        // and call abort()
        if (builtin.single_threaded) unreachable;

        // Sleep forever without hammering the CPU
        var futex = std.atomic.Value(u32).init(0);
        while (true) std.Thread.Futex.wait(&futex, 0);
        unreachable;
    }
}

pub const StackUnwindOptions = struct {
    /// If not `null`, we will ignore all frames up until this return address. This is typically
    /// used to omit intermediate handling code (for instance, a panic handler and its machinery)
    /// from stack traces.
    first_address: ?usize = null,
    /// If not `null`, we will unwind from this `cpu_context.Native` instead of the current top of
    /// the stack. The main use case here is printing stack traces from signal handlers, where the
    /// kernel provides a `*const cpu_context.Native` of the state before the signal.
    context: ?CpuContextPtr = null,
    /// If `true`, stack unwinding strategies which may cause crashes are used as a last resort.
    /// If `false`, only known-safe mechanisms will be attempted.
    allow_unsafe_unwind: bool = false,
};

/// Capture and return the current stack trace. The returned `StackTrace` stores its addresses in
/// the given buffer, so `addr_buf` must have a lifetime at least equal to the `StackTrace`.
///
/// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it.
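///
/// For example, capturing a trace now and printing it later (an illustrative sketch):
///
/// ```
/// var addrs: [32]usize = undefined;
/// const trace = std.debug.captureCurrentStackTrace(.{}, &addrs);
/// // ... later, while `addrs` is still alive ...
/// std.debug.dumpStackTrace(&trace);
/// ```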
pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) StackTrace {
    const empty_trace: StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
    if (!std.options.allow_stack_tracing) return empty_trace;
    var it: StackIterator = .init(options.context);
    defer it.deinit();
    if (!it.stratOk(options.allow_unsafe_unwind)) return empty_trace;
    var total_frames: usize = 0;
    var index: usize = 0;
    var wait_for = options.first_address;
    // Ideally, we would iterate the whole stack so that the `index` in the returned trace was
    // indicative of how many frames were skipped. However, this has a significant runtime cost
    // in some cases, so at least for now, we don't do that.
    while (index < addr_buf.len) switch (it.next()) {
        .switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break,
        .end => break,
        .frame => |ret_addr| {
            if (total_frames > 10_000) {
                // Limit the number of frames in case of (e.g.) broken debug information which is
                // getting unwinding stuck in a loop.
                break;
            }
            total_frames += 1;
            if (wait_for) |target| {
                if (ret_addr != target) continue;
                wait_for = null;
            }
            addr_buf[index] = ret_addr;
            index += 1;
        },
    };
    return .{
        .index = index,
        .instruction_addresses = addr_buf[0..index],
    };
}
/// Write the current stack trace to `writer`, annotated with source locations.
///
/// See `captureCurrentStackTrace` to capture the trace addresses into a buffer instead of printing.
pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
    var threaded: Io.Threaded = .init_single_threaded;
    const io = threaded.ioBasic();

    if (!std.options.allow_stack_tracing) {
        tty_config.setColor(writer, .dim) catch {};
        try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
        tty_config.setColor(writer, .reset) catch {};
        return;
    }
    const di_gpa = getDebugInfoAllocator();
    const di = getSelfDebugInfo() catch |err| switch (err) {
        error.UnsupportedTarget => {
            tty_config.setColor(writer, .dim) catch {};
            try writer.print("Cannot print stack trace: debug info unavailable for target\n", .{});
            tty_config.setColor(writer, .reset) catch {};
            return;
        },
    };
    var it: StackIterator = .init(options.context);
    defer it.deinit();
    if (!it.stratOk(options.allow_unsafe_unwind)) {
        tty_config.setColor(writer, .dim) catch {};
        try writer.print("Cannot print stack trace: safe unwind unavailable for target\n", .{});
        tty_config.setColor(writer, .reset) catch {};
        return;
    }
    var total_frames: usize = 0;
    var wait_for = options.first_address;
    var printed_any_frame = false;
    while (true) switch (it.next()) {
        .switch_to_fp => |unwind_error| {
            switch (StackIterator.fp_usability) {
                .useless, .unsafe => {},
                .safe, .ideal => continue, // no need to even warn
            }
            const module_name = di.getModuleName(di_gpa, unwind_error.address) catch "???";
            const caption: []const u8 = switch (unwind_error.err) {
                error.MissingDebugInfo => "unwind info unavailable",
                error.InvalidDebugInfo => "unwind info invalid",
                error.UnsupportedDebugInfo => "unwind info unsupported",
                error.ReadFailed => "filesystem error",
                error.OutOfMemory => "out of memory",
                error.Canceled => "operation canceled",
                error.Unexpected => "unexpected error",
            };
            if (it.stratOk(options.allow_unsafe_unwind)) {
                tty_config.setColor(writer, .dim) catch {};
                try writer.print(
                    "Unwind error at address `{s}:0x{x}` ({s}), remaining frames may be incorrect\n",
                    .{ module_name, unwind_error.address, caption },
                );
                tty_config.setColor(writer, .reset) catch {};
            } else {
                tty_config.setColor(writer, .dim) catch {};
                try writer.print(
                    "Unwind error at address `{s}:0x{x}` ({s}), stopping trace early\n",
                    .{ module_name, unwind_error.address, caption },
                );
                tty_config.setColor(writer, .reset) catch {};
                return;
            }
        },
        .end => break,
        .frame => |ret_addr| {
            if (total_frames > 10_000) {
                tty_config.setColor(writer, .dim) catch {};
                try writer.print(
                    "Stopping trace after {d} frames (large frame count may indicate broken debug info)\n",
                    .{total_frames},
                );
                tty_config.setColor(writer, .reset) catch {};
                return;
            }
            total_frames += 1;
            if (wait_for) |target| {
                if (ret_addr != target) continue;
                wait_for = null;
            }
            // `ret_addr` is the return address, which is *after* the function call. Subtract
            // `ra_call_offset` (1 on most targets) to get an address *in* the call for a better source location.
            try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
            printed_any_frame = true;
        },
    };
    if (!printed_any_frame) return writer.writeAll("(empty stack trace)\n");
}
/// A thin wrapper around `writeCurrentStackTrace` which writes to stderr and ignores write errors.
pub fn dumpCurrentStackTrace(options: StackUnwindOptions) void {
    const stderr, const tty_config = lockStderrWriter(&.{});
    defer unlockStderrWriter();
    writeCurrentStackTrace(.{
        .first_address = a: {
            if (options.first_address) |a| break :a a;
            if (options.context != null) break :a null;
            break :a @returnAddress(); // don't include this frame in the trace
        },
        .context = options.context,
        .allow_unsafe_unwind = options.allow_unsafe_unwind,
    }, stderr, tty_config) catch |err| switch (err) {
        error.WriteFailed => {},
    };
}

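/// Adapts a captured `StackTrace` for formatted printing. For example (an
/// illustrative sketch, assuming a previously captured `trace`):
///
/// ```
/// std.debug.print("{f}", .{std.debug.FormatStackTrace{
///     .stack_trace = trace,
///     .tty_config = .no_color,
/// }});
/// ```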
pub const FormatStackTrace = struct {
    stack_trace: StackTrace,
    tty_config: tty.Config,

    pub fn format(context: @This(), writer: *Io.Writer) Io.Writer.Error!void {
        try writer.writeAll("\n");
        try writeStackTrace(&context.stack_trace, writer, context.tty_config);
    }
};

/// Write a previously captured stack trace to `writer`, annotated with source locations.
pub fn writeStackTrace(st: *const StackTrace, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
    if (!std.options.allow_stack_tracing) {
        tty_config.setColor(writer, .dim) catch {};
        try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
        tty_config.setColor(writer, .reset) catch {};
        return;
    }
    // We use an independent Io implementation here in case there was a problem
    // with the application's Io implementation itself.
    var threaded: Io.Threaded = .init_single_threaded;
    const io = threaded.ioBasic();

    // Fetch `st.index` straight away. Aside from avoiding redundant loads, this prevents issues if
    // `st` is `@errorReturnTrace()` and errors are encountered while writing the stack trace.
    const n_frames = st.index;
    if (n_frames == 0) return writer.writeAll("(empty stack trace)\n");
    const di_gpa = getDebugInfoAllocator();
    const di = getSelfDebugInfo() catch |err| switch (err) {
        error.UnsupportedTarget => {
            tty_config.setColor(writer, .dim) catch {};
            try writer.print("Cannot print stack trace: debug info unavailable for target\n\n", .{});
            tty_config.setColor(writer, .reset) catch {};
            return;
        },
    };
    const captured_frames = @min(n_frames, st.instruction_addresses.len);
    for (st.instruction_addresses[0..captured_frames]) |ret_addr| {
        // `ret_addr` is the return address, which is *after* the function call. Subtract
        // `ra_call_offset` (1 on most targets) to get an address *in* the call for a better source location.
        try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
    }
    if (n_frames > captured_frames) {
        tty_config.setColor(writer, .bold) catch {};
        try writer.print("({d} additional stack frames skipped...)\n", .{n_frames - captured_frames});
        tty_config.setColor(writer, .reset) catch {};
    }
}
/// A thin wrapper around `writeStackTrace` which writes to stderr and ignores write errors.
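///
/// For example, dumping an error return trace (illustrative):
///
/// ```
/// if (@errorReturnTrace()) |t| std.debug.dumpStackTrace(t);
/// ```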
pub fn dumpStackTrace(st: *const StackTrace) void {
    const stderr, const tty_config = lockStderrWriter(&.{});
    defer unlockStderrWriter();
    writeStackTrace(st, stderr, tty_config) catch |err| switch (err) {
        error.WriteFailed => {},
    };
}

const StackIterator = union(enum) {
    /// We will first report the current PC of this `CpuContextPtr`, then we will switch to a
    /// different strategy to actually unwind.
    ctx_first: CpuContextPtr,
    /// Unwinding using debug info (e.g. DWARF CFI).
    di: if (SelfInfo != void and SelfInfo.can_unwind and fp_usability != .ideal)
        SelfInfo.UnwindContext
    else
        noreturn,
    /// Naive frame-pointer-based unwinding. Very simple, but typically unreliable.
    fp: usize,

    /// It is important that this function is marked `inline` so that it can safely use
    /// `@frameAddress` and `cpu_context.Native.current`, as the caller's stack frame and
    /// our own are one and the same.
    ///
    /// `opt_context_ptr` must remain valid while the `StackIterator` is used.
    inline fn init(opt_context_ptr: ?CpuContextPtr) StackIterator {
        if (opt_context_ptr) |context_ptr| {
            // Use `ctx_first` here so we report the PC in the context before unwinding any further.
            return .{ .ctx_first = context_ptr };
        }

        // Otherwise, we're going to capture the current context or frame address, so we don't need
        // `ctx_first`, because the first PC is in `std.debug` and we need to unwind before reaching
        // a frame we want to report.

        // Work around the C backend being unable to use inline assembly on MSVC by disabling the
        // call to `current`. This effectively constrains stack trace collection and dumping to FP
        // unwinding when building with CBE for MSVC.
        if (!(builtin.zig_backend == .stage2_c and builtin.target.abi == .msvc) and
            SelfInfo != void and
            SelfInfo.can_unwind and
            cpu_context.Native != noreturn and
            fp_usability != .ideal)
        {
            return .{ .di = .init(&.current()) };
        }
        return .{
            // On SPARC, the frame pointer will point to the previous frame's save area,
            // meaning we will read the previous return address and thus miss a frame.
            // Instead, start at the stack pointer so we get the return address from the
            // current frame's save area. The addition of the stack bias cannot fail here
            // since we know we have a valid stack pointer.
            .fp = if (native_arch.isSPARC()) sp: {
                flushSparcWindows();
                break :sp asm (""
                    : [_] "={o6}" (-> usize),
                ) + stack_bias;
            } else @frameAddress(),
        };
    }
    fn deinit(si: *StackIterator) void {
        switch (si.*) {
            .ctx_first => {},
            .fp => {},
            .di => |*unwind_context| unwind_context.deinit(getDebugInfoAllocator()),
        }
    }

    noinline fn flushSparcWindows() void {
        // Flush all register windows except the current one (hence `noinline`). This ensures that
        // we actually see meaningful data on the stack when we walk the frame chain.
        if (comptime builtin.target.cpu.has(.sparc, .v9))
            asm volatile ("flushw" ::: .{ .memory = true })
        else
            asm volatile ("ta 3" ::: .{ .memory = true }); // ST_FLUSH_WINDOWS
    }

    const FpUsability = enum {
        /// FP unwinding is impractical on this target. For example, due to its very silly ABI
        /// design decisions, it's not possible to do generic FP unwinding on MIPS without a
        /// complicated code scanning algorithm.
        useless,
        /// FP unwinding is unsafe on this target; we may crash when doing so. We will only perform
        /// FP unwinding in the case of crashes/panics, or if the user opts in.
        unsafe,
        /// FP unwinding is guaranteed to be safe on this target. We will do so if unwinding with
        /// debug info does not work, and if this compilation has frame pointers enabled.
        safe,
        /// FP unwinding is the best option on this target. This is usually because the ABI requires
        /// a backchain pointer, thus making it always available, safe, and fast.
        ideal,
    };

    const fp_usability: FpUsability = switch (builtin.target.cpu.arch) {
        .alpha,
        .avr,
        .csky,
        .microblaze,
        .microblazeel,
        .mips,
        .mipsel,
        .mips64,
        .mips64el,
        .msp430,
        .sh,
        .sheb,
        .xcore,
        => .useless,
        .hexagon,
        // The PowerPC ABIs don't actually strictly require a backchain pointer; they allow omitting
        // it when full unwind info is present. Despite this, both GCC and Clang always enforce the
        // presence of the backchain pointer no matter what options they are given. This seems to be
        // a case of "the spec is only a polite suggestion", except it works in our favor this time!
        .powerpc,
        .powerpcle,
        .powerpc64,
        .powerpc64le,
        .sparc,
        .sparc64,
        => .ideal,
        // https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Respect-the-purpose-of-specific-CPU-registers
        .aarch64 => if (builtin.target.os.tag.isDarwin()) .safe else .unsafe,
        else => .unsafe,
    };

    /// Whether the current unwind strategy is allowed given `allow_unsafe`.
    fn stratOk(it: *const StackIterator, allow_unsafe: bool) bool {
        return switch (it.*) {
            .ctx_first, .di => true,
            // If we omitted frame pointers from *this* compilation, FP unwinding would crash
            // immediately regardless of anything. But FPs could also be omitted from a different
            // linked object, so it's not guaranteed to be safe, unless the target specifically
            // requires it.
            .fp => switch (fp_usability) {
                .useless => false,
                .unsafe => allow_unsafe and !builtin.omit_frame_pointer,
                .safe => !builtin.omit_frame_pointer,
                .ideal => true,
            },
        };
    }

    const Result = union(enum) {
        /// A stack frame has been found; this is the corresponding return address.
        frame: usize,
        /// The end of the stack has been reached.
        end,
        /// We were using `SelfInfo.UnwindContext`, but are now switching to FP unwinding due to this error.
 957        switch_to_fp: struct {
 958            address: usize,
 959            err: SelfInfoError,
 960        },
 961    };
 962
 963    fn next(it: *StackIterator) Result {
 964        switch (it.*) {
 965            .ctx_first => |context_ptr| {
 966                // After the first frame, start actually unwinding.
 967                it.* = if (SelfInfo != void and SelfInfo.can_unwind and fp_usability != .ideal)
 968                    .{ .di = .init(context_ptr) }
 969                else
 970                    .{ .fp = context_ptr.getFp() };
 971
 972                // The caller expects *return* addresses, where they will subtract 1 to find the address of the call.
 973                // However, we have the actual current PC, which should not be adjusted. Compensate by adding 1.
 974                return .{ .frame = context_ptr.getPc() +| 1 };
 975            },
 976            .di => |*unwind_context| {
 977                const di = getSelfDebugInfo() catch unreachable;
 978                const di_gpa = getDebugInfoAllocator();
 979                const ret_addr = di.unwindFrame(di_gpa, unwind_context) catch |err| {
 980                    const pc = unwind_context.pc;
 981                    const fp = unwind_context.getFp();
 982                    it.* = .{ .fp = fp };
 983                    return .{ .switch_to_fp = .{
 984                        .address = pc,
 985                        .err = err,
 986                    } };
 987                };
 988                if (ret_addr <= 1) return .end;
 989                return .{ .frame = ret_addr };
 990            },
 991            .fp => |fp| {
 992                if (fp == 0) return .end; // we reached the "sentinel" base pointer
 993
 994                const bp_addr = applyOffset(fp, fp_to_bp_offset) orelse return .end;
 995                const ra_addr = applyOffset(fp, fp_to_ra_offset) orelse return .end;
 996
 997                if (bp_addr == 0 or !mem.isAligned(bp_addr, @alignOf(usize)) or
 998                    ra_addr == 0 or !mem.isAligned(ra_addr, @alignOf(usize)))
 999                {
1000                    // This isn't valid, but it most likely indicates end of stack.
1001                    return .end;
1002                }
1003
1004                const bp_ptr: *const usize = @ptrFromInt(bp_addr);
1005                const ra_ptr: *const usize = @ptrFromInt(ra_addr);
1006                const bp = applyOffset(bp_ptr.*, stack_bias) orelse return .end;
1007
1008                // If the stack grows downwards, `bp > fp` should always hold; conversely, if it
1009                // grows upwards, `bp < fp` should always hold. If that is not the case, this
1010                // frame is invalid, so we'll treat it as though we reached end of stack. The
1011                // exception is address 0, which is a graceful end-of-stack signal, in which case
1012                // *this* return address is valid and the *next* iteration will be the last.
1013                if (bp != 0 and switch (comptime builtin.target.stackGrowth()) {
1014                    .down => bp <= fp,
1015                    .up => bp >= fp,
1016                }) return .end;
1017
1018                it.fp = bp;
1019                const ra = stripInstructionPtrAuthCode(ra_ptr.*);
1020                if (ra <= 1) return .end;
1021                return .{ .frame = ra };
1022            },
1023        }
1024    }
1025
1026    /// Offset of the saved base pointer (previous frame pointer) wrt the frame pointer.
1027    const fp_to_bp_offset = off: {
1028        // On 32-bit PA-RISC, the base pointer is the final word of the frame marker.
1029        if (native_arch == .hppa) break :off -1 * @sizeOf(usize);
1030        // On 64-bit PA-RISC, the frame marker was shrunk significantly; now there's just the return
1031        // address followed by the base pointer.
1032        if (native_arch == .hppa64) break :off -1 * @sizeOf(usize);
1033        // On LoongArch and RISC-V, the frame pointer points to the top of the saved register area,
1034        // in which the base pointer is the first word.
1035        if (native_arch.isLoongArch() or native_arch.isRISCV()) break :off -2 * @sizeOf(usize);
1036        // On OpenRISC, the frame pointer is stored below the return address.
1037        if (native_arch == .or1k) break :off -2 * @sizeOf(usize);
1038        // On SPARC, the frame pointer points to the save area which holds 16 slots for the local
1039        // and incoming registers. The base pointer (i6) is stored in its customary save slot.
1040        if (native_arch.isSPARC()) break :off 14 * @sizeOf(usize);
1041        // Everywhere else, the frame pointer points directly to the location of the base pointer.
1042        break :off 0;
1043    };
1044
1045    /// Offset of the saved return address wrt the frame pointer.
1046    const fp_to_ra_offset = off: {
1047        // On 32-bit PA-RISC, the return address sits in the middle-ish of the frame marker.
1048        if (native_arch == .hppa) break :off -5 * @sizeOf(usize);
1049        // On 64-bit PA-RISC, the frame marker was shrunk significantly; now there's just the return
1050        // address followed by the base pointer.
1051        if (native_arch == .hppa64) break :off -2 * @sizeOf(usize);
1052        // On LoongArch and RISC-V, the frame pointer points to the top of the saved register area,
1053        // in which the return address is the second word.
1054        if (native_arch.isLoongArch() or native_arch.isRISCV()) break :off -1 * @sizeOf(usize);
1055        // On OpenRISC, the return address is stored below the stack parameter area.
1056        if (native_arch == .or1k) break :off -1 * @sizeOf(usize);
1057        if (native_arch.isPowerPC64()) break :off 2 * @sizeOf(usize);
1058        // On s390x, r14 is the link register and we need to grab it from its customary slot in the
1059        // register save area (ELF ABI s390x Supplement §1.2.2.2).
1060        if (native_arch == .s390x) break :off 14 * @sizeOf(usize);
1061        // On SPARC, the frame pointer points to the save area which holds 16 slots for the local
1062        // and incoming registers. The return address (i7) is stored in its customary save slot.
1063        if (native_arch.isSPARC()) break :off 15 * @sizeOf(usize);
1064        break :off @sizeOf(usize);
1065    };
1066
1067    /// Value to add to the stack pointer and frame/base pointers to get the real location being
1068    /// pointed to. Yes, SPARC really does this.
1069    const stack_bias = bias: {
1070        if (native_arch == .sparc64) break :bias 2047;
1071        break :bias 0;
1072    };
1073
1074    /// On some oddball architectures, a return address points to the call instruction rather than
1075    /// the instruction following it.
1076    const ra_call_offset = off: {
1077        if (native_arch.isSPARC()) break :off 0;
1078        break :off 1;
1079    };
1080
1081    fn applyOffset(addr: usize, comptime off: comptime_int) ?usize {
1082        if (off >= 0) return math.add(usize, addr, off) catch return null;
1083        return math.sub(usize, addr, -off) catch return null;
1084    }
1085};
1086
1087/// Some platforms use pointer authentication: the upper bits of instruction pointers contain a
1088/// signature. This function clears those signature bits to make the pointer directly usable.
1089pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
1090    if (native_arch.isAARCH64()) {
1091        // `hint 0x07` maps to `xpaclri` (or `nop` if the hardware doesn't support it)
1092        // The save / restore is because `xpaclri` operates on x30 (LR)
1093        return asm (
1094            \\mov x16, x30
1095            \\mov x30, x15
1096            \\hint 0x07
1097            \\mov x15, x30
1098            \\mov x30, x16
1099            : [ret] "={x15}" (-> usize),
1100            : [ptr] "{x15}" (ptr),
1101            : .{ .x16 = true });
1102    }
1103
1104    return ptr;
1105}
1106
1107fn printSourceAtAddress(gpa: Allocator, io: Io, debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) Writer.Error!void {
1108    const symbol: Symbol = debug_info.getSymbol(gpa, io, address) catch |err| switch (err) {
1109        error.MissingDebugInfo,
1110        error.UnsupportedDebugInfo,
1111        error.InvalidDebugInfo,
1112        => .unknown,
1113        error.ReadFailed, error.Unexpected, error.Canceled => s: {
1114            tty_config.setColor(writer, .dim) catch {};
1115            try writer.print("Failed to read debug info from filesystem, trace may be incomplete\n\n", .{});
1116            tty_config.setColor(writer, .reset) catch {};
1117            break :s .unknown;
1118        },
1119        error.OutOfMemory => s: {
1120            tty_config.setColor(writer, .dim) catch {};
1121            try writer.print("Ran out of memory loading debug info, trace may be incomplete\n\n", .{});
1122            tty_config.setColor(writer, .reset) catch {};
1123            break :s .unknown;
1124        },
1125    };
1126    defer if (symbol.source_location) |sl| gpa.free(sl.file_name);
1127    return printLineInfo(
1128        writer,
1129        symbol.source_location,
1130        address,
1131        symbol.name orelse "???",
1132        symbol.compile_unit_name orelse debug_info.getModuleName(gpa, address) catch "???",
1133        tty_config,
1134    );
1135}
1136fn printLineInfo(
1137    writer: *Writer,
1138    source_location: ?SourceLocation,
1139    address: usize,
1140    symbol_name: []const u8,
1141    compile_unit_name: []const u8,
1142    tty_config: tty.Config,
1143) Writer.Error!void {
1144    nosuspend {
1145        tty_config.setColor(writer, .bold) catch {};
1146
1147        if (source_location) |*sl| {
1148            try writer.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
1149        } else {
1150            try writer.writeAll("???:?:?");
1151        }
1152
1153        tty_config.setColor(writer, .reset) catch {};
1154        try writer.writeAll(": ");
1155        tty_config.setColor(writer, .dim) catch {};
1156        try writer.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name });
1157        tty_config.setColor(writer, .reset) catch {};
1158        try writer.writeAll("\n");
1159
1160        // Show the matching source code line if possible
1161        if (source_location) |sl| {
1162            if (printLineFromFile(writer, sl)) {
1163                if (sl.column > 0) {
1164                    // The caret already takes one char
1165                    const space_needed = @as(usize, @intCast(sl.column - 1));
1166
1167                    try writer.splatByteAll(' ', space_needed);
1168                    tty_config.setColor(writer, .green) catch {};
1169                    try writer.writeAll("^");
1170                    tty_config.setColor(writer, .reset) catch {};
1171                }
1172                try writer.writeAll("\n");
1173            } else |_| {
1174                // Ignore all errors; it's a better UX to just print the source location without the
1175                // corresponding line number. The user can always open the source file themselves.
1176            }
1177        }
1178    }
1179}
1180fn printLineFromFile(writer: *Writer, source_location: SourceLocation) !void {
1181    // Allow overriding the target-agnostic source line printing logic by exposing `root.debug.printLineFromFile`.
1182    if (@hasDecl(root, "debug") and @hasDecl(root.debug, "printLineFromFile")) {
1183        return root.debug.printLineFromFile(writer, source_location);
1184    }
1185
1186    // Need this to always block even in async I/O mode, because this could potentially
1187    // be called from e.g. the event loop code crashing.
    var f = try fs.cwd().openFile(source_location.file_name, .{});
    defer f.close();
    // TODO fstat and make sure that the file has the correct size

    var buf: [4096]u8 = undefined;
    var amt_read = try f.read(buf[0..]);
    const line_start = seek: {
        var current_line_start: usize = 0;
        var next_line: usize = 1;
        while (next_line != source_location.line) {
            const slice = buf[current_line_start..amt_read];
            if (mem.indexOfScalar(u8, slice, '\n')) |pos| {
                next_line += 1;
                if (pos == slice.len - 1) {
                    amt_read = try f.read(buf[0..]);
                    current_line_start = 0;
                } else current_line_start += pos + 1;
            } else if (amt_read < buf.len) {
                return error.EndOfFile;
            } else {
                amt_read = try f.read(buf[0..]);
                current_line_start = 0;
            }
        }
        break :seek current_line_start;
    };
    const slice = buf[line_start..amt_read];
    if (mem.indexOfScalar(u8, slice, '\n')) |pos| {
        const line = slice[0 .. pos + 1];
        mem.replaceScalar(u8, line, '\t', ' ');
        return writer.writeAll(line);
    } else { // The line extends past the buffer: either another read is needed to find the delimiter, or the file ends without one.
        mem.replaceScalar(u8, slice, '\t', ' ');
        try writer.writeAll(slice);
        while (amt_read == buf.len) {
            amt_read = try f.read(buf[0..]);
            if (mem.indexOfScalar(u8, buf[0..amt_read], '\n')) |pos| {
                const line = buf[0 .. pos + 1];
                mem.replaceScalar(u8, line, '\t', ' ');
                return writer.writeAll(line);
            } else {
                const line = buf[0..amt_read];
                mem.replaceScalar(u8, line, '\t', ' ');
                try writer.writeAll(line);
            }
        }
        // Ensure that the last line of the file still ends with a newline when printed.
        try writer.writeByte('\n');
    }
}

test printLineFromFile {
    var aw: Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();
    const output_stream = &aw.writer;

    const allocator = std.testing.allocator;
    const join = std.fs.path.join;
    const expectError = std.testing.expectError;
    const expectEqualStrings = std.testing.expectEqualStrings;

    var test_dir = std.testing.tmpDir(.{});
    defer test_dir.cleanup();
    // Relies on testing.tmpDir internals which is not ideal, but SourceLocation requires paths.
    const test_dir_path = try join(allocator, &.{ ".zig-cache", "tmp", test_dir.sub_path[0..] });
    defer allocator.free(test_dir_path);

    // Cases
    {
        const path = try join(allocator, &.{ test_dir_path, "one_line.zig" });
        defer allocator.free(path);
        try test_dir.dir.writeFile(.{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" });

        try expectError(error.EndOfFile, printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
        try expectEqualStrings("no new lines in this file, but one is printed anyway\n", aw.written());
        aw.clearRetainingCapacity();
    }
    {
        const path = try fs.path.join(allocator, &.{ test_dir_path, "three_lines.zig" });
        defer allocator.free(path);
        try test_dir.dir.writeFile(.{
            .sub_path = "three_lines.zig",
            .data =
            \\1
            \\2
            \\3
            ,
        });

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
        try expectEqualStrings("1\n", aw.written());
        aw.clearRetainingCapacity();

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 3, .column = 0 });
        try expectEqualStrings("3\n", aw.written());
        aw.clearRetainingCapacity();
    }
    {
        const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{});
        defer file.close();
        const path = try fs.path.join(allocator, &.{ test_dir_path, "line_overlaps_page_boundary.zig" });
        defer allocator.free(path);

        const overlap = 10;
        var buf: [16]u8 = undefined;
        var file_writer = file.writer(&buf);
        const writer = &file_writer.interface;
        try writer.splatByteAll('a', std.heap.page_size_min - overlap);
        try writer.writeByte('\n');
        try writer.splatByteAll('a', overlap);
        try writer.flush();

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
        try expectEqualStrings(("a" ** overlap) ++ "\n", aw.written());
        aw.clearRetainingCapacity();
    }
    {
        const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{});
        defer file.close();
        const path = try fs.path.join(allocator, &.{ test_dir_path, "file_ends_on_page_boundary.zig" });
        defer allocator.free(path);

        var file_writer = file.writer(&.{});
        const writer = &file_writer.interface;
        try writer.splatByteAll('a', std.heap.page_size_max);

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
        try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", aw.written());
        aw.clearRetainingCapacity();
    }
    {
        const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{});
        defer file.close();
        const path = try fs.path.join(allocator, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" });
        defer allocator.free(path);

        var file_writer = file.writer(&.{});
        const writer = &file_writer.interface;
        try writer.splatByteAll('a', 3 * std.heap.page_size_max);

        try expectError(error.EndOfFile, printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
        try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", aw.written());
        aw.clearRetainingCapacity();

        try writer.writeAll("a\na");

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
        try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", aw.written());
        aw.clearRetainingCapacity();

        try printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
        try expectEqualStrings("a\n", aw.written());
        aw.clearRetainingCapacity();
    }
    {
        const file = try test_dir.dir.createFile("file_of_newlines.zig", .{});
        defer file.close();
        const path = try fs.path.join(allocator, &.{ test_dir_path, "file_of_newlines.zig" });
        defer allocator.free(path);

        var file_writer = file.writer(&.{});
        const writer = &file_writer.interface;
        const real_file_start = 3 * std.heap.page_size_min;
        try writer.splatByteAll('\n', real_file_start);
        try writer.writeAll("abc\ndef");

        try printLineFromFile(output_stream, .{ .file_name = path, .line = real_file_start + 1, .column = 0 });
        try expectEqualStrings("abc\n", aw.written());
        aw.clearRetainingCapacity();

        try printLineFromFile(output_stream, .{ .file_name = path, .line = real_file_start + 2, .column = 0 });
        try expectEqualStrings("def\n", aw.written());
        aw.clearRetainingCapacity();
    }
}

/// The returned allocator should be thread-safe if the compilation is multi-threaded, because
/// multiple threads could capture and/or print stack traces simultaneously.
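///
/// The default below can be replaced by exposing `root.debug.getDebugInfoAllocator`. A minimal
/// sketch of an override, assuming the program maintains its own thread-safe `debug_allocator`
/// (a hypothetical global):
///
/// ```
/// pub const debug = struct {
///     pub fn getDebugInfoAllocator() std.mem.Allocator {
///         return debug_allocator.allocator();
///     }
/// };
/// ```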
pub fn getDebugInfoAllocator() Allocator {
    // Allow overriding the debug info allocator by exposing `root.debug.getDebugInfoAllocator`.
    if (@hasDecl(root, "debug") and @hasDecl(root.debug, "getDebugInfoAllocator")) {
        return root.debug.getDebugInfoAllocator();
    }
    // Otherwise, use a global arena backed by the page allocator.
    const S = struct {
        var arena: std.heap.ArenaAllocator = .init(std.heap.page_allocator);
        var ts_arena: std.heap.ThreadSafeAllocator = .{ .child_allocator = arena.allocator() };
    };
    return S.ts_arena.allocator();
}

/// Whether the current target can print useful debug information when a segfault occurs.
pub const have_segfault_handling_support = switch (native_os) {
    .haiku,
    .linux,
    .serenity,

    .dragonfly,
    .freebsd,
    .netbsd,
    .openbsd,

    .driverkit,
    .ios,
    .maccatalyst,
    .macos,
    .tvos,
    .visionos,
    .watchos,

    .illumos,

    .windows,
    => true,

    else => false,
};

const enable_segfault_handler = std.options.enable_segfault_handler;
pub const default_enable_segfault_handler = runtime_safety and have_segfault_handling_support;

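/// Attaches the segfault handler, but only if `std.options.enable_segfault_handler` is set.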
pub fn maybeEnableSegfaultHandler() void {
    if (enable_segfault_handler) {
        attachSegfaultHandler();
    }
}

var windows_segfault_handle: ?windows.HANDLE = null;

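/// Sets the action for each of the signals covered by the segfault handler (SIGSEGV, SIGILL,
/// SIGBUS, and SIGFPE) to `act`.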
pub fn updateSegfaultHandler(act: ?*const posix.Sigaction) void {
    posix.sigaction(.SEGV, act, null);
    posix.sigaction(.ILL, act, null);
    posix.sigaction(.BUS, act, null);
    posix.sigaction(.FPE, act, null);
}

/// Attaches a global handler for several signals which, when triggered, prints output to stderr
/// similar to the default panic handler, with a message containing the type of signal and a stack
/// trace if possible. This implementation does not just call the panic handler, because unwinding
/// the stack (for a stack trace) when a signal is received requires special target-specific logic.
///
/// The signals for which a handler is installed are:
/// * SIGSEGV (segmentation fault)
/// * SIGILL (illegal instruction)
/// * SIGBUS (bus error)
/// * SIGFPE (arithmetic exception)
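///
/// For example, a program that disables the default handler can still attach it manually later
/// (a sketch):
///
/// ```
/// pub const std_options: std.Options = .{ .enable_segfault_handler = false };
///
/// pub fn main() void {
///     std.debug.attachSegfaultHandler();
///     // ...
/// }
/// ```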
pub fn attachSegfaultHandler() void {
    if (!have_segfault_handling_support) {
        @compileError("segfault handler not supported for this target");
    }
    if (native_os == .windows) {
        windows_segfault_handle = windows.ntdll.RtlAddVectoredExceptionHandler(0, handleSegfaultWindows);
        return;
    }
    const act = posix.Sigaction{
        .handler = .{ .sigaction = handleSegfaultPosix },
        .mask = posix.sigemptyset(),
        .flags = (posix.SA.SIGINFO | posix.SA.RESTART | posix.SA.RESETHAND),
    };
    updateSegfaultHandler(&act);
}

fn resetSegfaultHandler() void {
    if (native_os == .windows) {
        if (windows_segfault_handle) |handle| {
            assert(windows.ntdll.RtlRemoveVectoredExceptionHandler(handle) != 0);
            windows_segfault_handle = null;
        }
        return;
    }
    const act = posix.Sigaction{
        .handler = .{ .handler = posix.SIG.DFL },
        .mask = posix.sigemptyset(),
        .flags = 0,
    };
    updateSegfaultHandler(&act);
}

fn handleSegfaultPosix(sig: posix.SIG, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.c) noreturn {
    if (use_trap_panic) @trap();
    const addr: ?usize, const name: []const u8 = info: {
        if (native_os == .linux and native_arch == .x86_64) {
            // x86_64 doesn't have a full 64-bit virtual address space.
            // Addresses outside of that address space are non-canonical
            // and the CPU won't provide the faulting address to us.
            // This happens when accessing memory addresses such as 0xaaaaaaaaaaaaaaaa
            // but can also happen when no addressable memory is involved;
            // for example when reading/writing model-specific registers
            // by executing `rdmsr` or `wrmsr` in user-space (unprivileged mode).
            const SI_KERNEL = 0x80;
            if (sig == .SEGV and info.code == SI_KERNEL) {
                break :info .{ null, "General protection exception" };
            }
        }
        const addr: usize = switch (native_os) {
            .serenity,
            .dragonfly,
            .freebsd,
            .driverkit,
            .ios,
            .maccatalyst,
            .macos,
            .tvos,
            .visionos,
            .watchos,
            => @intFromPtr(info.addr),
            .linux,
            => @intFromPtr(info.fields.sigfault.addr),
            .netbsd,
            => @intFromPtr(info.info.reason.fault.addr),
            .haiku,
            .openbsd,
            => @intFromPtr(info.data.fault.addr),
            .illumos,
            => @intFromPtr(info.reason.fault.addr),
            else => comptime unreachable,
        };
        const name = switch (sig) {
            .SEGV => "Segmentation fault",
            .ILL => "Illegal instruction",
            .BUS => "Bus error",
            .FPE => "Arithmetic exception",
            else => unreachable,
        };
        break :info .{ addr, name };
    };
    const opt_cpu_context: ?cpu_context.Native = cpu_context.fromPosixSignalContext(ctx_ptr);

    if (native_arch.isSPARC()) {
        // It's unclear to me whether this is a QEMU bug or also real kernel behavior, but under
        // QEMU, I observed that the most recent register window wasn't being spilled to the
        // stack as expected when a signal arrived. A `flushw` from the signal handler does not
        // appear to be sufficient either. On the other hand, when taking a synchronous stack trace
        // and using `flushw`, this all works as expected. So it is *probably* a QEMU bug, but
        // someone with real SPARC hardware should verify.
        //
        // In any case, the register save area exists specifically so that register windows can be
        // spilled asynchronously. This means that it should be perfectly fine for us to manually do
        // so here.
        const ctx = opt_cpu_context.?;
        @as(*[16]usize, @ptrFromInt(ctx.o[6] + StackIterator.stack_bias)).* = ctx.l ++ ctx.i;
    }

    handleSegfault(addr, name, if (opt_cpu_context) |*ctx| ctx else null);
}

fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(.winapi) c_long {
    if (use_trap_panic) @trap();
    const name: []const u8, const addr: ?usize = switch (info.ExceptionRecord.ExceptionCode) {
        windows.EXCEPTION_DATATYPE_MISALIGNMENT => .{ "Unaligned memory access", null },
        windows.EXCEPTION_ACCESS_VIOLATION => .{ "Segmentation fault", info.ExceptionRecord.ExceptionInformation[1] },
        windows.EXCEPTION_ILLEGAL_INSTRUCTION => .{ "Illegal instruction", info.ContextRecord.getRegs().ip },
        windows.EXCEPTION_STACK_OVERFLOW => .{ "Stack overflow", null },
        else => return windows.EXCEPTION_CONTINUE_SEARCH,
    };
    handleSegfault(addr, name, &cpu_context.fromWindowsContext(info.ContextRecord));
}

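/// Handles a fatal signal or exception described by `name` and the optional faulting address
/// `addr`. Dispatches to `root.debug.handleSegfault` if declared, or to `defaultHandleSegfault`
/// otherwise.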
fn handleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContextPtr) noreturn {
    // Allow overriding the target-agnostic segfault handler by exposing `root.debug.handleSegfault`.
    if (@hasDecl(root, "debug") and @hasDecl(root.debug, "handleSegfault")) {
        return root.debug.handleSegfault(addr, name, opt_ctx);
    }
    return defaultHandleSegfault(addr, name, opt_ctx);
}

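/// The default implementation of `handleSegfault`: prints the name of the fault and, if a CPU
/// context is available, a stack trace to stderr, then aborts the process.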
pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContextPtr) noreturn {
    // There is very similar logic to the following in `defaultPanic`.
    switch (panic_stage) {
        0 => {
            panic_stage = 1;
            _ = panicking.fetchAdd(1, .seq_cst);

            trace: {
                const stderr, const tty_config = lockStderrWriter(&.{});
                defer unlockStderrWriter();

                if (addr) |a| {
                    stderr.print("{s} at address 0x{x}\n", .{ name, a }) catch break :trace;
                } else {
                    stderr.print("{s} (no address available)\n", .{name}) catch break :trace;
                }
                if (opt_ctx) |context| {
                    writeCurrentStackTrace(.{
                        .context = context,
                        .allow_unsafe_unwind = true, // we're crashing anyway, give it our all!
                    }, stderr, tty_config) catch break :trace;
                }
            }
        },
        1 => {
            panic_stage = 2;
            // A segfault happened while trying to print a previous panic message.
            // We're still holding the mutex but that's fine as we're going to
            // call abort().
            fs.File.stderr().writeAll("aborting due to recursive panic\n") catch {};
        },
        else => {}, // Panicked while printing the recursive panic message.
    }

    // We cannot allow the signal handler to return because when it runs the original instruction
    // again, the memory may be mapped and undefined behavior would occur rather than repeating
    // the segfault. So we simply abort here.
    posix.abort();
}

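/// Prints `prefix` followed by the current stack pointer. Note that the inline assembly reads
/// `rsp`, so this is only usable on x86_64.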
pub fn dumpStackPointerAddr(prefix: []const u8) void {
    const sp = asm (""
        : [sp] "={rsp}" (-> usize),
    );
    print("{s} sp = 0x{x}\n", .{ prefix, sp });
}

test "manage resources correctly" {
    if (SelfInfo == void) return error.SkipZigTest;
    const S = struct {
        noinline fn showMyTrace() usize {
            return @returnAddress();
        }
    };
    const gpa = std.testing.allocator;
    var threaded: Io.Threaded = .init_single_threaded;
    const io = threaded.ioBasic();
    var discarding: Io.Writer.Discarding = .init(&.{});
    var di: SelfInfo = .init;
    defer di.deinit(gpa);
    try printSourceAtAddress(
        gpa,
        io,
        &di,
        &discarding.writer,
        S.showMyTrace(),
        .no_color,
    );
}

/// This API helps you track where a value originated and where it was mutated,
/// or any other points of interest.
/// In debug mode, it adds a small size penalty (104 bytes on 64-bit architectures)
/// to the aggregate that you add it to.
/// In release mode, it is size 0 and all methods are no-ops.
/// This is a pre-made type with default settings.
/// For more advanced usage, see `ConfigurableTrace`.
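///
/// For example (a sketch; `Value` is a hypothetical type being instrumented):
///
/// ```
/// const Value = struct {
///     data: u64,
///     trace: std.debug.Trace = .init,
///
///     fn mutate(v: *Value, data: u64) void {
///         v.data = data;
///         v.trace.add("mutated");
///     }
/// };
/// ```
///
/// Calling `trace.dump()` at the point of interest then prints the recorded stack traces to
/// stderr.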
pub const Trace = ConfigurableTrace(2, 4, builtin.mode == .Debug);

pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime is_enabled: bool) type {
    return struct {
        addrs: [actual_size][stack_frame_count]usize,
        notes: [actual_size][]const u8,
        index: Index,

        const actual_size = if (enabled) size else 0;
        const Index = if (enabled) usize else u0;

        pub const init: @This() = .{
            .addrs = undefined,
            .notes = undefined,
            .index = 0,
        };

        pub const enabled = is_enabled;

        pub const add = if (enabled) addNoInline else addNoOp;

        pub noinline fn addNoInline(t: *@This(), note: []const u8) void {
            comptime assert(enabled);
            return addAddr(t, @returnAddress(), note);
        }

        pub inline fn addNoOp(t: *@This(), note: []const u8) void {
            _ = t;
            _ = note;
            comptime assert(!enabled);
        }

        pub fn addAddr(t: *@This(), addr: usize, note: []const u8) void {
            if (!enabled) return;

            if (t.index < size) {
                t.notes[t.index] = note;
                const addrs = &t.addrs[t.index];
                const st = captureCurrentStackTrace(.{ .first_address = addr }, addrs);
                if (st.index < addrs.len) {
                    @memset(addrs[st.index..], 0); // zero unused frames to indicate end of trace
                }
            }
            // Keep counting even if the end is reached so that the
            // user can find out how much more size they need.
            t.index += 1;
        }

        pub fn dump(t: @This()) void {
            if (!enabled) return;

            const stderr, const tty_config = lockStderrWriter(&.{});
            defer unlockStderrWriter();
            const end = @min(t.index, size);
            for (t.addrs[0..end], 0..) |frames_array, i| {
                stderr.print("{s}:\n", .{t.notes[i]}) catch return;
                var frames_array_mutable = frames_array;
                const frames = mem.sliceTo(frames_array_mutable[0..], 0);
                const stack_trace: StackTrace = .{
                    .index = frames.len,
                    .instruction_addresses = frames,
                };
                writeStackTrace(&stack_trace, stderr, tty_config) catch return;
            }
            if (t.index > end) {
                stderr.print("{d} more traces not shown; consider increasing trace size\n", .{
                    t.index - end,
                }) catch return;
            }
        }

        pub fn format(
            t: @This(),
            comptime fmt: []const u8,
            options: std.fmt.Options,
            writer: *Writer,
        ) !void {
            if (fmt.len != 0) std.fmt.invalidFmtError(fmt, t);
            _ = options;
            if (enabled) {
                try writer.writeAll("\n");
                t.dump();
                try writer.writeAll("\n");
            } else {
                return writer.writeAll("(value tracing disabled)");
            }
        }
    };
}

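/// A debugging aid which asserts that a resource is in the expected locked or unlocked state
/// when runtime safety is enabled, and compiles down to a zero-sized no-op otherwise.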
pub const SafetyLock = struct {
    state: State = if (runtime_safety) .unlocked else .unknown,

    pub const State = if (runtime_safety) enum { unlocked, locked } else enum { unknown };

    pub fn lock(l: *SafetyLock) void {
        if (!runtime_safety) return;
        assert(l.state == .unlocked);
        l.state = .locked;
    }

    pub fn unlock(l: *SafetyLock) void {
        if (!runtime_safety) return;
        assert(l.state == .locked);
        l.state = .unlocked;
    }

    pub fn assertUnlocked(l: SafetyLock) void {
        if (!runtime_safety) return;
        assert(l.state == .unlocked);
    }

    pub fn assertLocked(l: SafetyLock) void {
        if (!runtime_safety) return;
        assert(l.state == .locked);
    }
};

test SafetyLock {
    var safety_lock: SafetyLock = .{};
    safety_lock.assertUnlocked();
    safety_lock.lock();
    safety_lock.assertLocked();
    safety_lock.unlock();
    safety_lock.assertUnlocked();
}

/// Detect whether the program is being executed in the Valgrind virtual machine.
///
/// When Valgrind integrations are disabled, this returns comptime-known false.
/// Otherwise, the result is runtime-known.
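///
/// This can be used, for example, to skip work that is prohibitively slow under Valgrind
/// (a sketch; `expensiveIntegrityCheck` is a hypothetical function):
///
/// ```
/// if (!std.debug.inValgrind()) {
///     expensiveIntegrityCheck();
/// }
/// ```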
pub inline fn inValgrind() bool {
    if (@inComptime()) return false;
    if (!builtin.valgrind_support) return false;
    return std.valgrind.runningOnValgrind() > 0;
}

test {
    _ = &Dwarf;
    _ = &Pdb;
    _ = &SelfInfo;
    _ = &dumpHex;
}
1777}