Commit f87156e33c

Martin Wickham <spexguy070@gmail.com>
2021-09-30 01:37:12
Add a panic handler to give better errors for crashes in sema
1 parent 2ed9288
Changed files (5)
lib/std/Thread/Mutex.zig
@@ -33,17 +33,29 @@ const testing = std.testing;
 const StaticResetEvent = std.thread.StaticResetEvent;
 
 /// Try to acquire the mutex without blocking. Returns `null` if the mutex is
-/// unavailable. Otherwise returns `Held`. Call `release` on `Held`.
-pub fn tryAcquire(m: *Mutex) ?Impl.Held {
+/// unavailable. Otherwise returns `Held`. Call `release` on `Held`, or use
+/// releaseDirect().
+pub fn tryAcquire(m: *Mutex) ?Held {
     return m.impl.tryAcquire();
 }
 
 /// Acquire the mutex. Deadlocks if the mutex is already
 /// held by the calling thread.
-pub fn acquire(m: *Mutex) Impl.Held {
+pub fn acquire(m: *Mutex) Held {
     return m.impl.acquire();
 }
 
+/// Release the mutex. Prefer Held.release() if available.
+pub fn releaseDirect(m: *Mutex) void {
+    return m.impl.releaseDirect();
+}
+
+/// A held mutex handle.  Call release to allow other threads to
+/// take the mutex.  Do not call release() more than once.
+/// For more complex scenarios, this handle can be discarded
+/// and Mutex.releaseDirect can be called instead.
+pub const Held = Impl.Held;
+
 const Impl = if (builtin.single_threaded)
     Dummy
 else if (builtin.os.tag == .windows)
@@ -53,6 +65,32 @@ else if (std.Thread.use_pthreads)
 else
     AtomicMutex;
 
+fn HeldInterface(comptime MutexType: type) type {
+    return struct {
+        const Mixin = @This();
+        pub const Held = struct {
+            mutex: *MutexType,
+
+            pub fn release(held: Mixin.Held) void {
+                held.mutex.releaseDirect();
+            }
+        };
+
+        pub fn tryAcquire(m: *MutexType) ?Mixin.Held {
+            if (m.tryAcquireDirect()) {
+                return Mixin.Held{ .mutex = m };
+            } else {
+                return null;
+            }
+        }
+
+        pub fn acquire(m: *MutexType) Mixin.Held {
+            m.acquireDirect();
+            return Mixin.Held{ .mutex = m };
+        }
+    };
+}
+
 pub const AtomicMutex = struct {
     state: State = .unlocked,
 
@@ -62,39 +100,32 @@ pub const AtomicMutex = struct {
         waiting,
     };
 
-    pub const Held = struct {
-        mutex: *AtomicMutex,
+    pub usingnamespace HeldInterface(@This());
 
-        pub fn release(held: Held) void {
-            switch (@atomicRmw(State, &held.mutex.state, .Xchg, .unlocked, .Release)) {
-                .unlocked => unreachable,
-                .locked => {},
-                .waiting => held.mutex.unlockSlow(),
-            }
-        }
-    };
-
-    pub fn tryAcquire(m: *AtomicMutex) ?Held {
-        if (@cmpxchgStrong(
+    fn tryAcquireDirect(m: *AtomicMutex) bool {
+        return @cmpxchgStrong(
             State,
             &m.state,
             .unlocked,
             .locked,
             .Acquire,
             .Monotonic,
-        ) == null) {
-            return Held{ .mutex = m };
-        } else {
-            return null;
-        }
+        ) == null;
     }
 
-    pub fn acquire(m: *AtomicMutex) Held {
+    fn acquireDirect(m: *AtomicMutex) void {
         switch (@atomicRmw(State, &m.state, .Xchg, .locked, .Acquire)) {
             .unlocked => {},
             else => |s| m.lockSlow(s),
         }
-        return Held{ .mutex = m };
+    }
+
+    fn releaseDirect(m: *AtomicMutex) void {
+        switch (@atomicRmw(State, &m.state, .Xchg, .unlocked, .Release)) {
+            .unlocked => unreachable,
+            .locked => {},
+            .waiting => m.unlockSlow(),
+        }
     }
 
     fn lockSlow(m: *AtomicMutex, current_state: State) void {
@@ -171,36 +202,20 @@ pub const AtomicMutex = struct {
 pub const PthreadMutex = struct {
     pthread_mutex: std.c.pthread_mutex_t = .{},
 
-    pub const Held = struct {
-        mutex: *PthreadMutex,
-
-        pub fn release(held: Held) void {
-            switch (std.c.pthread_mutex_unlock(&held.mutex.pthread_mutex)) {
-                .SUCCESS => return,
-                .INVAL => unreachable,
-                .AGAIN => unreachable,
-                .PERM => unreachable,
-                else => unreachable,
-            }
-        }
-    };
+    pub usingnamespace HeldInterface(@This());
 
-    /// Try to acquire the mutex without blocking. Returns null if
-    /// the mutex is unavailable. Otherwise returns Held. Call
-    /// release on Held.
-    pub fn tryAcquire(m: *PthreadMutex) ?Held {
-        if (std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS) {
-            return Held{ .mutex = m };
-        } else {
-            return null;
-        }
+    /// Try to acquire the mutex without blocking. Returns false if
+    /// the mutex is unavailable. Otherwise returns true. Call
+    /// releaseDirect when done.
+    fn tryAcquireDirect(m: *PthreadMutex) bool {
+        return std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS;
     }
 
     /// Acquire the mutex. Will deadlock if the mutex is already
     /// held by the calling thread.
-    pub fn acquire(m: *PthreadMutex) Held {
+    fn acquireDirect(m: *PthreadMutex) void {
         switch (std.c.pthread_mutex_lock(&m.pthread_mutex)) {
-            .SUCCESS => return Held{ .mutex = m },
+            .SUCCESS => {},
             .INVAL => unreachable,
             .BUSY => unreachable,
             .AGAIN => unreachable,
@@ -209,6 +224,16 @@ pub const PthreadMutex = struct {
             else => unreachable,
         }
     }
+
+    fn releaseDirect(m: *PthreadMutex) void {
+        switch (std.c.pthread_mutex_unlock(&m.pthread_mutex)) {
+            .SUCCESS => return,
+            .INVAL => unreachable,
+            .AGAIN => unreachable,
+            .PERM => unreachable,
+            else => unreachable,
+        }
+    }
 };
 
 /// This has the same semantics as `Mutex`, however it does not actually do any
@@ -216,58 +241,50 @@ pub const PthreadMutex = struct {
 pub const Dummy = struct {
     lock: @TypeOf(lock_init) = lock_init,
 
-    const lock_init = if (std.debug.runtime_safety) false else {};
-
-    pub const Held = struct {
-        mutex: *Dummy,
+    pub usingnamespace HeldInterface(@This());
 
-        pub fn release(held: Held) void {
-            if (std.debug.runtime_safety) {
-                held.mutex.lock = false;
-            }
-        }
-    };
+    const lock_init = if (std.debug.runtime_safety) false else {};
 
-    /// Try to acquire the mutex without blocking. Returns null if
-    /// the mutex is unavailable. Otherwise returns Held. Call
-    /// release on Held.
-    pub fn tryAcquire(m: *Dummy) ?Held {
+    /// Try to acquire the mutex without blocking. Returns false if
+    /// the mutex is unavailable. Otherwise returns true.
+    fn tryAcquireDirect(m: *Dummy) bool {
         if (std.debug.runtime_safety) {
-            if (m.lock) return null;
+            if (m.lock) return false;
             m.lock = true;
         }
-        return Held{ .mutex = m };
+        return true;
     }
 
     /// Acquire the mutex. Will deadlock if the mutex is already
     /// held by the calling thread.
-    pub fn acquire(m: *Dummy) Held {
-        return m.tryAcquire() orelse @panic("deadlock detected");
+    fn acquireDirect(m: *Dummy) void {
+        if (!m.tryAcquireDirect()) {
+            @panic("deadlock detected");
+        }
+    }
+
+    fn releaseDirect(m: *Dummy) void {
+        if (std.debug.runtime_safety) {
+            m.lock = false;
+        }
     }
 };
 
 const WindowsMutex = struct {
     srwlock: windows.SRWLOCK = windows.SRWLOCK_INIT,
 
-    pub const Held = struct {
-        mutex: *WindowsMutex,
+    pub usingnamespace HeldInterface(@This());
 
-        pub fn release(held: Held) void {
-            windows.kernel32.ReleaseSRWLockExclusive(&held.mutex.srwlock);
-        }
-    };
-
-    pub fn tryAcquire(m: *WindowsMutex) ?Held {
-        if (windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE) {
-            return Held{ .mutex = m };
-        } else {
-            return null;
-        }
+    fn tryAcquireDirect(m: *WindowsMutex) bool {
+        return windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE;
     }
 
-    pub fn acquire(m: *WindowsMutex) Held {
+    fn acquireDirect(m: *WindowsMutex) void {
         windows.kernel32.AcquireSRWLockExclusive(&m.srwlock);
-        return Held{ .mutex = m };
+    }
+
+    fn releaseDirect(m: *WindowsMutex) void {
+        windows.kernel32.ReleaseSRWLockExclusive(&m.srwlock);
     }
 };
 
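The Held token API is preserved, but every backend now funnels through acquireDirect/releaseDirect, and the shared HeldInterface mixin rebuilds tryAcquire/acquire/Held on top of them. Below is a minimal usage sketch of the two release styles on the refactored std.Thread.Mutex; it is illustrative only and not part of this commit:

    const std = @import("std");

    var counter: usize = 0;
    var mutex = std.Thread.Mutex{};

    fn bumpWithHeld() void {
        // Conventional style: keep the Held token and release it exactly once.
        const held = mutex.acquire();
        defer held.release();
        counter += 1;
    }

    fn bumpWithReleaseDirect() void {
        // New style: discard the token and release through the mutex itself.
        // The panic handler added below uses this form on panic_mutex, since
        // a Held token cannot easily be threaded through its recovery state machine.
        _ = mutex.acquire();
        defer mutex.releaseDirect();
        counter += 1;
    }
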
src/crash_report.zig
@@ -0,0 +1,581 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const debug = std.debug;
+const os = std.os;
+const io = std.io;
+const print_zir = @import("print_zir.zig");
+
+const Module = @import("Module.zig");
+const Sema = @import("Sema.zig");
+const Zir = @import("Zir.zig");
+
+pub const is_enabled = builtin.mode == .Debug;
+
+/// To use these crash report diagnostics, publish these symbols in your main file.
+/// You will also need to call initialize() on startup, preferably as the very first operation in your program.
+pub const root_decls = struct {
+    pub const panic = if (is_enabled) compilerPanic else std.builtin.default_panic;
+    pub const enable_segfault_handler = if (is_enabled) false else debug.default_enable_segfault_handler;
+};
+
+/// Install signal handlers to identify crashes and report diagnostics.
+pub fn initialize() void {
+    if (is_enabled and debug.have_segfault_handling_support) {
+        attachSegfaultHandler();
+    }
+}
+
+fn En(comptime T: type) type {
+    return if (is_enabled) T else void;
+}
+
+fn en(val: anytype) En(@TypeOf(val)) {
+    return if (is_enabled) val else {};
+}
+
+pub const AnalyzeBody = struct {
+    parent: if (is_enabled) ?*AnalyzeBody else void,
+    sema: En(*Sema),
+    block: En(*Module.Scope.Block),
+    body: En([]const Zir.Inst.Index),
+    body_index: En(usize),
+
+    pub fn push(self: *@This()) void {
+        if (!is_enabled) return;
+        const head = &zir_state;
+        debug.assert(self.parent == null);
+        self.parent = head.*;
+        head.* = self;
+    }
+
+    pub fn pop(self: *@This()) void {
+        if (!is_enabled) return;
+        const head = &zir_state;
+        const old = head.*.?;
+        debug.assert(old == self);
+        head.* = old.parent;
+    }
+
+    pub fn setBodyIndex(self: *@This(), index: usize) void {
+        if (!is_enabled) return;
+        self.body_index = index;
+    }
+};
+
+threadlocal var zir_state: ?*AnalyzeBody = if (is_enabled) null else @compileError("Cannot use zir_state if crash_report is disabled.");
+
+pub fn prepAnalyzeBody(sema: *Sema, block: *Module.Scope.Block, body: []const Zir.Inst.Index) AnalyzeBody {
+    if (is_enabled) {
+        return .{
+            .parent = null,
+            .sema = sema,
+            .block = block,
+            .body = body,
+            .body_index = 0,
+        };
+    } else {
+        if (@sizeOf(AnalyzeBody) != 0)
+            @compileError("AnalyzeBody must have zero size when crash reports are disabled");
+        return undefined;
+    }
+}
+
+fn dumpStatusReport() !void {
+    const anal = zir_state orelse return;
+    // Note: We have the panic mutex here, so we can safely use the global crash heap.
+    var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
+    const allocator = &fba.allocator;
+
+    const stderr = io.getStdErr().writer();
+    const block: *Scope.Block = anal.block;
+
+    try stderr.writeAll("Analyzing ");
+    try writeFullyQualifiedDeclWithFile(block.src_decl, stderr);
+    try stderr.writeAll("\n");
+
+    print_zir.renderInstructionContext(
+        allocator,
+        anal.body,
+        anal.body_index,
+        block.src_decl.getFileScope(),
+        block.src_decl.src_node,
+        6, // indent
+        stderr,
+    ) catch |err| switch (err) {
+        error.OutOfMemory => try stderr.writeAll("  <out of memory dumping zir>\n"),
+        else => |e| return e,
+    };
+    try stderr.writeAll("    For full context, use the command\n      zig ast-check -t ");
+    try writeFilePath(block.src_decl.getFileScope(), stderr);
+    try stderr.writeAll("\n\n");
+
+    var parent = anal.parent;
+    while (parent) |curr| {
+        fba.reset();
+        try stderr.writeAll("  in ");
+        try writeFullyQualifiedDeclWithFile(curr.block.src_decl, stderr);
+        try stderr.writeAll("\n    > ");
+        print_zir.renderSingleInstruction(
+            allocator,
+            curr.body[curr.body_index],
+            curr.block.src_decl.getFileScope(),
+            curr.block.src_decl.src_node,
+            6, // indent
+            stderr,
+        ) catch |err| switch (err) {
+            error.OutOfMemory => try stderr.writeAll("  <out of memory dumping zir>\n"),
+            else => |e| return e,
+        };
+        try stderr.writeAll("\n");
+
+        parent = curr.parent;
+    }
+
+    try stderr.writeAll("\n");
+}
+
+const Scope = Module.Scope;
+const Decl = Module.Decl;
+
+var crash_heap: [16 * 4096]u8 = undefined;
+
+fn writeFilePath(file: *Scope.File, stream: anytype) !void {
+    if (file.pkg.root_src_directory.path) |path| {
+        try stream.writeAll(path);
+        try stream.writeAll(std.fs.path.sep_str);
+    }
+    try stream.writeAll(file.sub_file_path);
+}
+
+fn writeFullyQualifiedDeclWithFile(decl: *Decl, stream: anytype) !void {
+    try writeFilePath(decl.getFileScope(), stream);
+    try stream.writeAll(": ");
+    try decl.namespace.renderFullyQualifiedName(std.mem.sliceTo(decl.name, 0), stream);
+}
+
+fn compilerPanic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
+    PanicSwitch.preDispatch();
+    @setCold(true);
+    const ret_addr = @returnAddress();
+    const stack_ctx: StackContext = .{ .current = .{ .ret_addr = ret_addr } };
+    PanicSwitch.dispatch(error_return_trace, stack_ctx, msg);
+}
+
+/// Attaches a global SIGSEGV handler
+pub fn attachSegfaultHandler() void {
+    if (!debug.have_segfault_handling_support) {
+        @compileError("segfault handler not supported for this target");
+    }
+    if (builtin.os.tag == .windows) {
+        _ = os.windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows);
+        return;
+    }
+    var act = os.Sigaction{
+        .handler = .{ .sigaction = handleSegfaultLinux },
+        .mask = os.empty_sigset,
+        .flags = (os.SA.SIGINFO | os.SA.RESTART | os.SA.RESETHAND),
+    };
+
+    os.sigaction(os.SIG.SEGV, &act, null);
+    os.sigaction(os.SIG.ILL, &act, null);
+    os.sigaction(os.SIG.BUS, &act, null);
+}
+
+fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) noreturn {
+    // TODO: use alarm() here to prevent infinite loops
+    PanicSwitch.preDispatch();
+
+    const addr = switch (builtin.os.tag) {
+        .linux => @ptrToInt(info.fields.sigfault.addr),
+        .freebsd => @ptrToInt(info.addr),
+        .netbsd => @ptrToInt(info.info.reason.fault.addr),
+        .openbsd => @ptrToInt(info.data.fault.addr),
+        .solaris => @ptrToInt(info.reason.fault.addr),
+        else => @compileError("TODO implement handleSegfaultLinux for new linux OS"),
+    };
+
+    var err_buffer: [128]u8 = undefined;
+    const error_msg = switch (sig) {
+        os.SIG.SEGV => std.fmt.bufPrint(&err_buffer, "Segmentation fault at address 0x{x}", .{addr}) catch "Segmentation fault",
+        os.SIG.ILL => std.fmt.bufPrint(&err_buffer, "Illegal instruction at address 0x{x}", .{addr}) catch "Illegal instruction",
+        os.SIG.BUS => std.fmt.bufPrint(&err_buffer, "Bus error at address 0x{x}", .{addr}) catch "Bus error",
+        else => std.fmt.bufPrint(&err_buffer, "Unknown error (signal {}) at address 0x{x}", .{ sig, addr }) catch "Unknown error",
+    };
+
+    const stack_ctx: StackContext = switch (builtin.cpu.arch) {
+        .i386 => ctx: {
+            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
+            const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);
+            break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+        },
+        .x86_64 => ctx: {
+            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ip = switch (builtin.os.tag) {
+                .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
+                .freebsd => @intCast(usize, ctx.mcontext.rip),
+                .openbsd => @intCast(usize, ctx.sc_rip),
+                else => unreachable,
+            };
+            const bp = switch (builtin.os.tag) {
+                .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
+                .openbsd => @intCast(usize, ctx.sc_rbp),
+                .freebsd => @intCast(usize, ctx.mcontext.rbp),
+                else => unreachable,
+            };
+            break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+        },
+        .arm => ctx: {
+            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ip = @intCast(usize, ctx.mcontext.arm_pc);
+            const bp = @intCast(usize, ctx.mcontext.arm_fp);
+            break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+        },
+        .aarch64 => ctx: {
+            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ip = @intCast(usize, ctx.mcontext.pc);
+            // x29 is the ABI-designated frame pointer
+            const bp = @intCast(usize, ctx.mcontext.regs[29]);
+            break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+        },
+        else => .not_supported,
+    };
+
+    PanicSwitch.dispatch(null, stack_ctx, error_msg);
+}
+
+const WindowsSegfaultMessage = union(enum) {
+    literal: []const u8,
+    segfault: void,
+    illegal_instruction: void,
+};
+
+fn handleSegfaultWindows(info: *os.windows.EXCEPTION_POINTERS) callconv(os.windows.WINAPI) c_long {
+    switch (info.ExceptionRecord.ExceptionCode) {
+        os.windows.EXCEPTION_DATATYPE_MISALIGNMENT => handleSegfaultWindowsExtra(info, .{ .literal = "Unaligned Memory Access" }),
+        os.windows.EXCEPTION_ACCESS_VIOLATION => handleSegfaultWindowsExtra(info, .segfault),
+        os.windows.EXCEPTION_ILLEGAL_INSTRUCTION => handleSegfaultWindowsExtra(info, .illegal_instruction),
+        os.windows.EXCEPTION_STACK_OVERFLOW => handleSegfaultWindowsExtra(info, .{ .literal = "Stack Overflow" }),
+        else => return os.windows.EXCEPTION_CONTINUE_SEARCH,
+    }
+}
+
+fn handleSegfaultWindowsExtra(info: *os.windows.EXCEPTION_POINTERS, comptime msg: WindowsSegfaultMessage) noreturn {
+    PanicSwitch.preDispatch();
+
+    const stack_ctx = if (@hasDecl(os.windows, "CONTEXT")) ctx: {
+        const regs = info.ContextRecord.getRegs();
+        break :ctx StackContext{ .exception = .{ .bp = regs.bp, .ip = regs.ip } };
+    } else ctx: {
+        const addr = @ptrToInt(info.ExceptionRecord.ExceptionAddress);
+        break :ctx StackContext{ .current = .{ .ret_addr = addr } };
+    };
+
+    switch (msg) {
+        .literal => |err| PanicSwitch.dispatch(null, stack_ctx, err),
+        .segfault => {
+            const format_item = "Segmentation fault at address 0x{x}";
+            var buf: [format_item.len + 32]u8 = undefined; // 32 is arbitrary, but sufficiently large
+            const to_print = std.fmt.bufPrint(&buf, format_item, .{info.ExceptionRecord.ExceptionInformation[1]}) catch unreachable;
+            PanicSwitch.dispatch(null, stack_ctx, to_print);
+        },
+        .illegal_instruction => {
+            const ip: ?usize = switch (stack_ctx) {
+                .exception => |ex| ex.ip,
+                .current => |cur| cur.ret_addr,
+                .not_supported => null,
+            };
+
+            if (ip) |addr| {
+                const format_item = "Illegal instruction at address 0x{x}";
+                var buf: [format_item.len + 32]u8 = undefined; // 32 is arbitrary, but sufficiently large
+                const to_print = std.fmt.bufPrint(&buf, format_item, .{addr}) catch unreachable;
+                PanicSwitch.dispatch(null, stack_ctx, to_print);
+            } else {
+                PanicSwitch.dispatch(null, stack_ctx, "Illegal Instruction");
+            }
+        },
+    }
+}
+
+const StackContext = union(enum) {
+    current: struct {
+        ret_addr: ?usize,
+    },
+    exception: struct {
+        bp: usize,
+        ip: usize,
+    },
+    not_supported: void,
+
+    pub fn dumpStackTrace(ctx: @This()) void {
+        switch (ctx) {
+            .current => |ct| {
+                debug.dumpCurrentStackTrace(ct.ret_addr);
+            },
+            .exception => |ex| {
+                debug.dumpStackTraceFromBase(ex.bp, ex.ip);
+            },
+            .not_supported => {
+                const stderr = io.getStdErr().writer();
+                stderr.writeAll("Stack trace not supported on this platform.\n") catch {};
+            },
+        }
+    }
+};
+
+const PanicSwitch = struct {
+    const RecoverStage = enum {
+        initialize,
+        report_stack,
+        release_mutex,
+        release_ref_count,
+        abort,
+        silent_abort,
+    };
+
+    const RecoverVerbosity = enum {
+        message_and_stack,
+        message_only,
+        silent,
+    };
+
+    const PanicState = struct {
+        recover_stage: RecoverStage = .initialize,
+        recover_verbosity: RecoverVerbosity = .message_and_stack,
+        panic_ctx: StackContext = undefined,
+        panic_trace: ?*const std.builtin.StackTrace = null,
+        awaiting_dispatch: bool = false,
+    };
+
+    /// Counter for the number of threads currently panicking.
+    /// Updated atomically before taking the panic_mutex.
+    /// In recoverable cases, the program will not abort
+    /// until all panicking threads have dumped their traces.
+    var panicking: u8 = 0;
+
+    // Locked to avoid interleaving panic messages from multiple threads.
+    var panic_mutex = std.Thread.Mutex{};
+
+    /// Tracks the state of the current panic.  If the code within the
+    /// panic triggers a secondary panic, this allows us to recover.
+    threadlocal var panic_state_raw: PanicState = .{};
+
+    /// The segfault handlers above need to do some work before they can dispatch
+    /// this switch.  Calling preDispatch() first makes that work fault tolerant.
+    pub fn preDispatch() void {
+        // TODO: We want segfaults to trigger the panic recursively here,
+        // but if there is a segfault accessing this TLS slot it will cause an
+        // infinite loop.  We should use `alarm()` to prevent the infinite
+        // loop and maybe also use a non-thread-local global to detect if
+        // it's happening and print a message.
+        var panic_state: *volatile PanicState = &panic_state_raw;
+        if (panic_state.awaiting_dispatch) {
+            dispatch(null, .{ .current = .{ .ret_addr = null } }, "Panic while preparing callstack");
+        }
+        panic_state.awaiting_dispatch = true;
+    }
+
+    /// This is the entry point to a panic-tolerant panic handler.
+    /// preDispatch() *MUST* be called exactly once before calling this.
+    /// A threadlocal "recover_stage" is updated throughout the process.
+    /// If a panic happens during the panic, the recover_stage will be
+    /// used to select a recover* function to call to resume the panic.
+    /// The recover_verbosity field is used to handle panics while reporting
+    /// panics within panics.  If the panic handler triggers a panic, it will
+    /// attempt to log an additional stack trace for the secondary panic.  If
+    /// that panics, it will fall back to just logging the panic message.  If
+    /// it can't even do that without panicking, it will recover without logging
+    /// anything about the internal panic.  Depending on the state, "recover"
+    /// here may just mean "call abort".
+    pub fn dispatch(
+        trace: ?*const std.builtin.StackTrace,
+        stack_ctx: StackContext,
+        msg: []const u8,
+    ) noreturn {
+        var panic_state: *volatile PanicState = &panic_state_raw;
+        debug.assert(panic_state.awaiting_dispatch);
+        panic_state.awaiting_dispatch = false;
+        nosuspend switch (panic_state.recover_stage) {
+            .initialize => goTo(initPanic, .{ panic_state, trace, stack_ctx, msg }),
+            .report_stack => goTo(recoverReportStack, .{ panic_state, trace, stack_ctx, msg }),
+            .release_mutex => goTo(recoverReleaseMutex, .{ panic_state, trace, stack_ctx, msg }),
+            .release_ref_count => goTo(recoverReleaseRefCount, .{ panic_state, trace, stack_ctx, msg }),
+            .abort => goTo(recoverAbort, .{ panic_state, trace, stack_ctx, msg }),
+            .silent_abort => goTo(abort, .{}),
+        };
+    }
+
+    noinline fn initPanic(
+        state: *volatile PanicState,
+        trace: ?*const std.builtin.StackTrace,
+        stack: StackContext,
+        msg: []const u8,
+    ) noreturn {
+        // use a temporary so there's only one volatile store
+        const new_state = PanicState{
+            .recover_stage = .abort,
+            .panic_ctx = stack,
+            .panic_trace = trace,
+        };
+        state.* = new_state;
+
+        _ = @atomicRmw(u8, &panicking, .Add, 1, .SeqCst);
+
+        state.recover_stage = .release_ref_count;
+
+        _ = panic_mutex.acquire();
+
+        state.recover_stage = .release_mutex;
+
+        const stderr = io.getStdErr().writer();
+        if (builtin.single_threaded) {
+            stderr.print("panic: ", .{}) catch goTo(releaseMutex, .{state});
+        } else {
+            const current_thread_id = std.Thread.getCurrentId();
+            stderr.print("thread {} panic: ", .{current_thread_id}) catch goTo(releaseMutex, .{state});
+        }
+        stderr.print("{s}\n", .{msg}) catch goTo(releaseMutex, .{state});
+
+        state.recover_stage = .report_stack;
+
+        dumpStatusReport() catch |err| {
+            stderr.print("\nIntercepted error.{} while dumping current state.  Continuing...\n", .{err}) catch {};
+        };
+
+        goTo(reportStack, .{state});
+    }
+
+    noinline fn recoverReportStack(
+        state: *volatile PanicState,
+        trace: ?*const std.builtin.StackTrace,
+        stack: StackContext,
+        msg: []const u8,
+    ) noreturn {
+        recover(state, trace, stack, msg);
+
+        state.recover_stage = .release_mutex;
+        const stderr = io.getStdErr().writer();
+        stderr.writeAll("\nOriginal Error:\n") catch {};
+        goTo(reportStack, .{state});
+    }
+
+    noinline fn reportStack(state: *volatile PanicState) noreturn {
+        state.recover_stage = .release_mutex;
+
+        if (state.panic_trace) |t| {
+            debug.dumpStackTrace(t.*);
+        }
+        state.panic_ctx.dumpStackTrace();
+
+        goTo(releaseMutex, .{state});
+    }
+
+    noinline fn recoverReleaseMutex(
+        state: *volatile PanicState,
+        trace: ?*const std.builtin.StackTrace,
+        stack: StackContext,
+        msg: []const u8,
+    ) noreturn {
+        recover(state, trace, stack, msg);
+        goTo(releaseMutex, .{state});
+    }
+
+    noinline fn releaseMutex(state: *volatile PanicState) noreturn {
+        state.recover_stage = .abort;
+
+        panic_mutex.releaseDirect();
+
+        goTo(releaseRefCount, .{state});
+    }
+
+    noinline fn recoverReleaseRefCount(
+        state: *volatile PanicState,
+        trace: ?*const std.builtin.StackTrace,
+        stack: StackContext,
+        msg: []const u8,
+    ) noreturn {
+        recover(state, trace, stack, msg);
+        goTo(releaseRefCount, .{state});
+    }
+
+    noinline fn releaseRefCount(state: *volatile PanicState) noreturn {
+        state.recover_stage = .abort;
+
+        if (@atomicRmw(u8, &panicking, .Sub, 1, .SeqCst) != 1) {
+            // Another thread is panicking, wait for the last one to finish
+            // and call abort()
+
+            // Sleep forever without hammering the CPU
+            var event: std.Thread.StaticResetEvent = .{};
+            event.wait();
+            // This should be unreachable, recurse into recoverAbort.
+            @panic("event.wait() returned");
+        }
+
+        goTo(abort, .{});
+    }
+
+    noinline fn recoverAbort(
+        state: *volatile PanicState,
+        trace: ?*const std.builtin.StackTrace,
+        stack: StackContext,
+        msg: []const u8,
+    ) noreturn {
+        recover(state, trace, stack, msg);
+
+        state.recover_stage = .silent_abort;
+        const stderr = io.getStdErr().writer();
+        stderr.writeAll("Aborting...\n") catch {};
+        goTo(abort, .{});
+    }
+
+    noinline fn abort() noreturn {
+        os.abort();
+    }
+
+    inline fn goTo(comptime func: anytype, args: anytype) noreturn {
+        // TODO: Tailcall is broken right now, but eventually this should be used
+        // to avoid blowing up the stack.  It's ok for now though, there are no
+        // cycles in the state machine so the max stack usage is bounded.
+        //@call(.{.modifier = .always_tail}, func, args);
+        @call(.{}, func, args);
+    }
+
+    fn recover(
+        state: *volatile PanicState,
+        trace: ?*const std.builtin.StackTrace,
+        stack: StackContext,
+        msg: []const u8,
+    ) void {
+        switch (state.recover_verbosity) {
+            .message_and_stack => {
+                // lower the verbosity, and restore it at the end if we don't panic.
+                state.recover_verbosity = .message_only;
+
+                const stderr = io.getStdErr().writer();
+                stderr.writeAll("\nPanicked during a panic: ") catch {};
+                stderr.writeAll(msg) catch {};
+                stderr.writeAll("\nInner panic stack:\n") catch {};
+                if (trace) |t| {
+                    debug.dumpStackTrace(t.*);
+                }
+                stack.dumpStackTrace();
+
+                state.recover_verbosity = .message_and_stack;
+            },
+            .message_only => {
+                state.recover_verbosity = .silent;
+
+                const stderr = io.getStdErr().writer();
+                stderr.writeAll("\nPanicked while dumping inner panic stack: ") catch {};
+                stderr.writeAll(msg) catch {};
+                stderr.writeAll("\n") catch {};
+
+                // If we succeed, restore all the way to dumping the stack.
+                state.recover_verbosity = .message_and_stack;
+            },
+            .silent => {},
+        }
+    }
+};
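The new module is wired in from two places: the compiler's root source file publishes root_decls and calls initialize(), and Sema.analyzeBody maintains the thread-local AnalyzeBody stack that dumpStatusReport walks when a panic or segfault fires. Here is a condensed sketch of that wiring, assuming the compiler-internal types (Sema, Module, Zir); the function name analyzeBodySketch is hypothetical, and the real hunks follow in main.zig and Sema.zig below:

    const crash_report = @import("crash_report.zig");
    const Sema = @import("Sema.zig");
    const Module = @import("Module.zig");
    const Zir = @import("Zir.zig");

    // Root source file: override the default panic handler and disable std's
    // segfault handler in favor of the one installed by initialize().
    pub usingnamespace crash_report.root_decls;

    pub fn main() !void {
        crash_report.initialize(); // install SIGSEGV/SIGILL/SIGBUS handlers first
        // ... rest of the compiler's main()
    }

    // Sketch of the Sema.analyzeBody change: publish the body being analyzed
    // so a crash report can point at the ZIR instruction in flight.
    fn analyzeBodySketch(sema: *Sema, block: *Module.Scope.Block, body: []const Zir.Inst.Index) void {
        var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
        crash_info.push();
        defer crash_info.pop();

        var i: usize = 0;
        while (i < body.len) : (i += 1) {
            crash_info.setBodyIndex(i);
            // ... analyze body[i]; the real loop only exits on a noreturn instruction
        }
    }
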
src/main.zig
@@ -20,6 +20,10 @@ const translate_c = @import("translate_c.zig");
 const Cache = @import("Cache.zig");
 const target_util = @import("target.zig");
 const ThreadPool = @import("ThreadPool.zig");
+const crash_report = @import("crash_report.zig");
+
+// Crash report needs to override the panic handler and other root decls
+pub usingnamespace crash_report.root_decls;
 
 pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
     std.log.emerg(format, args);
@@ -134,6 +138,8 @@ var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
 }){};
 
 pub fn main() anyerror!void {
+    crash_report.initialize();
+
     var gpa_need_deinit = false;
     const gpa = gpa: {
         if (!std.builtin.link_libc) {
src/print_zir.zig
@@ -24,15 +24,20 @@ pub fn renderAsTextToFile(
         .code = scope_file.zir,
         .indent = 0,
         .parent_decl_node = 0,
+        .recurse_decls = true,
+        .recurse_blocks = true,
     };
 
+    var raw_stream = std.io.bufferedWriter(fs_file.writer());
+    const stream = raw_stream.writer();
+
     const main_struct_inst = Zir.main_struct_inst;
-    try fs_file.writer().print("%{d} ", .{main_struct_inst});
-    try writer.writeInstToStream(fs_file.writer(), main_struct_inst);
-    try fs_file.writeAll("\n");
+    try stream.print("%{d} ", .{main_struct_inst});
+    try writer.writeInstToStream(stream, main_struct_inst);
+    try stream.writeAll("\n");
     const imports_index = scope_file.zir.extra[@enumToInt(Zir.ExtraIndex.imports)];
     if (imports_index != 0) {
-        try fs_file.writeAll("Imports:\n");
+        try stream.writeAll("Imports:\n");
 
         const extra = scope_file.zir.extraData(Zir.Inst.Imports, imports_index);
         var import_i: u32 = 0;
@@ -44,13 +49,74 @@ pub fn renderAsTextToFile(
 
             const src: LazySrcLoc = .{ .token_abs = item.data.token };
             const import_path = scope_file.zir.nullTerminatedString(item.data.name);
-            try fs_file.writer().print("  @import(\"{}\") ", .{
+            try stream.print("  @import(\"{}\") ", .{
                 std.zig.fmtEscapes(import_path),
             });
-            try writer.writeSrc(fs_file.writer(), src);
-            try fs_file.writer().writeAll("\n");
+            try writer.writeSrc(stream, src);
+            try stream.writeAll("\n");
         }
     }
+
+    try raw_stream.flush();
+}
+
+pub fn renderInstructionContext(
+    gpa: *Allocator,
+    block: []const Zir.Inst.Index,
+    block_index: usize,
+    scope_file: *Module.Scope.File,
+    parent_decl_node: Ast.Node.Index,
+    indent: u32,
+    stream: anytype,
+) !void {
+    var arena = std.heap.ArenaAllocator.init(gpa);
+    defer arena.deinit();
+
+    var writer: Writer = .{
+        .gpa = gpa,
+        .arena = &arena.allocator,
+        .file = scope_file,
+        .code = scope_file.zir,
+        .indent = if (indent < 2) 2 else indent,
+        .parent_decl_node = parent_decl_node,
+        .recurse_decls = false,
+        .recurse_blocks = true,
+    };
+
+    try writer.writeBody(stream, block[0..block_index]);
+    try stream.writeByteNTimes(' ', writer.indent - 2);
+    try stream.print("> %{d} ", .{block[block_index]});
+    try writer.writeInstToStream(stream, block[block_index]);
+    try stream.writeByte('\n');
+    if (block_index + 1 < block.len) {
+        try writer.writeBody(stream, block[block_index + 1 ..]);
+    }
+}
+
+pub fn renderSingleInstruction(
+    gpa: *Allocator,
+    inst: Zir.Inst.Index,
+    scope_file: *Module.Scope.File,
+    parent_decl_node: Ast.Node.Index,
+    indent: u32,
+    stream: anytype,
+) !void {
+    var arena = std.heap.ArenaAllocator.init(gpa);
+    defer arena.deinit();
+
+    var writer: Writer = .{
+        .gpa = gpa,
+        .arena = &arena.allocator,
+        .file = scope_file,
+        .code = scope_file.zir,
+        .indent = indent,
+        .parent_decl_node = parent_decl_node,
+        .recurse_decls = false,
+        .recurse_blocks = false,
+    };
+
+    try stream.print("%{d} ", .{inst});
+    try writer.writeInstToStream(stream, inst);
 }
 
 const Writer = struct {
@@ -59,7 +125,9 @@ const Writer = struct {
     file: *Module.Scope.File,
     code: Zir,
     indent: u32,
-    parent_decl_node: u32,
+    parent_decl_node: Ast.Node.Index,
+    recurse_decls: bool,
+    recurse_blocks: bool,
 
     fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index {
         return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node));
@@ -567,12 +635,8 @@ const Writer = struct {
         try stream.print("\"{}\", ", .{
             std.zig.fmtEscapes(self.code.nullTerminatedString(extra.data.name)),
         });
-        try stream.writeAll("{\n");
-        self.indent += 2;
-        try self.writeBody(stream, body);
-        self.indent -= 2;
-        try stream.writeByteNTimes(' ', self.indent);
-        try stream.writeAll("}) ");
+        try self.writeBracedBody(stream, body);
+        try stream.writeAll(") ");
         try self.writeSrc(stream, inst_data.src());
     }
 
@@ -881,12 +945,8 @@ const Writer = struct {
         const inst_data = self.code.instructions.items(.data)[inst].pl_node;
         const extra = self.code.extraData(Zir.Inst.Block, inst_data.payload_index);
         const body = self.code.extra[extra.end..][0..extra.data.body_len];
-        try stream.writeAll("{\n");
-        self.indent += 2;
-        try self.writeBody(stream, body);
-        self.indent -= 2;
-        try stream.writeByteNTimes(' ', self.indent);
-        try stream.writeAll("}) ");
+        try self.writeBracedBody(stream, body);
+        try stream.writeAll(") ");
     }
 
     fn writePlNodeCondBr(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
@@ -895,17 +955,11 @@ const Writer = struct {
         const then_body = self.code.extra[extra.end..][0..extra.data.then_body_len];
         const else_body = self.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
         try self.writeInstRef(stream, extra.data.condition);
-        try stream.writeAll(", {\n");
-        self.indent += 2;
-        try self.writeBody(stream, then_body);
-        self.indent -= 2;
-        try stream.writeByteNTimes(' ', self.indent);
-        try stream.writeAll("}, {\n");
-        self.indent += 2;
-        try self.writeBody(stream, else_body);
-        self.indent -= 2;
-        try stream.writeByteNTimes(' ', self.indent);
-        try stream.writeAll("}) ");
+        try stream.writeAll(", ");
+        try self.writeBracedBody(stream, then_body);
+        try stream.writeAll(", ");
+        try self.writeBracedBody(stream, else_body);
+        try stream.writeAll(") ");
         try self.writeSrc(stream, inst_data.src());
     }
 
@@ -963,17 +1017,10 @@ const Writer = struct {
         } else {
             const prev_parent_decl_node = self.parent_decl_node;
             if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
-            self.indent += 2;
-            if (body.len == 0) {
-                try stream.writeAll("{}, {\n");
-            } else {
-                try stream.writeAll("{\n");
-                try self.writeBody(stream, body);
-
-                try stream.writeByteNTimes(' ', self.indent - 2);
-                try stream.writeAll("}, {\n");
-            }
+            try self.writeBracedDecl(stream, body);
+            try stream.writeAll(", {\n");
 
+            self.indent += 2;
             const bits_per_field = 4;
             const fields_per_u32 = 32 / bits_per_field;
             const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
@@ -1096,17 +1143,10 @@ const Writer = struct {
 
         const prev_parent_decl_node = self.parent_decl_node;
         if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
-        self.indent += 2;
-        if (body.len == 0) {
-            try stream.writeAll("{}, {\n");
-        } else {
-            try stream.writeAll("{\n");
-            try self.writeBody(stream, body);
-
-            try stream.writeByteNTimes(' ', self.indent - 2);
-            try stream.writeAll("}, {\n");
-        }
+        try self.writeBracedDecl(stream, body);
+        try stream.writeAll(", {\n");
 
+        self.indent += 2;
         const bits_per_field = 4;
         const fields_per_u32 = 32 / bits_per_field;
         const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
@@ -1251,18 +1291,25 @@ const Writer = struct {
                     try stream.writeAll(")");
                 }
             }
-            const tag = self.code.instructions.items(.tag)[decl_index];
-            try stream.print(" line({d}) hash({}): %{d} = {s}(", .{
-                line, std.fmt.fmtSliceHexLower(&hash_bytes), decl_index, @tagName(tag),
-            });
 
-            const decl_block_inst_data = self.code.instructions.items(.data)[decl_index].pl_node;
-            const sub_decl_node_off = decl_block_inst_data.src_node;
-            self.parent_decl_node = self.relativeToNodeIndex(sub_decl_node_off);
-            try self.writePlNodeBlockWithoutSrc(stream, decl_index);
-            self.parent_decl_node = parent_decl_node;
-            try self.writeSrc(stream, decl_block_inst_data.src());
-            try stream.writeAll("\n");
+            if (self.recurse_decls) {
+                const tag = self.code.instructions.items(.tag)[decl_index];
+                try stream.print(" line({d}) hash({}): %{d} = {s}(", .{
+                    line, std.fmt.fmtSliceHexLower(&hash_bytes), decl_index, @tagName(tag),
+                });
+
+                const decl_block_inst_data = self.code.instructions.items(.data)[decl_index].pl_node;
+                const sub_decl_node_off = decl_block_inst_data.src_node;
+                self.parent_decl_node = self.relativeToNodeIndex(sub_decl_node_off);
+                try self.writePlNodeBlockWithoutSrc(stream, decl_index);
+                self.parent_decl_node = parent_decl_node;
+                try self.writeSrc(stream, decl_block_inst_data.src());
+                try stream.writeAll("\n");
+            } else {
+                try stream.print(" line({d}) hash({}): %{d} = ...\n", .{
+                    line, std.fmt.fmtSliceHexLower(&hash_bytes), decl_index,
+                });
+            }
         }
         return extra_index;
     }
@@ -1329,17 +1376,10 @@ const Writer = struct {
         } else {
             const prev_parent_decl_node = self.parent_decl_node;
             if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
-            self.indent += 2;
-            if (body.len == 0) {
-                try stream.writeAll("{}, {\n");
-            } else {
-                try stream.writeAll("{\n");
-                try self.writeBody(stream, body);
-
-                try stream.writeByteNTimes(' ', self.indent - 2);
-                try stream.writeAll("}, {\n");
-            }
+            try self.writeBracedDecl(stream, body);
+            try stream.writeAll(", {\n");
 
+            self.indent += 2;
             const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
             const body_end = extra_index;
             extra_index += bit_bags_count;
@@ -1463,18 +1503,18 @@ const Writer = struct {
 
         try self.writeInstRef(stream, extra.data.operand);
 
+        self.indent += 2;
+
         if (special.body.len != 0) {
             const prong_name = switch (special_prong) {
                 .@"else" => "else",
                 .under => "_",
                 else => unreachable,
             };
-            try stream.print(", {s} => {{\n", .{prong_name});
-            self.indent += 2;
-            try self.writeBody(stream, special.body);
-            self.indent -= 2;
+            try stream.writeAll(",\n");
             try stream.writeByteNTimes(' ', self.indent);
-            try stream.writeAll("}");
+            try stream.print("{s} => ", .{prong_name});
+            try self.writeBracedBody(stream, special.body);
         }
 
         var extra_index: usize = special.end;
@@ -1488,16 +1528,16 @@ const Writer = struct {
                 const body = self.code.extra[extra_index..][0..body_len];
                 extra_index += body_len;
 
-                try stream.writeAll(", ");
-                try self.writeInstRef(stream, item_ref);
-                try stream.writeAll(" => {\n");
-                self.indent += 2;
-                try self.writeBody(stream, body);
-                self.indent -= 2;
+                try stream.writeAll(",\n");
                 try stream.writeByteNTimes(' ', self.indent);
-                try stream.writeAll("}");
+                try self.writeInstRef(stream, item_ref);
+                try stream.writeAll(" => ");
+                try self.writeBracedBody(stream, body);
             }
         }
+
+        self.indent -= 2;
+
         try stream.writeAll(") ");
         try self.writeSrc(stream, inst_data.src());
     }
@@ -1527,18 +1567,18 @@ const Writer = struct {
 
         try self.writeInstRef(stream, extra.data.operand);
 
+        self.indent += 2;
+
         if (special.body.len != 0) {
             const prong_name = switch (special_prong) {
                 .@"else" => "else",
                 .under => "_",
                 else => unreachable,
             };
-            try stream.print(", {s} => {{\n", .{prong_name});
-            self.indent += 2;
-            try self.writeBody(stream, special.body);
-            self.indent -= 2;
+            try stream.writeAll(",\n");
             try stream.writeByteNTimes(' ', self.indent);
-            try stream.writeAll("}");
+            try stream.print("{s} => ", .{prong_name});
+            try self.writeBracedBody(stream, special.body);
         }
 
         var extra_index: usize = special.end;
@@ -1552,14 +1592,11 @@ const Writer = struct {
                 const body = self.code.extra[extra_index..][0..body_len];
                 extra_index += body_len;
 
-                try stream.writeAll(", ");
-                try self.writeInstRef(stream, item_ref);
-                try stream.writeAll(" => {\n");
-                self.indent += 2;
-                try self.writeBody(stream, body);
-                self.indent -= 2;
+                try stream.writeAll(",\n");
                 try stream.writeByteNTimes(' ', self.indent);
-                try stream.writeAll("}");
+                try self.writeInstRef(stream, item_ref);
+                try stream.writeAll(" => ");
+                try self.writeBracedBody(stream, body);
             }
         }
         {
@@ -1574,8 +1611,11 @@ const Writer = struct {
                 const items = self.code.refSlice(extra_index, items_len);
                 extra_index += items_len;
 
-                for (items) |item_ref| {
-                    try stream.writeAll(", ");
+                try stream.writeAll(",\n");
+                try stream.writeByteNTimes(' ', self.indent);
+
+                for (items) |item_ref, item_i| {
+                    if (item_i != 0) try stream.writeAll(", ");
                     try self.writeInstRef(stream, item_ref);
                 }
 
@@ -1586,7 +1626,9 @@ const Writer = struct {
                     const item_last = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
                     extra_index += 1;
 
-                    try stream.writeAll(", ");
+                    if (range_i != 0 or items.len != 0) {
+                        try stream.writeAll(", ");
+                    }
                     try self.writeInstRef(stream, item_first);
                     try stream.writeAll("...");
                     try self.writeInstRef(stream, item_last);
@@ -1594,14 +1636,13 @@ const Writer = struct {
 
                 const body = self.code.extra[extra_index..][0..body_len];
                 extra_index += body_len;
-                try stream.writeAll(" => {\n");
-                self.indent += 2;
-                try self.writeBody(stream, body);
-                self.indent -= 2;
-                try stream.writeByteNTimes(' ', self.indent);
-                try stream.writeAll("}");
+                try stream.writeAll(" => ");
+                try self.writeBracedBody(stream, body);
             }
         }
+
+        self.indent -= 2;
+
         try stream.writeAll(") ");
         try self.writeSrc(stream, inst_data.src());
     }
@@ -1796,12 +1837,8 @@ const Writer = struct {
         const extra = self.code.extraData(Zir.Inst.Block, inst_data.payload_index);
         const body = self.code.extra[extra.end..][0..extra.data.body_len];
         try self.writeInstRef(stream, inst_data.lhs);
-        try stream.writeAll(", {\n");
-        self.indent += 2;
-        try self.writeBody(stream, body);
-        self.indent -= 2;
-        try stream.writeByteNTimes(' ', self.indent);
-        try stream.writeAll("})");
+        try stream.writeAll(", ");
+        try self.writeBracedBody(stream, body);
     }
 
     fn writeIntType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
@@ -1846,12 +1883,8 @@ const Writer = struct {
         if (ret_ty_body.len == 0) {
             try stream.writeAll("ret_ty=void");
         } else {
-            try stream.writeAll("ret_ty={\n");
-            self.indent += 2;
-            try self.writeBody(stream, ret_ty_body);
-            self.indent -= 2;
-            try stream.writeByteNTimes(' ', self.indent);
-            try stream.writeAll("}");
+            try stream.writeAll("ret_ty=");
+            try self.writeBracedBody(stream, ret_ty_body);
         }
 
         try self.writeOptionalInstRef(stream, ", cc=", cc);
@@ -1860,16 +1893,9 @@ const Writer = struct {
         try self.writeFlag(stream, ", extern", is_extern);
         try self.writeFlag(stream, ", inferror", inferred_error_set);
 
-        if (body.len == 0) {
-            try stream.writeAll(", body={}) ");
-        } else {
-            try stream.writeAll(", body={\n");
-            self.indent += 2;
-            try self.writeBody(stream, body);
-            self.indent -= 2;
-            try stream.writeByteNTimes(' ', self.indent);
-            try stream.writeAll("}) ");
-        }
+        try stream.writeAll(", body=");
+        try self.writeBracedBody(stream, body);
+        try stream.writeAll(") ");
         if (body.len != 0) {
             try stream.print("(lbrace={d}:{d},rbrace={d}:{d}) ", .{
                 src_locs.lbrace_line, @truncate(u16, src_locs.columns),
@@ -1929,18 +1955,19 @@ const Writer = struct {
     }
 
     fn writeSrc(self: *Writer, stream: anytype, src: LazySrcLoc) !void {
-        const tree = self.file.tree;
-        const src_loc: Module.SrcLoc = .{
-            .file_scope = self.file,
-            .parent_decl_node = self.parent_decl_node,
-            .lazy = src,
-        };
-        // Caller must ensure AST tree is loaded.
-        const abs_byte_off = src_loc.byteOffset(self.gpa) catch unreachable;
-        const delta_line = std.zig.findLineColumn(tree.source, abs_byte_off);
-        try stream.print("{s}:{d}:{d}", .{
-            @tagName(src), delta_line.line + 1, delta_line.column + 1,
-        });
+        if (self.file.tree_loaded) {
+            const tree = self.file.tree;
+            const src_loc: Module.SrcLoc = .{
+                .file_scope = self.file,
+                .parent_decl_node = self.parent_decl_node,
+                .lazy = src,
+            };
+            const abs_byte_off = src_loc.byteOffset(self.gpa) catch unreachable;
+            const delta_line = std.zig.findLineColumn(tree.source, abs_byte_off);
+            try stream.print("{s}:{d}:{d}", .{
+                @tagName(src), delta_line.line + 1, delta_line.column + 1,
+            });
+        }
     }
 
     fn writeSrcNode(self: *Writer, stream: anytype, src_node: ?i32) !void {
@@ -1950,6 +1977,43 @@ const Writer = struct {
         return self.writeSrc(stream, src);
     }
 
+    fn writeBracedDecl(self: *Writer, stream: anytype, body: []const Zir.Inst.Index) !void {
+        try self.writeBracedBodyConditional(stream, body, self.recurse_decls);
+    }
+
+    fn writeBracedBody(self: *Writer, stream: anytype, body: []const Zir.Inst.Index) !void {
+        try self.writeBracedBodyConditional(stream, body, self.recurse_blocks);
+    }
+
+    fn writeBracedBodyConditional(self: *Writer, stream: anytype, body: []const Zir.Inst.Index, enabled: bool) !void {
+        if (body.len == 0) {
+            try stream.writeAll("{}");
+        } else if (enabled) {
+            try stream.writeAll("{\n");
+            self.indent += 2;
+            try self.writeBody(stream, body);
+            self.indent -= 2;
+            try stream.writeByteNTimes(' ', self.indent);
+            try stream.writeAll("}");
+        } else if (body.len == 1) {
+            try stream.writeByte('{');
+            try self.writeInstIndex(stream, body[0]);
+            try stream.writeByte('}');
+        } else if (body.len == 2) {
+            try stream.writeByte('{');
+            try self.writeInstIndex(stream, body[0]);
+            try stream.writeAll(", ");
+            try self.writeInstIndex(stream, body[1]);
+            try stream.writeByte('}');
+        } else {
+            try stream.writeByte('{');
+            try self.writeInstIndex(stream, body[0]);
+            try stream.writeAll("..");
+            try self.writeInstIndex(stream, body[body.len - 1]);
+            try stream.writeByte('}');
+        }
+    }
+
     fn writeBody(self: *Writer, stream: anytype, body: []const Zir.Inst.Index) !void {
         for (body) |inst| {
             try stream.writeByteNTimes(' ', self.indent);
src/Sema.zig
@@ -90,6 +90,7 @@ const LazySrcLoc = Module.LazySrcLoc;
 const RangeSet = @import("RangeSet.zig");
 const target_util = @import("target.zig");
 const Package = @import("Package.zig");
+const crash_report = @import("crash_report.zig");
 
 pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref);
 
@@ -153,11 +154,16 @@ pub fn analyzeBody(
 
     var orig_captures: usize = parent_capture_scope.captures.count();
 
+    var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
+    crash_info.push();
+    defer crash_info.pop();
+
     // We use a while(true) loop here to avoid a redundant way of breaking out of
     // the loop. The only way to break out of the loop is with a `noreturn`
     // instruction.
     var i: usize = 0;
     const result = while (true) {
+        crash_info.setBodyIndex(i);
         const inst = body[i];
         const air_inst: Air.Inst.Ref = switch (tags[inst]) {
             // zig fmt: off