Commit 1bce01de97

mlugg <mlugg@mlugg.co.uk>
2025-01-22 02:51:39
compiler: pass error return traces everywhere
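
The backend now attaches the error return trace parameter to every function with the `auto` calling convention (when error tracing is enabled) instead of only to functions whose return type is an error, and panic calls forward the current trace to the panic handler. The `FuncAnalysis.calls_or_awaits_errorable_fn` bit becomes `has_error_trace`: it is initialized to whether the function uses the `auto` calling convention, and is set when a call to an `auto` function is analyzed or a panic is prepared. Functions with other calling conventions that end up needing a trace still construct one locally at function entry, and `returnError` is now called without an explicit trace argument.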
Parent: 941677e
Changed files (4)
src/codegen/llvm.zig
@@ -1497,8 +1497,7 @@ pub const Object = struct {
             .unsigned => try attributes.addRetAttr(.zeroext, &o.builder),
         };
 
-        const err_return_tracing = Type.fromInterned(fn_info.return_type).isError(zcu) and
-            comp.config.any_error_tracing;
+        const err_return_tracing = fn_info.cc == .auto and comp.config.any_error_tracing;
 
         const err_ret_trace: Builder.Value = if (err_return_tracing) param: {
             const param = wip.arg(llvm_arg_i);
@@ -2805,9 +2804,7 @@ pub const Object = struct {
                     debug_param_types.appendAssumeCapacity(try o.lowerDebugType(Type.void));
                 }
 
-                if (Type.fromInterned(fn_info.return_type).isError(zcu) and
-                    zcu.comp.config.any_error_tracing)
-                {
+                if (fn_info.cc == .auto and zcu.comp.config.any_error_tracing) {
                     const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType());
                     debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
                 }
@@ -2970,8 +2967,7 @@ pub const Object = struct {
             llvm_arg_i += 1;
         }
 
-        const err_return_tracing = Type.fromInterned(fn_info.return_type).isError(zcu) and
-            zcu.comp.config.any_error_tracing;
+        const err_return_tracing = fn_info.cc == .auto and zcu.comp.config.any_error_tracing;
 
         if (err_return_tracing) {
             try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
@@ -3736,9 +3732,7 @@ pub const Object = struct {
             try llvm_params.append(o.gpa, .ptr);
         }
 
-        if (Type.fromInterned(fn_info.return_type).isError(zcu) and
-            zcu.comp.config.any_error_tracing)
-        {
+        if (fn_info.cc == .auto and zcu.comp.config.any_error_tracing) {
             const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType());
             try llvm_params.append(o.gpa, try o.lowerType(ptr_ty));
         }
@@ -5483,7 +5477,7 @@ pub const FuncGen = struct {
             break :blk ret_ptr;
         };
 
-        const err_return_tracing = return_type.isError(zcu) and zcu.comp.config.any_error_tracing;
+        const err_return_tracing = fn_info.cc == .auto and zcu.comp.config.any_error_tracing;
         if (err_return_tracing) {
             assert(self.err_ret_trace != .none);
             try llvm_args.append(self.err_ret_trace);
@@ -5762,6 +5756,8 @@ pub const FuncGen = struct {
         const panic_nav = ip.getNav(panic_func.owner_nav);
         const fn_info = zcu.typeToFunc(Type.fromInterned(panic_nav.typeOf(ip))).?;
         const panic_global = try o.resolveLlvmFunction(panic_func.owner_nav);
+        const has_err_trace = zcu.comp.config.any_error_tracing and fn_info.cc == .auto;
+        if (has_err_trace) assert(fg.err_ret_trace != .none);
         _ = try fg.wip.callIntrinsicAssumeCold();
         _ = try fg.wip.call(
             .normal,
@@ -5769,7 +5765,13 @@ pub const FuncGen = struct {
             .none,
             panic_global.typeOf(&o.builder),
             panic_global.toValue(&o.builder),
-            &.{
+            if (has_err_trace) &.{
+                fg.err_ret_trace,
+                msg_ptr.toValue(),
+                try o.builder.intValue(llvm_usize, msg_len),
+                try o.builder.nullValue(.ptr),
+                null_opt_addr_global.toValue(),
+            } else &.{
                 msg_ptr.toValue(),
                 try o.builder.intValue(llvm_usize, msg_len),
                 try o.builder.nullValue(.ptr),
src/Zcu/PerThread.zig
@@ -2596,7 +2596,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
     }
 
     // reset in case calls to errorable functions are removed.
-    func.setCallsOrAwaitsErrorableFn(ip, false);
+    ip.funcSetHasErrorTrace(func_index, fn_ty_info.cc == .auto);
 
     // First few indexes of extra are reserved and set at the end.
     const reserved_count = @typeInfo(Air.ExtraIndex).@"enum".fields.len;
@@ -2707,11 +2707,9 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
 
     func.setBranchHint(ip, sema.branch_hint orelse .none);
 
-    // If we don't get an error return trace from a caller, create our own.
-    if (func.analysisUnordered(ip).calls_or_awaits_errorable_fn and
-        zcu.comp.config.any_error_tracing and
-        !sema.fn_ret_ty.isError(zcu))
-    {
+    if (zcu.comp.config.any_error_tracing and func.analysisUnordered(ip).has_error_trace and fn_ty_info.cc != .auto) {
+        // We're using an error trace, but didn't start out with one from the caller.
+        // We'll have to create it at the start of the function.
         sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) {
             error.ComptimeReturn => unreachable,
             error.ComptimeBreak => unreachable,
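
For illustration only (not part of the commit; `fail` and `c_entry` are hypothetical names, and a build with error tracing enabled is assumed): under the logic above, a function with a non-`auto` calling convention that ends up using an error trace still builds one locally at entry, since its callers pass none in.

    fn fail() error{Oops}!void {
        return error.Oops;
    }

    // Exported, so C calling convention rather than `auto`: no trace parameter
    // comes in from the caller. Analyzing the call to the `auto` function `fail`
    // sets `has_error_trace`, so `setupErrorReturnTrace` emits a local trace at
    // the start of this function instead.
    export fn c_entry() void {
        fail() catch {};
    }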
src/InternPool.zig
@@ -2294,17 +2294,6 @@ pub const Key = union(enum) {
             return @atomicLoad(FuncAnalysis, func.analysisPtr(ip), .unordered);
         }
 
-        pub fn setCallsOrAwaitsErrorableFn(func: Func, ip: *InternPool, value: bool) void {
-            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
-            extra_mutex.lock();
-            defer extra_mutex.unlock();
-
-            const analysis_ptr = func.analysisPtr(ip);
-            var analysis = analysis_ptr.*;
-            analysis.calls_or_awaits_errorable_fn = value;
-            @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
-        }
-
         pub fn setBranchHint(func: Func, ip: *InternPool, hint: std.builtin.BranchHint) void {
             const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
             extra_mutex.lock();
@@ -5975,7 +5964,7 @@ pub const FuncAnalysis = packed struct(u32) {
     is_analyzed: bool,
     branch_hint: std.builtin.BranchHint,
     is_noinline: bool,
-    calls_or_awaits_errorable_fn: bool,
+    has_error_trace: bool,
     /// True if this function has an inferred error set.
     inferred_error_set: bool,
     disable_instrumentation: bool,
@@ -9007,7 +8996,7 @@ pub fn getFuncDecl(
             .is_analyzed = false,
             .branch_hint = .none,
             .is_noinline = key.is_noinline,
-            .calls_or_awaits_errorable_fn = false,
+            .has_error_trace = false,
             .inferred_error_set = false,
             .disable_instrumentation = false,
         },
@@ -9116,7 +9105,7 @@ pub fn getFuncDeclIes(
             .is_analyzed = false,
             .branch_hint = .none,
             .is_noinline = key.is_noinline,
-            .calls_or_awaits_errorable_fn = false,
+            .has_error_trace = false,
             .inferred_error_set = true,
             .disable_instrumentation = false,
         },
@@ -9312,7 +9301,7 @@ pub fn getFuncInstance(
             .is_analyzed = false,
             .branch_hint = .none,
             .is_noinline = arg.is_noinline,
-            .calls_or_awaits_errorable_fn = false,
+            .has_error_trace = false,
             .inferred_error_set = false,
             .disable_instrumentation = false,
         },
@@ -9410,7 +9399,7 @@ pub fn getFuncInstanceIes(
             .is_analyzed = false,
             .branch_hint = .none,
             .is_noinline = arg.is_noinline,
-            .calls_or_awaits_errorable_fn = false,
+            .has_error_trace = false,
             .inferred_error_set = true,
             .disable_instrumentation = false,
         },
@@ -12174,7 +12163,7 @@ pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis {
     return @atomicLoad(FuncAnalysis, ip.funcAnalysisPtr(func), .unordered);
 }
 
-pub fn funcSetCallsOrAwaitsErrorableFn(ip: *InternPool, func: Index) void {
+pub fn funcSetHasErrorTrace(ip: *InternPool, func: Index, has_error_trace: bool) void {
     const unwrapped_func = func.unwrap(ip);
     const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
     extra_mutex.lock();
@@ -12182,7 +12171,7 @@ pub fn funcSetCallsOrAwaitsErrorableFn(ip: *InternPool, func: Index) void {
 
     const analysis_ptr = ip.funcAnalysisPtr(func);
     var analysis = analysis_ptr.*;
-    analysis.calls_or_awaits_errorable_fn = true;
+    analysis.has_error_trace = has_error_trace;
     @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
 }
 
src/Sema.zig
@@ -7191,14 +7191,6 @@ fn zirCall(
     const call_dbg_node: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);
     const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call);
 
-    switch (sema.owner.unwrap()) {
-        .@"comptime", .type, .memoized_state, .nav_ty, .nav_val => input_is_error = false,
-        .func => |owner_func| if (!zcu.intern_pool.funcAnalysisUnordered(owner_func).calls_or_awaits_errorable_fn) {
-            // No errorable fn actually called; we have no error return trace
-            input_is_error = false;
-        },
-    }
-
     if (block.ownerModule().error_tracing and
         !block.isComptime() and !block.is_typeof and (input_is_error or pop_error_return_trace))
     {
@@ -7865,6 +7857,12 @@ fn analyzeCall(
             }
             break :msg msg;
         });
+        if (func_ty_info.cc == .auto) {
+            switch (sema.owner.unwrap()) {
+                .@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
+                .func => |owner_func| ip.funcSetHasErrorTrace(owner_func, true),
+            }
+        }
         for (args, 0..) |arg, arg_idx| {
             try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg);
         }
@@ -7939,13 +7937,6 @@ fn analyzeCall(
             try zcu.ensureFuncBodyAnalysisQueued(runtime_func_val.toIntern());
         }
 
-        switch (sema.owner.unwrap()) {
-            .@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
-            .func => |owner_func| if (resolved_ret_ty.isError(zcu)) {
-                ip.funcSetCallsOrAwaitsErrorableFn(owner_func);
-            },
-        }
-
         const call_tag: Air.Inst.Tag = switch (modifier) {
             .auto, .no_async => .call,
             .never_tail => .call_never_tail,
@@ -19699,16 +19690,16 @@ fn retWithErrTracing(
         .bool_false => false,
         else => true,
     };
+
+    // This means we're returning something that might be an error!
+    // This should only be possible with the `auto` cc, so we definitely have an error trace.
+    assert(pt.zcu.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).has_error_trace);
+
     const gpa = sema.gpa;
-    const stack_trace_ty = try sema.getBuiltinType(src, .StackTrace);
-    try stack_trace_ty.resolveFields(pt);
-    const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
-    const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
     const return_err_fn = Air.internedToRef(try sema.getBuiltin(src, .returnError));
-    const args: [1]Air.Inst.Ref = .{err_return_trace};
 
     if (!need_check) {
-        try sema.callBuiltin(block, src, return_err_fn, .never_inline, &args, .@"error return");
+        try sema.callBuiltin(block, src, return_err_fn, .never_inline, &.{}, .@"error return");
         _ = try block.addUnOp(ret_tag, operand);
         return;
     }
@@ -19719,7 +19710,7 @@ fn retWithErrTracing(
 
     var else_block = block.makeSubBlock();
     defer else_block.instructions.deinit(gpa);
-    try sema.callBuiltin(&else_block, src, return_err_fn, .never_inline, &args, .@"error return");
+    try sema.callBuiltin(&else_block, src, return_err_fn, .never_inline, &.{}, .@"error return");
     _ = try else_block.addUnOp(ret_tag, operand);
 
     try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
@@ -19830,7 +19821,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
         return;
     }
 
-    if (!zcu.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).calls_or_awaits_errorable_fn) return;
+    if (!zcu.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).has_error_trace) return;
     if (!start_block.ownerModule().error_tracing) return;
 
     assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere
@@ -21116,7 +21107,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern());
 
     switch (sema.owner.unwrap()) {
-        .func => |func| if (ip.funcAnalysisUnordered(func).calls_or_awaits_errorable_fn and block.ownerModule().error_tracing) {
+        .func => |func| if (ip.funcAnalysisUnordered(func).has_error_trace and block.ownerModule().error_tracing) {
             return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
         },
         .@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
@@ -27089,6 +27080,10 @@ fn preparePanicId(sema: *Sema, src: LazySrcLoc, panic_id: Zcu.PanicId) !InternPo
     const zcu = sema.pt.zcu;
     try sema.ensureMemoizedStateResolved(src, .panic);
     try zcu.ensureFuncBodyAnalysisQueued(zcu.builtin_decl_values.get(.@"Panic.call"));
+    switch (sema.owner.unwrap()) {
+        .@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
+        .func => |owner_func| zcu.intern_pool.funcSetHasErrorTrace(owner_func, true),
+    }
     return zcu.builtin_decl_values.get(panic_id.toBuiltin());
 }
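
A complementary sketch (also hypothetical, assuming error tracing is enabled): a `void`-returning function with the default (`auto`) calling convention no longer builds its own trace; it receives the caller's trace pointer, the same pointer that panic calls now forward.

    const std = @import("std");

    fn fail() error{Oops}!void {
        return error.Oops;
    }

    // `middle` returns `void`, yet it has the `auto` calling convention, so under
    // this commit it receives the error return trace pointer from its caller
    // instead of constructing a fresh one at entry (previously the trace
    // parameter was only added when the return type was an error).
    fn middle() void {
        fail() catch |err| std.debug.print("caught {s}\n", .{@errorName(err)});
    }

    pub fn main() void {
        middle();
    }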