Commit e9bd2d45d4
Changed files (36)
lib/compiler/aro_translate_c.zig
@@ -168,6 +168,7 @@ pub fn translate(
context.pattern_list.deinit(gpa);
}
+ @setEvalBranchQuota(2000);
inline for (@typeInfo(std.zig.c_builtins).@"struct".decls) |decl| {
const builtin_fn = try ZigTag.pub_var_simple.create(arena, .{
.name = decl.name,
lib/std/math/log_int.zig
@@ -61,6 +61,7 @@ pub fn log_int(comptime T: type, base: T, x: T) Log2Int(T) {
}
test "log_int" {
+ @setEvalBranchQuota(2000);
// Test all unsigned integers with 2, 3, ..., 64 bits.
// We cannot test 0 or 1 bits since base must be > 1.
inline for (2..64 + 1) |bits| {
lib/std/os/windows.zig
@@ -1468,6 +1468,7 @@ fn mountmgrIsVolumeName(name: []const u16) bool {
}
test mountmgrIsVolumeName {
+ @setEvalBranchQuota(2000);
const L = std.unicode.utf8ToUtf16LeStringLiteral;
try std.testing.expect(mountmgrIsVolumeName(L("\\\\?\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}")));
try std.testing.expect(mountmgrIsVolumeName(L("\\??\\Volume{383da0b0-717f-41b6-8c36-00500992b58d}")));
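The three @setEvalBranchQuota(2000) additions above raise the comptime branch quota for loops that would otherwise exhaust the default of 1000 backwards branches. A minimal standalone sketch of the pattern (not taken from the commit; the test name and numbers are made up):

    const std = @import("std");

    test "raise the eval branch quota for a long comptime loop" {
        const sum = comptime blk: {
            // The default quota is 1000 backwards branches; 1500 iterations need more.
            @setEvalBranchQuota(4000);
            var total: usize = 0;
            for (0..1500) |i| total += i;
            break :blk total;
        };
        try std.testing.expectEqual(@as(usize, 1_124_250), sum);
    }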
lib/std/zig/AstGen.zig
@@ -10209,9 +10209,6 @@ fn callExpr(
const callee = try calleeExpr(gz, scope, ri.rl, call.ast.fn_expr);
const modifier: std.builtin.CallModifier = blk: {
- if (gz.is_comptime) {
- break :blk .compile_time;
- }
if (call.async_token != null) {
break :blk .async_kw;
}
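With the gz.is_comptime special case removed, AstGen no longer rewrites calls inside comptime scopes to the .compile_time modifier; whether a call is evaluated at comptime is now decided in Sema via the block.isComptime() checks added to analyzeCall below. A hedged source-level sketch (square is an illustrative function, not from the commit) of behavior that is expected to stay the same:

    fn square(x: u32) u32 {
        return x * x;
    }

    comptime {
        // Still evaluated at comptime because the enclosing scope is comptime,
        // even though AstGen now leaves the call modifier as .auto.
        if (square(4) != 16) @compileError("bad result");
    }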
lib/std/zig/Zir.zig
@@ -4735,6 +4735,7 @@ pub const FnInfo = struct {
body: []const Inst.Index,
ret_ty_ref: Zir.Inst.Ref,
total_params_len: u32,
+ inferred_error_set: bool,
};
pub fn getParamBody(zir: Zir, fn_inst: Inst.Index) []const Zir.Inst.Index {
@@ -4774,8 +4775,9 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
body: []const Inst.Index,
ret_ty_ref: Inst.Ref,
ret_ty_body: []const Inst.Index,
+ ies: bool,
} = switch (tags[@intFromEnum(fn_inst)]) {
- .func, .func_inferred => blk: {
+ .func, .func_inferred => |tag| blk: {
const inst_data = datas[@intFromEnum(fn_inst)].pl_node;
const extra = zir.extraData(Inst.Func, inst_data.payload_index);
@@ -4805,6 +4807,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
.ret_ty_ref = ret_ty_ref,
.ret_ty_body = ret_ty_body,
.body = body,
+ .ies = tag == .func_inferred,
};
},
.func_fancy => blk: {
@@ -4812,7 +4815,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
var extra_index: usize = extra.end;
- var ret_ty_ref: Inst.Ref = .void_type;
+ var ret_ty_ref: Inst.Ref = .none;
var ret_ty_body: []const Inst.Index = &.{};
if (extra.data.bits.has_cc_body) {
@@ -4828,6 +4831,8 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
} else if (extra.data.bits.has_ret_ty_ref) {
ret_ty_ref = @enumFromInt(zir.extra[extra_index]);
extra_index += 1;
+ } else {
+ ret_ty_ref = .void_type;
}
extra_index += @intFromBool(extra.data.bits.has_any_noalias);
@@ -4839,6 +4844,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
.ret_ty_ref = ret_ty_ref,
.ret_ty_body = ret_ty_body,
.body = body,
+ .ies = extra.data.bits.is_inferred_error,
};
},
else => unreachable,
@@ -4860,6 +4866,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
.ret_ty_ref = info.ret_ty_ref,
.body = info.body,
.total_params_len = total_params_len,
+ .inferred_error_set = info.ies,
};
}
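Zir.FnInfo now reports whether the function uses an inferred error set; analyzeCall in src/Sema.zig below reads fn_zir_info.inferred_error_set to rebuild the full error-union return type of a generic instantiation. At the source level the flag distinguishes the following two forms (illustrative sketch, names not from the commit):

    const DeclaredSet = error{Oops};

    // inferred_error_set == false: the error set is written out explicitly.
    fn declared() DeclaredSet!u8 {
        return 1;
    }

    // inferred_error_set == true: `!u8` asks the compiler to infer the set.
    fn inferred() !u8 {
        return 1;
    }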
lib/std/zig.zig
@@ -749,6 +749,8 @@ pub const SimpleComptimeReason = enum(u32) {
array_mul_factor,
slice_cat_operand,
comptime_call_target,
+ inline_call_target,
+ generic_call_target,
wasm_memory_index,
work_group_dim_index,
@@ -791,7 +793,6 @@ pub const SimpleComptimeReason = enum(u32) {
struct_field_default_value,
enum_field_tag_value,
slice_single_item_ptr_bounds,
- comptime_param_arg,
stored_to_comptime_field,
stored_to_comptime_var,
casted_to_comptime_enum,
@@ -828,6 +829,8 @@ pub const SimpleComptimeReason = enum(u32) {
.array_mul_factor => "array multiplication factor must be comptime-known",
.slice_cat_operand => "slice being concatenated must be comptime-known",
.comptime_call_target => "function being called at comptime must be comptime-known",
+ .inline_call_target => "function being called inline must be comptime-known",
+ .generic_call_target => "generic function being called must be comptime-known",
.wasm_memory_index => "wasm memory index must be comptime-known",
.work_group_dim_index => "work group dimension index must be comptime-known",
@@ -865,7 +868,6 @@ pub const SimpleComptimeReason = enum(u32) {
.struct_field_default_value => "struct field default value must be comptime-known",
.enum_field_tag_value => "enum field tag value must be comptime-known",
.slice_single_item_ptr_bounds => "slice of single-item pointer must have comptime-known bounds",
- .comptime_param_arg => "argument to comptime parameter must be comptime-known",
.stored_to_comptime_field => "value stored to a comptime field must be comptime-known",
.stored_to_comptime_var => "value stored to a comptime variable must be comptime-known",
.casted_to_comptime_enum => "value casted to enum with 'comptime_int' tag type must be comptime-known",
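A hedged sketch of user code that would hit the new inline_call_target reason (noop is illustrative; the snippet is expected to be rejected at compile time, with the note text taken from the table above and the surrounding wording approximated):

    fn noop() void {}

    test "inline call target must be comptime-known" {
        var fp: *const fn () void = &noop;
        _ = &fp; // fp is now runtime-known
        // Expected note: "function being called inline must be comptime-known"
        @call(.always_inline, fp, .{});
    }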
src/Sema.zig
@@ -46,21 +46,6 @@ branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
/// break instruction up the stack to find the corresponding Block.
comptime_break_inst: Zir.Inst.Index = undefined,
-/// When doing a generic function instantiation, this array collects a value
-/// for each parameter of the generic owner. `none` for non-comptime parameters.
-/// This is a separate array from `block.params` so that it can be passed
-/// directly to `comptime_args` when calling `InternPool.getFuncInstance`.
-/// This memory is allocated by a parent `Sema` in the temporary arena, and is
-/// used only to add a `func_instance` into the `InternPool`.
-comptime_args: []InternPool.Index = &.{},
-/// Used to communicate from a generic function instantiation to the logic that
-/// creates a generic function instantiation value in `funcCommon`.
-generic_owner: InternPool.Index = .none,
-/// When `generic_owner` is not none, this contains the generic function
-/// instantiation callsite so that compile errors on the parameter types of the
-/// instantiation can point back to the instantiation site in addition to the
-/// declaration site.
-generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
/// These are lazily created runtime blocks from block_inline instructions.
/// They are created when an break_inline passes through a runtime condition, because
/// Sema must convert comptime control flow to runtime control flow, which means
@@ -862,12 +847,29 @@ const ComptimeReason = union(enum) {
union_init,
struct_init,
tuple_init,
- param_ty_arg,
- ret_ty_call,
- ret_ty_generic_call,
},
},
+ /// Like `comptime_only`, but for a parameter type.
+ /// Includes a "parameter type declared here" note.
+ comptime_only_param_ty: struct {
+ ty: Type,
+ param_ty_src: LazySrcLoc,
+ },
+
+ /// Like `comptime_only`, but for a return type.
+ /// Includes a "return type declared here" note.
+ comptime_only_ret_ty: struct {
+ ty: Type,
+ is_generic_inst: bool,
+ ret_ty_src: LazySrcLoc,
+ },
+
+ /// Evaluating at comptime because we're evaluating an argument to a parameter marked `comptime`.
+ comptime_param: struct {
+ comptime_src: LazySrcLoc,
+ },
+
fn explain(reason: ComptimeReason, sema: *Sema, src: LazySrcLoc, err_msg: *Zcu.ErrorMsg) !void {
switch (reason) {
.simple => |simple| {
@@ -878,13 +880,25 @@ const ComptimeReason = union(enum) {
.union_init => .{ "initializer of comptime-only union", "must be comptime-known" },
.struct_init => .{ "initializer of comptime-only struct", "must be comptime-known" },
.tuple_init => .{ "initializer of comptime-only tuple", "must be comptime-known" },
- .param_ty_arg => .{ "argument to parameter with comptime-only type", "must be comptime-known" },
- .ret_ty_call => .{ "function with comptime-only return type", "is evaluated at comptime" },
- .ret_ty_generic_call => .{ "generic function instantiated with comptime-only return type", "is evaluated at comptime" },
};
try sema.errNote(src, err_msg, "{s} '{}' {s}", .{ pre, co.ty.fmt(sema.pt), post });
try sema.explainWhyTypeIsComptime(err_msg, src, co.ty);
},
+ .comptime_only_param_ty => |co| {
+ try sema.errNote(src, err_msg, "argument to parameter with comptime-only type '{}' must be comptime-known", .{co.ty.fmt(sema.pt)});
+ try sema.errNote(co.param_ty_src, err_msg, "parameter type declared here", .{});
+ try sema.explainWhyTypeIsComptime(err_msg, src, co.ty);
+ },
+ .comptime_only_ret_ty => |co| {
+ const function_with: []const u8 = if (co.is_generic_inst) "generic function instantiated with" else "function with";
+ try sema.errNote(src, err_msg, "call to {s} comptime-only return type '{}' is evaluated at comptime", .{ function_with, co.ty.fmt(sema.pt) });
+ try sema.errNote(co.ret_ty_src, err_msg, "return type declared here", .{});
+ try sema.explainWhyTypeIsComptime(err_msg, src, co.ty);
+ },
+ .comptime_param => |cp| {
+ try sema.errNote(src, err_msg, "argument to comptime parameter must be comptime-known", .{});
+ try sema.errNote(cp.comptime_src, err_msg, "parameter declared comptime here", .{});
+ },
}
}
};
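A hedged sketch of user code exercising the new comptime_param reason together with its "parameter declared comptime here" note (atLeast is illustrative; the snippet is expected to fail to compile, with note wording taken from the strings above):

    fn atLeast(comptime min: usize, x: usize) usize {
        return if (x < min) min else x;
    }

    test "argument to a comptime parameter must be comptime-known" {
        var runtime_min: usize = 3;
        _ = &runtime_min;
        // Expected notes:
        //   "argument to comptime parameter must be comptime-known"
        //   "parameter declared comptime here"
        _ = atLeast(runtime_min, 10);
    }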
@@ -7423,8 +7437,9 @@ const CallArgsInfo = union(enum) {
/// Analyzes the arg at `arg_index` and coerces it to `param_ty`.
/// `param_ty` may be `generic_poison`. A value of `null` indicates a varargs parameter.
- /// `func_ty_info` may be the type before instantiation, even if a generic
- /// instantiation has been partially completed.
+ /// `func_ty_info` may be the type before instantiation, even if a generic instantiation is in progress.
+ /// Emits a compile error if the argument is not comptime-known despite either `block.isComptime()` or
+ /// the parameter being marked `comptime`.
fn analyzeArg(
cai: CallArgsInfo,
sema: *Sema,
@@ -7433,6 +7448,7 @@ const CallArgsInfo = union(enum) {
maybe_param_ty: ?Type,
func_ty_info: InternPool.Key.FuncType,
func_inst: Air.Inst.Ref,
+ maybe_func_src_inst: ?InternPool.TrackedInst.Index,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
@@ -7460,11 +7476,22 @@ const CallArgsInfo = union(enum) {
const parent_comptime = block.comptime_reason;
defer block.comptime_reason = parent_comptime;
// Note that we are indexing into parameters, not arguments, so use `arg_index` instead of `real_arg_idx`
- if (arg_index < @min(param_count, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) {
- block.comptime_reason = .{ .reason = .{
- .src = cai.argSrc(block, arg_index),
- .r = .{ .simple = .comptime_param_arg },
- } };
+ if (std.math.cast(u5, arg_index)) |i| {
+ if (i < param_count and func_ty_info.paramIsComptime(i)) {
+ block.comptime_reason = .{
+ .reason = .{
+ .src = cai.argSrc(block, arg_index),
+ .r = .{
+ .comptime_param = .{
+ .comptime_src = if (maybe_func_src_inst) |src_inst| .{
+ .base_node_inst = src_inst,
+ .offset = .{ .func_decl_param_comptime = @intCast(arg_index) },
+ } else unreachable, // should be non-null because the function is generic
+ },
+ },
+ },
+ };
+ }
}
// Give the arg its result type
const provide_param_ty = if (maybe_param_ty) |t| t else Type.generic_poison;
@@ -7472,6 +7499,10 @@ const CallArgsInfo = union(enum) {
// Resolve the arg!
const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst);
+ if (block.isComptime() and !try sema.isComptimeKnown(uncoerced_arg)) {
+ return sema.failWithNeededComptime(block, cai.argSrc(block, arg_index), null);
+ }
+
if (sema.typeOf(uncoerced_arg).zigTypeTag(zcu) == .noreturn) {
// This terminates resolution of arguments. The caller should
// propagate this.
@@ -7507,104 +7538,10 @@ const CallArgsInfo = union(enum) {
}
};
-/// While performing an inline call, we need to switch between two Sema states a few times: the
-/// state for the caller (with the callee's `code`, `fn_ret_ty`, etc), and the state for the callee.
-/// These cannot be two separate Sema instances as they must share AIR.
-/// Therefore, this struct acts as a helper to switch between the two.
-/// This switching is required during argument evaluation, where function argument analysis must be
-/// interleaved with resolving generic parameter types.
-const InlineCallSema = struct {
- sema: *Sema,
- cur: enum {
- caller,
- callee,
- },
-
- other_code: Zir,
- other_func_index: InternPool.Index,
- other_fn_ret_ty: Type,
- other_fn_ret_ty_ies: ?*InferredErrorSet,
- other_inst_map: InstMap,
- other_error_return_trace_index_on_fn_entry: Air.Inst.Ref,
- other_generic_owner: InternPool.Index,
- other_generic_call_src: LazySrcLoc,
-
- /// Sema should currently be set up for the caller (i.e. unchanged yet). This init will not
- /// change that. The other parameters contain data for the callee Sema. The other modified
- /// Sema fields are all initialized to default values for the callee.
- /// Must call deinit on the result.
- fn init(
- sema: *Sema,
- callee_code: Zir,
- callee_func_index: InternPool.Index,
- callee_error_return_trace_index_on_fn_entry: Air.Inst.Ref,
- ) InlineCallSema {
- return .{
- .sema = sema,
- .cur = .caller,
- .other_code = callee_code,
- .other_func_index = callee_func_index,
- .other_fn_ret_ty = Type.void,
- .other_fn_ret_ty_ies = null,
- .other_inst_map = .{},
- .other_error_return_trace_index_on_fn_entry = callee_error_return_trace_index_on_fn_entry,
- .other_generic_owner = .none,
- .other_generic_call_src = LazySrcLoc.unneeded,
- };
- }
-
- /// Switch back to the caller Sema if necessary and free all temporary state of the callee Sema.
- fn deinit(ics: *InlineCallSema) void {
- switch (ics.cur) {
- .caller => {},
- .callee => ics.swap(),
- }
- // Callee Sema owns the inst_map memory
- ics.other_inst_map.deinit(ics.sema.gpa);
- ics.* = undefined;
- }
-
- /// Returns a Sema instance suitable for usage from the caller context.
- fn caller(ics: *InlineCallSema) *Sema {
- switch (ics.cur) {
- .caller => {},
- .callee => ics.swap(),
- }
- return ics.sema;
- }
-
- /// Returns a Sema instance suitable for usage from the callee context.
- fn callee(ics: *InlineCallSema) *Sema {
- switch (ics.cur) {
- .caller => ics.swap(),
- .callee => {},
- }
- return ics.sema;
- }
-
- /// Internal use only. Swaps to the other Sema state.
- fn swap(ics: *InlineCallSema) void {
- ics.cur = switch (ics.cur) {
- .caller => .callee,
- .callee => .caller,
- };
- // zig fmt: off
- std.mem.swap(Zir, &ics.sema.code, &ics.other_code);
- std.mem.swap(InternPool.Index, &ics.sema.func_index, &ics.other_func_index);
- std.mem.swap(Type, &ics.sema.fn_ret_ty, &ics.other_fn_ret_ty);
- std.mem.swap(?*InferredErrorSet, &ics.sema.fn_ret_ty_ies, &ics.other_fn_ret_ty_ies);
- std.mem.swap(InstMap, &ics.sema.inst_map, &ics.other_inst_map);
- std.mem.swap(InternPool.Index, &ics.sema.generic_owner, &ics.other_generic_owner);
- std.mem.swap(LazySrcLoc, &ics.sema.generic_call_src, &ics.other_generic_call_src);
- std.mem.swap(Air.Inst.Ref, &ics.sema.error_return_trace_index_on_fn_entry, &ics.other_error_return_trace_index_on_fn_entry);
- // zig fmt: on
- }
-};
-
fn analyzeCall(
sema: *Sema,
block: *Block,
- func: Air.Inst.Ref,
+ callee: Air.Inst.Ref,
func_ty: Type,
func_src: LazySrcLoc,
call_src: LazySrcLoc,
@@ -7616,983 +7553,696 @@ fn analyzeCall(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
+ const arena = sema.arena;
+
+ if (modifier == .async_kw) {
+ return sema.failWithUseOfAsync(block, call_src);
+ }
+
+ const maybe_func_inst = try sema.funcDeclSrcInst(callee);
+ const func_ret_ty_src: LazySrcLoc = if (maybe_func_inst) |fn_decl_inst| .{
+ .base_node_inst = fn_decl_inst,
+ .offset = .{ .node_offset_fn_type_ret_ty = 0 },
+ } else func_src;
- const callee_ty = sema.typeOf(func);
const func_ty_info = zcu.typeToFunc(func_ty).?;
- const cc = func_ty_info.cc;
- if (try sema.resolveValue(func)) |func_val|
- if (func_val.isUndef(zcu))
- return sema.failWithUseOfUndef(block, call_src);
- if (!callConvIsCallable(cc)) {
- const maybe_func_inst = try sema.funcDeclSrcInst(func);
- const msg = msg: {
+ if (!callConvIsCallable(func_ty_info.cc)) {
+ return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(
func_src,
"unable to call function with calling convention '{s}'",
- .{@tagName(cc)},
+ .{@tagName(func_ty_info.cc)},
);
- errdefer msg.destroy(sema.gpa);
-
+ errdefer msg.destroy(gpa);
if (maybe_func_inst) |func_inst| try sema.errNote(.{
.base_node_inst = func_inst,
- .offset = LazySrcLoc.Offset.nodeOffset(0),
+ .offset = .nodeOffset(0),
}, msg, "function declared here", .{});
break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(block, msg);
+ });
}
- const call_tag: Air.Inst.Tag = switch (modifier) {
- .auto,
- .always_inline,
- .compile_time,
- .no_async,
- => Air.Inst.Tag.call,
-
- .never_tail => Air.Inst.Tag.call_never_tail,
- .never_inline => Air.Inst.Tag.call_never_inline,
- .always_tail => Air.Inst.Tag.call_always_tail,
-
- .async_kw => return sema.failWithUseOfAsync(block, call_src),
- };
+ // We need this value in a few code paths.
+ const callee_val = try sema.resolveDefinedValue(block, call_src, callee);
+ // If the callee is a comptime-known *non-extern* function, `func_val` is populated.
+ // If it is a comptime-known extern function, `func_is_extern` is set instead.
+ // If it is not comptime-known, neither is set.
+ const func_val: ?Value, const func_is_extern: bool = if (callee_val) |c| switch (ip.indexToKey(c.toIntern())) {
+ .func => .{ c, false },
+ .ptr => switch (try sema.pointerDerefExtra(block, func_src, c)) {
+ .runtime_load, .needed_well_defined, .out_of_bounds => .{ null, false },
+ .val => |pointee| switch (ip.indexToKey(pointee.toIntern())) {
+ .func => .{ pointee, false },
+ .@"extern" => .{ null, true },
+ else => unreachable,
+ },
+ },
+ .@"extern" => .{ null, true },
+ else => unreachable,
+ } else .{ null, false };
- if (modifier == .never_inline and func_ty_info.cc == .@"inline") {
- return sema.fail(block, call_src, "'never_inline' call of inline function", .{});
+ if (func_ty_info.is_generic and func_val == null) {
+ return sema.failWithNeededComptime(block, func_src, .{ .simple = .generic_call_target });
}
- if (modifier == .always_inline and func_ty_info.is_noinline) {
- return sema.fail(block, call_src, "'always_inline' call of noinline function", .{});
- }
-
- const gpa = sema.gpa;
- const func_ret_ty_src: LazySrcLoc = if (try sema.funcDeclSrcInst(func)) |fn_decl_inst| .{
- .base_node_inst = fn_decl_inst,
- .offset = .{ .node_offset_fn_type_ret_ty = 0 },
- } else func_src;
+ const inline_requested = func_ty_info.cc == .@"inline" or modifier == .always_inline;
- // If this is not `null`, the call is comptime.
- var comptime_call_reason: ?BlockComptimeReason = cr: {
- if (block.comptime_reason) |r| break :cr r;
- if (modifier == .compile_time) break :cr .{ .reason = .{
- .src = call_src,
- .r = .{ .simple = .comptime_call_modifier },
- } };
- break :cr null;
- };
-
- const is_generic_call = func_ty_info.is_generic;
- var is_inline_call = comptime_call_reason != null or modifier == .always_inline or func_ty_info.cc == .@"inline";
- if (!is_inline_call) {
- if (try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
- is_inline_call = true;
- comptime_call_reason = .{ .reason = .{
- .src = func_ret_ty_src,
- .r = .{ .comptime_only = .{
- .ty = .fromInterned(func_ty_info.return_type),
- .msg = .ret_ty_call,
- } },
+ // If the modifier is `.compile_time`, or if the return type is non-generic and comptime-only,
+ // then we need to enter a comptime scope *now* to make sure the args are comptime-eval'd.
+ const old_block_comptime_reason = block.comptime_reason;
+ defer block.comptime_reason = old_block_comptime_reason;
+ if (!block.isComptime()) {
+ if (modifier == .compile_time) {
+ block.comptime_reason = .{ .reason = .{
+ .src = call_src,
+ .r = .{ .simple = .comptime_call_modifier },
} };
- }
- }
-
- if (sema.func_is_naked and !is_inline_call) {
- const msg = msg: {
- const msg = try sema.errMsg(call_src, "runtime {s} not allowed in naked function", .{@tagName(operation)});
- errdefer msg.destroy(sema.gpa);
-
- switch (operation) {
- .call, .@"@call", .@"@panic", .@"error return" => {},
- .@"safety check" => try sema.errNote(call_src, msg, "use @setRuntimeSafety to disable runtime safety", .{}),
- }
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(block, msg);
- }
-
- if (!is_inline_call and is_generic_call) {
- var comptime_ret_ty: Type = undefined;
- if (sema.instantiateGenericCall(
- block,
- func,
- func_src,
- call_src,
- ensure_result_used,
- args_info,
- call_tag,
- call_dbg_node,
- &comptime_ret_ty,
- )) |some| {
- return some;
- } else |err| switch (err) {
- error.GenericPoison => {
- is_inline_call = true;
- },
- error.ComptimeReturn => {
- is_inline_call = true;
- comptime_call_reason = .{ .reason = .{
- .src = func_ret_ty_src,
+ } else if (!inline_requested and try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
+ block.comptime_reason = .{
+ .reason = .{
+ .src = call_src,
.r = .{
- .comptime_only = .{
- .ty = comptime_ret_ty,
- .msg = .ret_ty_generic_call,
+ .comptime_only_ret_ty = .{
+ .ty = .fromInterned(func_ty_info.return_type),
+ .is_generic_inst = false,
+ .ret_ty_src = func_ret_ty_src,
},
},
- } };
- },
- else => |e| return e,
+ },
+ };
}
}
- const is_comptime_call = comptime_call_reason != null;
- // `comptime_call_reason` shouldn't be mutated again
- defer assert(is_comptime_call == (comptime_call_reason != null));
+ // These values are undefined if `func_val == null`.
+ const fn_nav: InternPool.Nav, const fn_zir: Zir, const fn_tracked_inst: InternPool.TrackedInst.Index, const fn_zir_inst: Zir.Inst.Index, const fn_zir_info: Zir.FnInfo = if (func_val) |f| b: {
+ const info = ip.indexToKey(f.toIntern()).func;
+ const nav = ip.getNav(info.owner_nav);
+ const resolved_func_inst = info.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail;
+ const file = zcu.fileByIndex(resolved_func_inst.file);
+ assert(file.zir_loaded);
+ const zir_info = file.zir.getFnInfo(resolved_func_inst.inst);
+ break :b .{ nav, file.zir, info.zir_body_inst, resolved_func_inst.inst, zir_info };
+ } else .{ undefined, undefined, undefined, undefined, undefined };
+
+ // This is the `inst_map` used when evaluating generic parameters and return types.
+ var generic_inst_map: InstMap = .{};
+ defer generic_inst_map.deinit(gpa);
+ if (func_ty_info.is_generic) {
+ try generic_inst_map.ensureSpaceForInstructions(gpa, fn_zir_info.param_body);
+ }
+
+ // This exists so that `generic_block` below can include a "called from here" note back to this
+ // call site when analyzing generic parameter/return types.
+ var generic_inlining: Block.Inlining = if (func_ty_info.is_generic) .{
+ .call_block = block,
+ .call_src = call_src,
+ .has_comptime_args = false, // unused by error reporting
+ .func = .none, // unused by error reporting
+ .comptime_result = .none, // unused by error reporting
+ .merges = undefined, // unused because we'll never `return`
+ } else undefined;
- if (is_comptime_call and modifier == .never_inline) {
- return sema.fail(block, call_src, "unable to perform 'never_inline' call at compile-time", .{});
- }
+ // This is the block in which we evaluate generic function components: that is, generic parameter
+ // types and the generic return type. This must not be used if the function is not generic.
+ // `comptime_reason` is set as needed.
+ var generic_block: Block = if (func_ty_info.is_generic) .{
+ .parent = null,
+ .sema = sema,
+ .namespace = fn_nav.analysis.?.namespace,
+ .instructions = .{},
+ .inlining = &generic_inlining,
+ .src_base_inst = fn_nav.analysis.?.zir_index,
+ .type_name_ctx = fn_nav.fqn,
+ } else undefined;
+ defer if (func_ty_info.is_generic) generic_block.instructions.deinit(gpa);
+
+ if (func_ty_info.is_generic) {
+ // We certainly depend on the generic owner's signature!
+ try sema.declareDependency(.{ .src_hash = fn_tracked_inst });
+ }
+
+ const args = try arena.alloc(Air.Inst.Ref, args_info.count());
+ for (args, 0..) |*arg, arg_idx| {
+ const param_ty: ?Type = if (arg_idx < func_ty_info.param_types.len) ty: {
+ const raw = func_ty_info.param_types.get(ip)[arg_idx];
+ if (raw != .generic_poison_type) break :ty .fromInterned(raw);
+
+ // We must discover the generic parameter type.
+ assert(func_ty_info.is_generic);
+ const param_inst_idx = fn_zir_info.param_body[arg_idx];
+ const param_inst = fn_zir.instructions.get(@intFromEnum(param_inst_idx));
+ switch (param_inst.tag) {
+ .param_anytype, .param_anytype_comptime => break :ty .generic_poison,
+ .param, .param_comptime => {},
+ else => unreachable,
+ }
- const result: Air.Inst.Ref = if (is_inline_call) res: {
- const old_comptime_reason = block.comptime_reason;
- block.comptime_reason = comptime_call_reason;
- defer block.comptime_reason = old_comptime_reason;
+ // Evaluate the generic parameter type. We need to switch out `sema.code` and `sema.inst_map`, because
+ // the function definition may be in a different file to the call site.
+ const old_code = sema.code;
+ const old_inst_map = sema.inst_map;
+ defer {
+ generic_inst_map = sema.inst_map;
+ sema.code = old_code;
+ sema.inst_map = old_inst_map;
+ }
+ sema.code = fn_zir;
+ sema.inst_map = generic_inst_map;
- const func_val = try sema.resolveConstDefinedValue(block, func_src, func, .{ .simple = .comptime_call_target });
- const module_fn_index = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) {
- .@"extern" => return sema.fail(block, call_src, "{s} call of extern function", .{
- @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
- }),
- .func => func_val.toIntern(),
- .ptr => |ptr| blk: {
- switch (ptr.base_addr) {
- .nav => |nav_index| if (ptr.byte_offset == 0) {
- try sema.ensureNavResolved(call_src, nav_index, .fully);
- const nav = ip.getNav(nav_index);
- if (nav.getExtern(ip) != null)
- return sema.fail(block, call_src, "{s} call of extern function pointer", .{
- if (is_comptime_call) "comptime" else "inline",
- });
- break :blk nav.status.fully_resolved.val;
- },
- else => {},
- }
- assert(callee_ty.isPtrAtRuntime(zcu));
- return sema.fail(block, call_src, "{s} call of function pointer", .{
- if (is_comptime_call) "comptime" else "inline",
- });
- },
- else => unreachable,
- };
- if (func_ty_info.is_var_args) {
- return sema.fail(block, call_src, "{s} call of variadic function", .{
- if (is_comptime_call) "comptime" else "inline",
- });
- }
+ const extra = sema.code.extraData(Zir.Inst.Param, param_inst.data.pl_tok.payload_index);
+ const param_src = generic_block.tokenOffset(param_inst.data.pl_tok.src_tok);
+ const body = sema.code.bodySlice(extra.end, extra.data.body_len);
- // Analyze the ZIR. The same ZIR gets analyzed into a runtime function
- // or an inlined call depending on what union tag the `label` field is
- // set to in the `Block`.
- // This block instruction will be used to capture the return value from the
- // inlined function.
- const need_debug_scope = !is_comptime_call and !block.is_typeof and !block.ownerModule().strip;
- const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
- try sema.air_instructions.append(gpa, .{
- .tag = if (need_debug_scope) .dbg_inline_block else .block,
- .data = undefined,
- });
- // This one is shared among sub-blocks within the same callee, but not
- // shared among the entire inline/comptime call stack.
- var inlining: Block.Inlining = .{
- .call_block = block,
- .call_src = call_src,
- .has_comptime_args = false,
- .func = module_fn_index,
- .comptime_result = undefined,
- .merges = .{
- .src_locs = .{},
- .results = .{},
- .br_list = .{},
- .block_inst = block_inst,
- },
- };
+ generic_block.comptime_reason = .{ .reason = .{
+ .r = .{ .simple = .function_parameters },
+ .src = param_src,
+ } };
- const module_fn = zcu.funcInfo(module_fn_index);
+ const ty_ref = try sema.resolveInlineBody(&generic_block, body, param_inst_idx);
+ const param_ty = try sema.analyzeAsType(&generic_block, param_src, ty_ref);
- // The call site definitely depends on the function's signature.
- try sema.declareDependency(.{ .src_hash = module_fn.zir_body_inst });
+ if (!param_ty.isValidParamType(zcu)) {
+ const opaque_str = if (param_ty.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
+ return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{
+ opaque_str, param_ty.fmt(pt),
+ });
+ }
- // This is not a function instance, so the function's `Nav` has analysis
- // state -- we don't need to check `generic_owner`.
- const fn_nav = ip.getNav(module_fn.owner_nav);
+ break :ty param_ty;
+ } else null; // vararg
- // We effectively want a child Sema here, but can't literally do that, because we need AIR
- // to be shared. InlineCallSema is a wrapper which handles this for us. While `ics` is in
- // scope, we should use its `caller`/`callee` methods rather than using `sema` directly
- // whenever performing an operation where the difference matters.
- var ics = InlineCallSema.init(
- sema,
- zcu.navFileScope(module_fn.owner_nav).zir,
- module_fn_index,
- block.error_return_trace_index,
- );
- defer ics.deinit();
+ arg.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, callee, maybe_func_inst);
+ const arg_ty = sema.typeOf(arg.*);
+ if (arg_ty.zigTypeTag(zcu) == .noreturn) {
+ return arg.*; // terminate analysis here
+ }
+
+ if (func_ty_info.is_generic) {
+ // We need to put the argument into `generic_inst_map` so that other parameters can refer to it.
+ const param_inst_idx = fn_zir_info.param_body[arg_idx];
+ const declared_comptime = if (std.math.cast(u5, arg_idx)) |i| func_ty_info.paramIsComptime(i) else false;
+ const param_is_comptime = declared_comptime or try arg_ty.comptimeOnlySema(pt);
+ if (param_is_comptime) {
+ if (!try sema.isComptimeKnown(arg.*)) {
+ assert(!declared_comptime); // `analyzeArg` handles this
+ const arg_src = args_info.argSrc(block, arg_idx);
+ const param_ty_src: LazySrcLoc = .{
+ .base_node_inst = maybe_func_inst.?, // the function is generic
+ .offset = .{ .func_decl_param_ty = @intCast(arg_idx) },
+ };
+ return sema.failWithNeededComptime(
+ block,
+ arg_src,
+ .{ .comptime_only_param_ty = .{ .ty = arg_ty, .param_ty_src = param_ty_src } },
+ );
+ }
+ generic_inst_map.putAssumeCapacityNoClobber(param_inst_idx, arg.*);
+ } else {
+ // We need a dummy instruction with this type. It doesn't actually need to be in any block,
+ // since it will never be referenced at runtime!
+ const dummy: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
+ try sema.air_instructions.append(gpa, .{ .tag = .alloc, .data = .{ .ty = arg_ty } });
+ generic_inst_map.putAssumeCapacityNoClobber(param_inst_idx, dummy.toRef());
+ }
+ }
+ }
- var child_block: Block = .{
- .parent = null,
- .sema = sema,
- // The function body exists in the same namespace as the corresponding function declaration.
- .namespace = fn_nav.analysis.?.namespace,
- .instructions = .{},
- .label = null,
- .inlining = &inlining,
- .is_typeof = block.is_typeof,
- .comptime_reason = if (is_comptime_call) .inlining_parent else null,
- .error_return_trace_index = block.error_return_trace_index,
- .runtime_cond = block.runtime_cond,
- .runtime_loop = block.runtime_loop,
- .runtime_index = block.runtime_index,
- .src_base_inst = fn_nav.analysis.?.zir_index,
- .type_name_ctx = fn_nav.fqn,
- };
+ // This return type is never generic poison.
+ // However, if it has an IES, it is always associated with the callee value.
+ // This is not correct for inline calls (where it should be an ad-hoc IES), nor for generic
+ // calls (where it should be the IES of the instantiation). However, it's how we print this
+ // in error messages.
+ const resolved_ret_ty: Type = ret_ty: {
+ if (!func_ty_info.is_generic) break :ret_ty .fromInterned(func_ty_info.return_type);
- const merges = &child_block.inlining.?.merges;
+ const maybe_poison_bare = if (fn_zir_info.inferred_error_set) maybe_poison: {
+ break :maybe_poison ip.errorUnionPayload(func_ty_info.return_type);
+ } else func_ty_info.return_type;
- defer child_block.instructions.deinit(gpa);
- defer merges.deinit(gpa);
+ if (maybe_poison_bare != .generic_poison_type) break :ret_ty .fromInterned(func_ty_info.return_type);
- try sema.emitBackwardBranch(block, call_src);
+ // Evaluate the generic return type. As with generic parameters, we switch out `sema.code` and `sema.inst_map`.
- // Whether this call should be memoized, set to false if the call can
- // mutate comptime state.
- // TODO: comptime call memoization is currently not supported under incremental compilation
- // since dependencies are not marked on callers. If we want to keep this around (we should
- // check that it's worthwhile first!), each memoized call needs an `AnalUnit`.
- var should_memoize = !zcu.comp.incremental;
-
- // If it's a comptime function call, we need to memoize it as long as no external
- // comptime memory is mutated.
- const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
-
- const owner_info = zcu.typeToFunc(Type.fromInterned(module_fn.ty)).?;
- const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len);
- var new_fn_info: InternPool.GetFuncTypeKey = .{
- .param_types = new_param_types,
- .return_type = owner_info.return_type,
- .noalias_bits = owner_info.noalias_bits,
- .cc = owner_info.cc,
- .is_var_args = owner_info.is_var_args,
- .is_noinline = owner_info.is_noinline,
- .is_generic = owner_info.is_generic,
- };
+ assert(func_ty_info.is_generic);
- // This will have return instructions analyzed as break instructions to
- // the block_inst above. Here we are performing "comptime/inline semantic analysis"
- // for a function body, which means we must map the parameter ZIR instructions to
- // the AIR instructions of the callsite. The callee could be a generic function
- // which means its parameter type expressions must be resolved in order and used
- // to successively coerce the arguments.
- const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail);
- try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
-
- var arg_i: u32 = 0;
- for (fn_info.param_body) |inst| {
- const opt_noreturn_ref = try analyzeInlineCallArg(
- &ics,
- block,
- &child_block,
- inst,
- new_param_types,
- &arg_i,
- args_info,
- is_comptime_call,
- &should_memoize,
- memoized_arg_values,
- func_ty_info,
- func,
- );
- if (opt_noreturn_ref) |ref| {
- // Analyzing this argument gave a ref of a noreturn type. Terminate argument analysis here.
- return ref;
- }
+ const old_code = sema.code;
+ const old_inst_map = sema.inst_map;
+ defer {
+ generic_inst_map = sema.inst_map;
+ sema.code = old_code;
+ sema.inst_map = old_inst_map;
}
+ sema.code = fn_zir;
+ sema.inst_map = generic_inst_map;
- // From here, we only really need to use the callee Sema. Make it the active one, then we
- // can just use `sema` directly.
- _ = ics.callee();
+ generic_block.comptime_reason = .{ .reason = .{
+ .r = .{ .simple = .function_ret_ty },
+ .src = func_ret_ty_src,
+ } };
- if (!inlining.has_comptime_args) {
- var block_it = block;
- while (block_it.inlining) |parent_inlining| {
- if (!parent_inlining.has_comptime_args and parent_inlining.func == module_fn_index) {
- const err_msg = try sema.errMsg(call_src, "inline call is recursive", .{});
- return sema.failWithOwnedErrorMsg(null, err_msg);
- }
- block_it = parent_inlining.call_block;
- }
+ const bare_ty = if (fn_zir_info.ret_ty_ref != .none) bare: {
+ assert(fn_zir_info.ret_ty_body.len == 0);
+ break :bare try sema.resolveType(&generic_block, func_ret_ty_src, fn_zir_info.ret_ty_ref);
+ } else bare: {
+ assert(fn_zir_info.ret_ty_body.len != 0);
+ const ty_ref = try sema.resolveInlineBody(&generic_block, fn_zir_info.ret_ty_body, fn_zir_inst);
+ break :bare try sema.analyzeAsType(&generic_block, func_ret_ty_src, ty_ref);
+ };
+ assert(bare_ty.toIntern() != .generic_poison_type);
+
+ const full_ty = if (fn_zir_info.inferred_error_set) full: {
+ try sema.validateErrorUnionPayloadType(block, bare_ty, func_ret_ty_src);
+ const set = ip.errorUnionSet(func_ty_info.return_type);
+ break :full try pt.errorUnionType(.fromInterned(set), bare_ty);
+ } else bare_ty;
+
+ if (!full_ty.isValidReturnType(zcu)) {
+ const opaque_str = if (full_ty.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
+ return sema.fail(block, func_ret_ty_src, "{s}return type '{}' not allowed", .{
+ opaque_str, full_ty.fmt(pt),
+ });
}
- // In case it is a generic function with an expression for the return type that depends
- // on parameters, we must now do the same for the return type as we just did with
- // each of the parameters, resolving the return type and providing it to the child
- // `Sema` so that it can be used for the `ret_ptr` instruction.
- const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } };
- const ret_ty_inst = if (fn_info.ret_ty_body.len != 0) r: {
- const old_child_comptime_reason = child_block.comptime_reason;
- defer child_block.comptime_reason = old_child_comptime_reason;
- child_block.comptime_reason = .{ .reason = .{
- .src = ret_ty_src,
- .r = .{ .simple = .function_ret_ty },
- } };
- break :r try sema.resolveInlineBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail);
- } else try sema.resolveInst(fn_info.ret_ty_ref);
- sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
- if (module_fn.analysisUnordered(ip).inferred_error_set) {
- // Create a fresh inferred error set type for inline/comptime calls.
- const ies = try sema.arena.create(InferredErrorSet);
- ies.* = .{ .func = .none };
- sema.fn_ret_ty_ies = ies;
- sema.fn_ret_ty = Type.fromInterned(try pt.intern(.{ .error_union_type = .{
- .error_set_type = .adhoc_inferred_error_set_type,
- .payload_type = sema.fn_ret_ty.toIntern(),
- } }));
- }
+ break :ret_ty full_ty;
+ };
- memoize: {
- if (!should_memoize) break :memoize;
- if (!is_comptime_call) break :memoize;
- const memoized_call_index = ip.getIfExists(.{
- .memoized_call = .{
- .func = module_fn_index,
- .arg_values = memoized_arg_values,
- .result = undefined, // ignored by hash+eql
- .branch_count = undefined, // ignored by hash+eql
+ // If we've discovered after evaluating arguments that a generic function instantiation is
+ // comptime-only, then we can mark the block as comptime *now*.
+ if (!inline_requested and !block.isComptime() and try resolved_ret_ty.comptimeOnlySema(pt)) {
+ block.comptime_reason = .{
+ .reason = .{
+ .src = call_src,
+ .r = .{
+ .comptime_only_ret_ty = .{
+ .ty = resolved_ret_ty,
+ .is_generic_inst = true,
+ .ret_ty_src = func_ret_ty_src,
+ },
},
- }) orelse break :memoize;
- const memoized_call = ip.indexToKey(memoized_call_index).memoized_call;
- if (sema.branch_count + memoized_call.branch_count > sema.branch_quota) {
- // Let the call play out se we get the correct source location for the
- // "evaluation exceeded X backwards branches" error.
- break :memoize;
- }
- sema.branch_count += memoized_call.branch_count;
- break :res Air.internedToRef(memoized_call.result);
- }
+ },
+ };
+ }
- // Since we're doing an inline call, we depend on the source code of the whole
- // function declaration.
- try sema.declareDependency(.{ .src_hash = fn_nav.analysis.?.zir_index });
+ if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
- new_fn_info.return_type = sema.fn_ret_ty.toIntern();
- if (!is_comptime_call and !block.is_typeof) {
- const zir_tags = sema.code.instructions.items(.tag);
- for (fn_info.param_body) |param| switch (zir_tags[@intFromEnum(param)]) {
- .param, .param_comptime => {
- const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].pl_tok;
- const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
- const param_name = sema.code.nullTerminatedString(extra.data.name);
- const inst = sema.inst_map.get(param).?;
-
- try sema.addDbgVar(&child_block, inst, .dbg_arg_inline, param_name);
- },
- .param_anytype, .param_anytype_comptime => {
- const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].str_tok;
- const param_name = inst_data.get(sema.code);
- const inst = sema.inst_map.get(param).?;
+ const is_inline_call = block.isComptime() or inline_requested;
- try sema.addDbgVar(&child_block, inst, .dbg_arg_inline, param_name);
- },
- else => continue,
- };
+ if (!is_inline_call) {
+ if (sema.func_is_naked) return sema.failWithOwnedErrorMsg(block, msg: {
+ const msg = try sema.errMsg(call_src, "runtime {s} not allowed in naked function", .{@tagName(operation)});
+ errdefer msg.destroy(gpa);
+ switch (operation) {
+ .call, .@"@call", .@"@panic", .@"error return" => {},
+ .@"safety check" => try sema.errNote(call_src, msg, "use @setRuntimeSafety to disable runtime safety", .{}),
+ }
+ break :msg msg;
+ });
+ for (args, 0..) |arg, arg_idx| {
+ try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg);
}
+ const runtime_func: Air.Inst.Ref, const runtime_args: []const Air.Inst.Ref = func: {
+ if (!func_ty_info.is_generic) break :func .{ callee, args };
- if (is_comptime_call and ensure_result_used) {
- try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
- }
+ // Instantiate the generic function!
- if (is_comptime_call or block.is_typeof) {
- // Save the error trace as our first action in the function
- // to match the behavior of runtime function calls.
- const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
- sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
- child_block.error_return_trace_index = error_return_trace_index;
- }
+ // This may be an overestimate, but it's definitely sufficient.
+ const max_runtime_args = args_info.count() - @popCount(func_ty_info.comptime_bits);
+ var runtime_args: std.ArrayListUnmanaged(Air.Inst.Ref) = try .initCapacity(arena, max_runtime_args);
+ var runtime_param_tys: std.ArrayListUnmanaged(InternPool.Index) = try .initCapacity(arena, max_runtime_args);
- // We temporarily set `allow_memoize` to `true` to track this comptime call.
- // It is restored after this call finishes analysis, so that a caller may
- // know whether an in-progress call (containing this call) may be memoized.
- const old_allow_memoize = sema.allow_memoize;
- defer sema.allow_memoize = old_allow_memoize and sema.allow_memoize;
- sema.allow_memoize = true;
+ const comptime_args = try arena.alloc(InternPool.Index, args_info.count());
- // Store the current eval branch count so we can find out how many eval branches
- // the comptime call caused.
- const old_branch_count = sema.branch_count;
+ var noalias_bits: u32 = 0;
- const result = result: {
- sema.analyzeFnBody(&child_block, fn_info.body) catch |err| switch (err) {
- error.ComptimeReturn => break :result inlining.comptime_result,
- else => |e| return e,
- };
- break :result try sema.resolveAnalyzedBlock(block, call_src, &child_block, merges, need_debug_scope);
- };
+ for (args, comptime_args, 0..) |arg, *comptime_arg, arg_idx| {
+ const arg_ty = sema.typeOf(arg);
- if (is_comptime_call) {
- const result_val = try sema.resolveConstValue(block, LazySrcLoc.unneeded, result, undefined);
- const result_interned = result_val.toIntern();
-
- // Transform ad-hoc inferred error set types into concrete error sets.
- const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
-
- // If the result can mutate comptime vars, we must not memoize it, as it contains
- // a reference to `comptime_allocs` so is not stable across instances of `Sema`.
- // TODO: check whether any external comptime memory was mutated by the
- // comptime function call. If so, then do not memoize the call here.
- if (should_memoize and sema.allow_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(zcu)) {
- _ = try pt.intern(.{ .memoized_call = .{
- .func = module_fn_index,
- .arg_values = memoized_arg_values,
- .result = result_transformed,
- .branch_count = sema.branch_count - old_branch_count,
- } });
+ const is_comptime = c: {
+ if (std.math.cast(u5, arg_idx)) |i| {
+ if (func_ty_info.paramIsComptime(i)) {
+ break :c true;
+ }
+ }
+ break :c try arg_ty.comptimeOnlySema(pt);
+ };
+ const is_noalias = if (std.math.cast(u5, arg_idx)) |i| func_ty_info.paramIsNoalias(i) else false;
+
+ if (is_comptime) {
+ // We already emitted an error if the argument isn't comptime-known.
+ comptime_arg.* = (try sema.resolveValue(arg)).?.toIntern();
+ } else {
+ comptime_arg.* = .none;
+ if (is_noalias) {
+ const runtime_idx = runtime_args.items.len;
+ noalias_bits |= @as(u32, 1) << @intCast(runtime_idx);
+ }
+ runtime_args.appendAssumeCapacity(arg);
+ runtime_param_tys.appendAssumeCapacity(arg_ty.toIntern());
+ }
}
- break :res Air.internedToRef(result_transformed);
- }
+ const bare_ret_ty = if (fn_zir_info.inferred_error_set) t: {
+ break :t resolved_ret_ty.errorUnionPayload(zcu);
+ } else resolved_ret_ty;
- if (try sema.resolveValue(result)) |result_val| {
- const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_val.toIntern());
- break :res Air.internedToRef(result_transformed);
- }
+ // We now need to actually create the function instance.
+ const func_instance = try ip.getFuncInstance(gpa, pt.tid, .{
+ .param_types = runtime_param_tys.items,
+ .noalias_bits = noalias_bits,
+ .bare_return_type = bare_ret_ty.toIntern(),
+ .is_noinline = func_ty_info.is_noinline,
+ .inferred_error_set = fn_zir_info.inferred_error_set,
+ .generic_owner = func_val.?.toIntern(),
+ .comptime_args = comptime_args,
+ });
- const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern());
- if (new_ty != .none) {
- // TODO: mutate in place the previous instruction if possible
- // rather than adding a bitcast instruction.
- break :res try block.addBitCast(Type.fromInterned(new_ty), result);
- }
+ // This call is problematic as it breaks guarantees about order-independency of semantic analysis.
+ // These guarantees are necessary for incremental compilation and parallel semantic analysis.
+ // See: #22410
+ zcu.funcInfo(func_instance).maxBranchQuota(ip, sema.branch_quota);
- break :res result;
- } else res: {
- assert(!func_ty_info.is_generic);
+ break :func .{ Air.internedToRef(func_instance), runtime_args.items };
+ };
- const args = try sema.arena.alloc(Air.Inst.Ref, args_info.count());
- for (args, 0..) |*arg_out, arg_idx| {
- // Non-generic, so param types are already resolved
- const param_ty: ?Type = if (arg_idx < func_ty_info.param_types.len) ty: {
- break :ty Type.fromInterned(func_ty_info.param_types.get(ip)[arg_idx]);
- } else null;
- if (param_ty) |t| assert(!t.isGenericPoison());
- arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
- try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg_out.*);
- if (sema.typeOf(arg_out.*).zigTypeTag(zcu) == .noreturn) {
- return arg_out.*;
- }
+ ref_func: {
+ const runtime_func_val = try sema.resolveValue(runtime_func) orelse break :ref_func;
+ if (!ip.isFuncBody(runtime_func_val.toIntern())) break :ref_func;
+ try sema.addReferenceEntry(call_src, .wrap(.{ .func = runtime_func_val.toIntern() }));
+ try zcu.ensureFuncBodyAnalysisQueued(runtime_func_val.toIntern());
}
- if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
-
switch (sema.owner.unwrap()) {
.@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
- .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) {
+ .func => |owner_func| if (resolved_ret_ty.isError(zcu)) {
ip.funcSetCallsOrAwaitsErrorableFn(owner_func);
},
}
- if (try sema.resolveValue(func)) |func_val| {
- if (zcu.intern_pool.isFuncBody(func_val.toIntern())) {
- try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = func_val.toIntern() }));
- try zcu.ensureFuncBodyAnalysisQueued(func_val.toIntern());
- }
- }
+ const call_tag: Air.Inst.Tag = switch (modifier) {
+ .auto, .no_async => .call,
+ .never_tail => .call_never_tail,
+ .never_inline => .call_never_inline,
+ .always_tail => .call_always_tail,
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).@"struct".fields.len +
- args.len);
- const func_inst = try block.addInst(.{
+ .always_inline,
+ .compile_time,
+ .async_kw,
+ => unreachable,
+ };
+
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).@"struct".fields.len + runtime_args.len);
+ const result = try block.addInst(.{
.tag = call_tag,
.data = .{ .pl_op = .{
- .operand = func,
+ .operand = runtime_func,
.payload = sema.addExtraAssumeCapacity(Air.Call{
- .args_len = @intCast(args.len),
+ .args_len = @intCast(runtime_args.len),
}),
} },
});
- sema.appendRefsAssumeCapacity(args);
+ sema.appendRefsAssumeCapacity(runtime_args);
+
+ if (ensure_result_used) {
+ try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
+ }
if (call_tag == .call_always_tail) {
- if (ensure_result_used) {
- try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src);
- }
- return sema.handleTailCall(block, call_src, func_ty, func_inst);
- }
- if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: {
- // Function pointers and extern functions aren't guaranteed to
- // actually be noreturn so we add a safety check for them.
- if (try sema.resolveValue(func)) |func_val| {
- switch (zcu.intern_pool.indexToKey(func_val.toIntern())) {
- .func => break :skip_safety,
- .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
- .nav => |nav| {
- try sema.ensureNavResolved(call_src, nav, .fully);
- if (ip.getNav(nav).getExtern(ip) == null) break :skip_safety;
- },
- else => {},
- },
- else => {},
- }
- }
- try sema.safetyPanic(block, call_src, .noreturn_returned);
- return .unreachable_value;
+ return sema.handleTailCall(block, call_src, sema.typeOf(runtime_func), result);
}
- if (func_ty_info.return_type == .noreturn_type) {
- _ = try block.addNoOp(.unreach);
+
+ if (resolved_ret_ty.toIntern() == .noreturn_type) {
+ const want_check = c: {
+ if (!block.wantSafety()) break :c false;
+ if (func_val != null) break :c false;
+ break :c true;
+ };
+ if (want_check) {
+ try sema.safetyPanic(block, call_src, .noreturn_returned);
+ } else {
+ _ = try block.addNoOp(.unreach);
+ }
return .unreachable_value;
}
- break :res func_inst;
- };
- if (ensure_result_used) {
- try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
+ return result;
}
- return result;
-}
-fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
- const pt = sema.pt;
- const zcu = pt.zcu;
- const target = zcu.getTarget();
- const backend = zcu.comp.getZigBackend();
- if (!target_util.supportsTailCall(target, backend)) {
- return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{
- @tagName(backend), @tagName(target.cpu.arch),
- });
+ // This is an inline call. The function must be comptime-known. We will analyze its body directly using this `Sema`.
+
+ const call_type: []const u8 = if (block.isComptime()) "comptime" else "inline";
+
+ if (modifier == .never_inline) {
+ return sema.fail(block, call_src, "cannot perform {s} call with 'never_inline' modifier", .{call_type});
}
- const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty);
- if (owner_func_ty.toIntern() != func_ty.toIntern()) {
- return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
- func_ty.fmt(pt), owner_func_ty.fmt(pt),
- });
+ if (func_ty_info.is_noinline and !block.isComptime()) {
+ return sema.fail(block, call_src, "{s} call of noinline function", .{call_type});
}
- _ = try block.addUnOp(.ret, result);
- return .unreachable_value;
-}
-
-/// Usually, returns null. If an argument was noreturn, returns that ref (which should become the call result).
-fn analyzeInlineCallArg(
- ics: *InlineCallSema,
- arg_block: *Block,
- param_block: *Block,
- inst: Zir.Inst.Index,
- new_param_types: []InternPool.Index,
- arg_i: *u32,
- args_info: CallArgsInfo,
- is_comptime_call: bool,
- should_memoize: *bool,
- memoized_arg_values: []InternPool.Index,
- func_ty_info: InternPool.Key.FuncType,
- func_inst: Air.Inst.Ref,
-) !?Air.Inst.Ref {
- const zcu = ics.sema.pt.zcu;
- const ip = &zcu.intern_pool;
- const zir_tags = ics.callee().code.instructions.items(.tag);
- switch (zir_tags[@intFromEnum(inst)]) {
- .param_comptime, .param_anytype_comptime => param_block.inlining.?.has_comptime_args = true,
- else => {},
+ if (func_ty_info.is_var_args) {
+ return sema.fail(block, call_src, "{s} call of variadic function", .{call_type});
}
- switch (zir_tags[@intFromEnum(inst)]) {
- .param, .param_comptime => {
- // Evaluate the parameter type expression now that previous ones have
- // been mapped, and coerce the corresponding argument to it.
- const pl_tok = ics.callee().code.instructions.items(.data)[@intFromEnum(inst)].pl_tok;
- const param_src = param_block.tokenOffset(pl_tok.src_tok);
- const extra = ics.callee().code.extraData(Zir.Inst.Param, pl_tok.payload_index);
- const param_body = ics.callee().code.bodySlice(extra.end, extra.data.body_len);
- const param_ty = param_ty: {
- const raw_param_ty = func_ty_info.param_types.get(ip)[arg_i.*];
- if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
- const param_ty_inst = try ics.callee().resolveInlineBody(param_block, param_body, inst);
- const param_ty = try ics.callee().analyzeAsType(param_block, param_src, param_ty_inst);
- break :param_ty param_ty.toIntern();
- };
- new_param_types[arg_i.*] = param_ty;
- const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.fromInterned(param_ty), func_ty_info, func_inst);
- if (ics.caller().typeOf(casted_arg).zigTypeTag(zcu) == .noreturn) {
- return casted_arg;
- }
- const arg_src = args_info.argSrc(arg_block, arg_i.*);
- if (zir_tags[@intFromEnum(inst)] == .param_comptime) {
- _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{ .simple = .comptime_param_arg });
- } else if (!is_comptime_call and try Type.fromInterned(param_ty).comptimeOnlySema(ics.callee().pt)) {
- _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{ .comptime_only = .{
- .ty = .fromInterned(param_ty),
- .msg = .param_ty_arg,
- } });
- }
- if (is_comptime_call) {
- ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
- const arg_val = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, null);
- switch (arg_val.toIntern()) {
- .generic_poison, .generic_poison_type => {
- // This function is currently evaluated as part of an as-of-yet unresolvable
- // parameter or return type.
- return error.GenericPoison;
- },
- else => {},
- }
- // Needed so that lazy values do not trigger
- // assertion due to type not being resolved
- // when the hash function is called.
- const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
- should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(zcu);
- memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern();
- } else {
- ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
- }
-
- if (try ics.caller().resolveValue(casted_arg)) |_| {
- param_block.inlining.?.has_comptime_args = true;
- }
-
- arg_i.* += 1;
- },
- .param_anytype, .param_anytype_comptime => {
- // No coercion needed.
- const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst);
- if (ics.caller().typeOf(uncasted_arg).zigTypeTag(zcu) == .noreturn) {
- return uncasted_arg;
- }
- const arg_src = args_info.argSrc(arg_block, arg_i.*);
- new_param_types[arg_i.*] = ics.caller().typeOf(uncasted_arg).toIntern();
-
- if (is_comptime_call) {
- ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
- const arg_val = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, null);
- switch (arg_val.toIntern()) {
- .generic_poison, .generic_poison_type => {
- // This function is currently evaluated as part of an as-of-yet unresolvable
- // parameter or return type.
- return error.GenericPoison;
- },
- else => {},
- }
- // Needed so that lazy values do not trigger
- // assertion due to type not being resolved
- // when the hash function is called.
- const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
- should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(zcu);
- memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern();
- } else {
- if (zir_tags[@intFromEnum(inst)] == .param_anytype_comptime) {
- _ = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, .{ .simple = .comptime_param_arg });
- }
- ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
- }
+ if (func_val == null) {
+ if (func_is_extern) {
+ return sema.fail(block, call_src, "{s} call of extern function", .{call_type});
+ }
+ return sema.failWithNeededComptime(
+ block,
+ func_src,
+ .{ .simple = if (block.isComptime()) .comptime_call_target else .inline_call_target },
+ );
+ }
- if (try ics.caller().resolveValue(uncasted_arg)) |_| {
- param_block.inlining.?.has_comptime_args = true;
+ if (block.isComptime()) {
+ for (args, 0..) |arg, arg_idx| {
+ if (!try sema.isComptimeKnown(arg)) {
+ const arg_src = args_info.argSrc(block, arg_idx);
+ return sema.failWithNeededComptime(block, arg_src, null);
}
-
- arg_i.* += 1;
- },
- else => {},
+ }
}
- return null;
-}
-
-fn instantiateGenericCall(
- sema: *Sema,
- block: *Block,
- func: Air.Inst.Ref,
- func_src: LazySrcLoc,
- call_src: LazySrcLoc,
- ensure_result_used: bool,
- args_info: CallArgsInfo,
- call_tag: Air.Inst.Tag,
- call_dbg_node: ?Zir.Inst.Index,
- /// Populated when `error.ComptimeReturn` is returned.
- comptime_ret_ty: *Type,
-) CompileError!Air.Inst.Ref {
- const pt = sema.pt;
- const zcu = pt.zcu;
- const gpa = sema.gpa;
- const ip = &zcu.intern_pool;
-
- // Generic function pointers are comptime-only types, so `func` is definitely comptime-known.
- const func_val = (sema.resolveValue(func) catch unreachable).?;
- if (func_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, func_src);
+ // For an inline call, we depend on the source code of the whole function definition.
+ try sema.declareDependency(.{ .src_hash = fn_nav.analysis.?.zir_index });
- const generic_owner = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) {
- .func => func_val.toIntern(),
- .ptr => |ptr| ip.getNav(ptr.base_addr.nav).status.fully_resolved.val,
- else => unreachable,
- };
- const generic_owner_func = zcu.intern_pool.indexToKey(generic_owner).func;
- const generic_owner_ty_info = zcu.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?;
+ try sema.emitBackwardBranch(block, call_src);
- try sema.declareDependency(.{ .src_hash = generic_owner_func.zir_body_inst });
-
- // Even though there may already be a generic instantiation corresponding
- // to this callsite, we must evaluate the expressions of the generic
- // function signature with the values of the callsite plugged in.
- // Importantly, this may include type coercions that determine whether the
- // instantiation is a match of a previous instantiation.
- // The actual monomorphization happens via adding `func_instance` to
- // `InternPool`.
-
- // Since we are looking at the generic owner here, it has analysis state.
- const fn_nav = ip.getNav(generic_owner_func.owner_nav);
- const fn_zir = zcu.navFileScope(generic_owner_func.owner_nav).zir;
- const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip) orelse return error.AnalysisFail);
+ const want_memoize = m: {
+ // TODO: comptime call memoization is currently not supported under incremental compilation
+ // since dependencies are not marked on callers. If we want to keep this around (we should
+ // check that it's worthwhile first!), each memoized call needs an `AnalUnit`.
+ if (zcu.comp.incremental) break :m false;
+ if (!block.isComptime()) break :m false;
+ for (args) |a| {
+ const val = (try sema.resolveValue(a)).?;
+ if (val.canMutateComptimeVarState(zcu)) break :m false;
+ }
+ break :m true;
+ };
+ const memoized_arg_values: []const InternPool.Index = if (want_memoize) arg_vals: {
+ const vals = try sema.arena.alloc(InternPool.Index, args.len);
+ for (vals, args) |*v, a| v.* = (try sema.resolveValue(a)).?.toIntern();
+ break :arg_vals vals;
+ } else undefined;
+ if (want_memoize) memoize: {
+ const memoized_call_index = ip.getIfExists(.{
+ .memoized_call = .{
+ .func = func_val.?.toIntern(),
+ .arg_values = memoized_arg_values,
+ .result = undefined, // ignored by hash+eql
+ .branch_count = undefined, // ignored by hash+eql
+ },
+ }) orelse break :memoize;
+ const memoized_call = ip.indexToKey(memoized_call_index).memoized_call;
+ if (sema.branch_count + memoized_call.branch_count > sema.branch_quota) {
+ // Let the call play out so we get the correct source location for the
+ // "evaluation exceeded X backwards branches" error.
+ break :memoize;
+ }
+ sema.branch_count += memoized_call.branch_count;
+ const result = Air.internedToRef(memoized_call.result);
+ if (ensure_result_used) {
+ try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
+ }
+ return result;
+ }
- const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count());
- @memset(comptime_args, .none);
+ var new_ies: InferredErrorSet = .{ .func = .none };
- // We may overestimate the number of runtime args, but this will definitely be sufficient.
- const max_runtime_args = args_info.count() - @popCount(generic_owner_ty_info.comptime_bits);
- var runtime_args = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(sema.arena, max_runtime_args);
+ const old_inst_map = sema.inst_map;
+ const old_code = sema.code;
+ const old_func_index = sema.func_index;
+ const old_fn_ret_ty = sema.fn_ret_ty;
+ const old_fn_ret_ty_ies = sema.fn_ret_ty_ies;
+ const old_error_return_trace_index_on_fn_entry = sema.error_return_trace_index_on_fn_entry;
+ defer {
+ sema.inst_map.deinit(gpa);
+ sema.inst_map = old_inst_map;
+ sema.code = old_code;
+ sema.func_index = old_func_index;
+ sema.fn_ret_ty = old_fn_ret_ty;
+ sema.fn_ret_ty_ies = old_fn_ret_ty_ies;
+ sema.error_return_trace_index_on_fn_entry = old_error_return_trace_index_on_fn_entry;
+ }
+ sema.inst_map = .{};
+ sema.code = fn_zir;
+ sema.func_index = func_val.?.toIntern();
+ sema.fn_ret_ty = if (fn_zir_info.inferred_error_set) try pt.errorUnionType(
+ .fromInterned(.adhoc_inferred_error_set_type),
+ resolved_ret_ty.errorUnionPayload(zcu),
+ ) else resolved_ret_ty;
+ sema.fn_ret_ty_ies = if (fn_zir_info.inferred_error_set) &new_ies else null;
+
+ try sema.inst_map.ensureSpaceForInstructions(gpa, fn_zir_info.param_body);
+ for (args, 0..) |arg, arg_idx| {
+ sema.inst_map.putAssumeCapacityNoClobber(fn_zir_info.param_body[arg_idx], arg);
+ }
+
+ const need_debug_scope = !block.isComptime() and !block.is_typeof and !block.ownerModule().strip;
+ const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
+ try sema.air_instructions.append(gpa, .{
+ .tag = if (need_debug_scope) .dbg_inline_block else .block,
+ .data = undefined,
+ });
- // Re-run the block that creates the function, with the comptime parameters
- // pre-populated inside `inst_map`. This causes `param_comptime` and
- // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
- // new, monomorphized function, with the comptime parameters elided.
- var child_sema: Sema = .{
- .pt = pt,
- .gpa = gpa,
- .arena = sema.arena,
- .code = fn_zir,
- // We pass the generic callsite's owner decl here because whatever `Decl`
- // dependencies are chased at this point should be attached to the
- // callsite, not the `Decl` associated with the `func_instance`.
- .owner = sema.owner,
- .func_index = sema.func_index,
- // This may not be known yet, since the calling convention could be generic, but there
- // should be no illegal instructions encountered while creating the function anyway.
- .func_is_naked = false,
- .fn_ret_ty = Type.void,
- .fn_ret_ty_ies = null,
- .comptime_args = comptime_args,
- .generic_owner = generic_owner,
- .generic_call_src = call_src,
- .branch_quota = sema.branch_quota,
- .branch_count = sema.branch_count,
- .comptime_err_ret_trace = sema.comptime_err_ret_trace,
+ var inlining: Block.Inlining = .{
+ .call_block = block,
+ .call_src = call_src,
+ .has_comptime_args = for (args) |a| {
+ if (try sema.isComptimeKnown(a)) break true;
+ } else false,
+ .func = func_val.?.toIntern(),
+ .comptime_result = undefined,
+ .merges = .{
+ .block_inst = block_inst,
+ .results = .empty,
+ .br_list = .empty,
+ .src_locs = .empty,
+ },
};
- defer child_sema.deinit();
-
var child_block: Block = .{
.parent = null,
- .sema = &child_sema,
+ .sema = sema,
.namespace = fn_nav.analysis.?.namespace,
.instructions = .{},
- .inlining = null,
- .comptime_reason = undefined, // set as needed
+ .inlining = &inlining,
+ .is_typeof = block.is_typeof,
+ .comptime_reason = if (block.isComptime()) .inlining_parent else null,
+ .error_return_trace_index = block.error_return_trace_index,
+ .runtime_cond = block.runtime_cond,
+ .runtime_loop = block.runtime_loop,
+ .runtime_index = block.runtime_index,
.src_base_inst = fn_nav.analysis.?.zir_index,
.type_name_ctx = fn_nav.fqn,
};
- defer child_block.instructions.deinit(gpa);
-
- try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
-
- for (fn_info.param_body[0..args_info.count()], 0..) |param_inst, arg_index| {
- const param_tag = fn_zir.instructions.items(.tag)[@intFromEnum(param_inst)];
- const param_ty = switch (generic_owner_ty_info.param_types.get(ip)[arg_index]) {
- else => |ty| Type.fromInterned(ty), // parameter is not generic, so type is already resolved
- .generic_poison_type => param_ty: {
- // We have every parameter before this one, so can resolve this parameter's type now.
- // However, first check the param type, since it may be anytype.
- switch (param_tag) {
- .param_anytype, .param_anytype_comptime => {
- // The parameter doesn't have a type.
- break :param_ty Type.generic_poison;
- },
- .param, .param_comptime => {
- // We now know every prior parameter, so can resolve this
- // parameter's type. The child sema has these types.
- const param_data = fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok;
- const param_extra = fn_zir.extraData(Zir.Inst.Param, param_data.payload_index);
- const param_ty_body = fn_zir.bodySlice(param_extra.end, param_extra.data.body_len);
-
- // Make sure any nested instructions don't clobber our work.
- const prev_params = child_block.params;
- const prev_no_partial_func_ty = child_sema.no_partial_func_ty;
- const prev_generic_owner = child_sema.generic_owner;
- const prev_generic_call_src = child_sema.generic_call_src;
- child_block.params = .{};
- child_sema.no_partial_func_ty = true;
- child_sema.generic_owner = .none;
- child_sema.generic_call_src = LazySrcLoc.unneeded;
- defer {
- child_block.params = prev_params;
- child_sema.no_partial_func_ty = prev_no_partial_func_ty;
- child_sema.generic_owner = prev_generic_owner;
- child_sema.generic_call_src = prev_generic_call_src;
- }
+ defer child_block.instructions.deinit(gpa);
+ defer inlining.merges.deinit(gpa);
- const param_ty_src = child_block.tokenOffset(param_data.src_tok);
- child_block.comptime_reason = .{ .reason = .{
- .src = param_ty_src,
- .r = .{ .simple = .type },
- } };
- const param_ty_inst = try child_sema.resolveInlineBody(&child_block, param_ty_body, param_inst);
- break :param_ty try child_sema.analyzeAsType(&child_block, param_ty_src, param_ty_inst);
- },
- else => unreachable,
- }
- },
- };
- const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func);
- try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_index), arg_ref);
- const arg_ty = sema.typeOf(arg_ref);
- if (arg_ty.zigTypeTag(zcu) == .noreturn) {
- // This terminates argument analysis.
- return arg_ref;
+ if (!inlining.has_comptime_args) {
+ var block_it = block;
+ while (block_it.inlining) |parent_inlining| {
+ if (!parent_inlining.has_comptime_args and parent_inlining.func == func_val.?.toIntern()) {
+ return sema.fail(block, call_src, "inline call is recursive", .{});
+ }
+ block_it = parent_inlining.call_block;
}
+ }
- const arg_is_comptime = switch (param_tag) {
- .param_comptime, .param_anytype_comptime => true,
- .param, .param_anytype => try arg_ty.comptimeOnlySema(pt),
- else => unreachable,
+ if (!block.isComptime() and !block.is_typeof) {
+ const zir_tags = sema.code.instructions.items(.tag);
+ const zir_datas = sema.code.instructions.items(.data);
+ for (fn_zir_info.param_body) |inst| switch (zir_tags[@intFromEnum(inst)]) {
+ .param, .param_comptime => {
+ const extra = sema.code.extraData(Zir.Inst.Param, zir_datas[@intFromEnum(inst)].pl_tok.payload_index);
+ const param_name = sema.code.nullTerminatedString(extra.data.name);
+ const air_inst = sema.inst_map.get(inst).?;
+ try sema.addDbgVar(&child_block, air_inst, .dbg_arg_inline, param_name);
+ },
+ .param_anytype, .param_anytype_comptime => {
+ const param_name = zir_datas[@intFromEnum(inst)].str_tok.get(sema.code);
+ const air_inst = sema.inst_map.get(inst).?;
+ try sema.addDbgVar(&child_block, air_inst, .dbg_arg_inline, param_name);
+ },
+ else => {},
};
-
- if (arg_is_comptime) {
- if (try sema.resolveValue(arg_ref)) |arg_val| {
- comptime_args[arg_index] = arg_val.toIntern();
- child_sema.inst_map.putAssumeCapacityNoClobber(
- param_inst,
- Air.internedToRef(arg_val.toIntern()),
- );
- } else switch (param_tag) {
- .param_comptime,
- .param_anytype_comptime,
- => return sema.failWithOwnedErrorMsg(block, msg: {
- const arg_src = args_info.argSrc(block, arg_index);
- const msg = try sema.errMsg(arg_src, "runtime-known argument passed to comptime parameter", .{});
- errdefer msg.destroy(sema.gpa);
- const param_src = child_block.tokenOffset(switch (param_tag) {
- .param_comptime => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok.src_tok,
- .param_anytype_comptime => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.src_tok,
- else => unreachable,
- });
- try child_sema.errNote(param_src, msg, "declared comptime here", .{});
- break :msg msg;
- }),
-
- .param,
- .param_anytype,
- => return sema.failWithOwnedErrorMsg(block, msg: {
- const arg_src = args_info.argSrc(block, arg_index);
- const msg = try sema.errMsg(arg_src, "runtime-known argument passed to parameter of comptime-only type", .{});
- errdefer msg.destroy(sema.gpa);
- const param_src = child_block.tokenOffset(switch (param_tag) {
- .param => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok.src_tok,
- .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.src_tok,
- else => unreachable,
- });
- try child_sema.errNote(param_src, msg, "declared here", .{});
- try sema.explainWhyTypeIsComptime(msg, arg_src, arg_ty);
- break :msg msg;
- }),
-
- else => unreachable,
- }
- } else {
- // The parameter is runtime-known.
- const param_name: Zir.NullTerminatedString = switch (param_tag) {
- .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.start,
- .param => name: {
- const inst_data = fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok;
- const extra = fn_zir.extraData(Zir.Inst.Param, inst_data.payload_index);
- break :name extra.data.name;
- },
- else => unreachable,
- };
- child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
- .tag = .arg,
- .data = .{ .arg = .{
- .ty = Air.internedToRef(arg_ty.toIntern()),
- .name = if (child_block.ownerModule().strip)
- .none
- else
- try sema.appendAirString(fn_zir.nullTerminatedString(param_name)),
- } },
- }));
- try child_block.params.append(sema.arena, .{
- .ty = arg_ty.toIntern(), // This is the type after coercion
- .is_comptime = false, // We're adding only runtime args to the instantiation
- .name = param_name,
- });
- runtime_args.appendAssumeCapacity(arg_ref);
- }
}
- // We've already handled parameters, so don't resolve the whole body. Instead, just
- // do the instructions after the params (i.e. the func itself).
- child_block.comptime_reason = .{ .reason = .{
- .src = call_src,
- .r = .{ .simple = .type },
- } };
- const new_func_inst = try child_sema.resolveInlineBody(&child_block, fn_info.param_body[args_info.count()..], fn_info.param_body_inst);
- const callee_index = (child_sema.resolveConstDefinedValue(&child_block, LazySrcLoc.unneeded, new_func_inst, undefined) catch unreachable).toIntern();
+ child_block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
+ // Save the error trace as our first action in the function
+ // to match the behavior of runtime function calls.
+ const error_return_trace_index_on_parent_fn_entry = sema.error_return_trace_index_on_fn_entry;
+ sema.error_return_trace_index_on_fn_entry = child_block.error_return_trace_index;
+ defer sema.error_return_trace_index_on_fn_entry = error_return_trace_index_on_parent_fn_entry;
- const callee = zcu.funcInfo(callee_index);
- callee.maxBranchQuota(ip, sema.branch_quota);
+ // We temporarily set `allow_memoize` to `true` to track this comptime call.
+ // It is restored after the call finishes analysis, so that a caller may
+ // know whether an in-progress call (containing this call) may be memoized.
+ const old_allow_memoize = sema.allow_memoize;
+ defer sema.allow_memoize = old_allow_memoize and sema.allow_memoize;
+ sema.allow_memoize = true;
- // Make a runtime call to the new function, making sure to omit the comptime args.
- const func_ty = Type.fromInterned(callee.ty);
- const func_ty_info = zcu.typeToFunc(func_ty).?;
+ // Store the current eval branch count so we can find out how many eval branches
+ // the comptime call caused.
+ const old_branch_count = sema.branch_count;
- // If the call evaluated to a return type that requires comptime, never mind
- // our generic instantiation. Instead we need to perform a comptime call.
- if (try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
- comptime_ret_ty.* = .fromInterned(func_ty_info.return_type);
- return error.ComptimeReturn;
- }
- // Similarly, if the call evaluated to a generic type we need to instead
- // call it inline.
- if (func_ty_info.is_generic or func_ty_info.cc == .@"inline") {
- return error.GenericPoison;
- }
+ const result_raw: Air.Inst.Ref = result: {
+ sema.analyzeFnBody(&child_block, fn_zir_info.body) catch |err| switch (err) {
+ error.ComptimeReturn => break :result inlining.comptime_result,
+ else => |e| return e,
+ };
+ break :result try sema.resolveAnalyzedBlock(block, call_src, &child_block, &inlining.merges, need_debug_scope);
+ };
- if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
+ const result: Air.Inst.Ref = if (try sema.resolveValue(result_raw)) |result_val| r: {
+ const val_resolved = try sema.resolveAdHocInferredErrorSet(block, call_src, result_val.toIntern());
+ break :r Air.internedToRef(val_resolved);
+ } else r: {
+ const resolved_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result_raw).toIntern());
+ if (resolved_ty == .none) break :r result_raw;
+ // TODO: mutate in place the previous instruction if possible
+ // rather than adding a bitcast instruction.
+ break :r try block.addBitCast(.fromInterned(resolved_ty), result_raw);
+ };
- switch (sema.owner.unwrap()) {
- .@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
- .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) {
- ip.funcSetCallsOrAwaitsErrorableFn(owner_func);
- },
+ if (block.isComptime()) {
+ const result_val = (try sema.resolveValue(result)).?;
+ if (want_memoize and sema.allow_memoize and !result_val.canMutateComptimeVarState(zcu)) {
+ _ = try pt.intern(.{ .memoized_call = .{
+ .func = func_val.?.toIntern(),
+ .arg_values = memoized_arg_values,
+ .result = result_val.toIntern(),
+ .branch_count = sema.branch_count - old_branch_count,
+ } });
+ }
}
- try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index }));
- try zcu.ensureFuncBodyAnalysisQueued(callee_index);
-
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).@"struct".fields.len + runtime_args.items.len);
- const result = try block.addInst(.{
- .tag = call_tag,
- .data = .{ .pl_op = .{
- .operand = Air.internedToRef(callee_index),
- .payload = sema.addExtraAssumeCapacity(Air.Call{
- .args_len = @intCast(runtime_args.items.len),
- }),
- } },
- });
- sema.appendRefsAssumeCapacity(runtime_args.items);
-
- // `child_sema` is owned by us, so just take its exports.
- try sema.exports.appendSlice(sema.gpa, child_sema.exports.items);
-
if (ensure_result_used) {
try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
}
- if (call_tag == .call_always_tail) {
- return sema.handleTailCall(block, call_src, func_ty, result);
+
+ return result;
+}
+
+fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ const target = zcu.getTarget();
+ const backend = zcu.comp.getZigBackend();
+ if (!target_util.supportsTailCall(target, backend)) {
+ return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{
+ @tagName(backend), @tagName(target.cpu.arch),
+ });
}
- if (func_ty.fnReturnType(zcu).isNoReturn(zcu)) {
- _ = try block.addNoOp(.unreach);
- return .unreachable_value;
+ const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty);
+ if (owner_func_ty.toIntern() != func_ty.toIntern()) {
+ return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
+ func_ty.fmt(pt), owner_func_ty.fmt(pt),
+ });
}
- return result;
+ _ = try block.addUnOp(.ret, result);
+ return .unreachable_value;
}
fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
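A hedged sketch of the recursion guard in the inline-call path above (the walk over `block.inlining` parents when no argument is comptime-known); the names below are hypothetical, not from this commit, and mirror the updated expectations in test/cases/compile_errors/recursive_inline_fn.zig further down:

// An inline call chain that reaches the same function again with no
// comptime-known arguments can never be fully unrolled, so Sema rejects it.
inline fn countDown(n: u32) u32 {
    if (n == 0) return 0;
    // `n - 1` is runtime-known, so `has_comptime_args` stays false and the walk
    // up `inlining.call_block` finds `countDown` already being inlined.
    return countDown(n - 1); // error: inline call is recursive
}

export fn entry(n: u32) u32 {
    return countDown(n);
}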
@@ -9576,9 +9226,7 @@ fn zirFunc(
// the callconv based on whether it is exported. Otherwise, the callconv defaults
// to `.auto`.
const cc: std.builtin.CallingConvention = if (has_body) cc: {
- const func_decl_nav = if (sema.generic_owner != .none) nav: {
- break :nav zcu.funcInfo(sema.generic_owner).owner_nav;
- } else sema.owner.unwrap().nav_val;
+ const func_decl_nav = sema.owner.unwrap().nav_val;
const fn_is_exported = exported: {
const decl_inst = ip.getNav(func_decl_nav).analysis.?.zir_index.resolve(ip) orelse return error.AnalysisFail;
const zir_decl = sema.code.getDeclaration(decl_inst);
@@ -9635,17 +9283,11 @@ fn resolveGenericBody(
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
const prev_no_partial_func_type = sema.no_partial_func_ty;
- const prev_generic_owner = sema.generic_owner;
- const prev_generic_call_src = sema.generic_call_src;
block.params = .{};
sema.no_partial_func_ty = true;
- sema.generic_owner = .none;
- sema.generic_call_src = LazySrcLoc.unneeded;
defer {
block.params = prev_params;
sema.no_partial_func_ty = prev_no_partial_func_type;
- sema.generic_owner = prev_generic_owner;
- sema.generic_call_src = prev_generic_call_src;
}
const uncasted = sema.resolveInlineBody(block, body, func_inst) catch |err| break :err err;
@@ -9911,16 +9553,10 @@ fn funcCommon(
const cc_src = block.src(.{ .node_offset_fn_type_cc = src_node_offset });
const func_src = block.nodeOffset(src_node_offset);
- var is_generic = bare_return_type.isGenericPoison();
+ if (bare_return_type.isGenericPoison() and sema.no_partial_func_ty) return error.GenericPoison;
- if (var_args) {
- if (is_generic) {
- return sema.fail(block, func_src, "generic function cannot be variadic", .{});
- }
- try sema.checkCallConvSupportsVarArgs(block, cc_src, cc);
- }
-
- const is_source_decl = sema.generic_owner == .none;
+ const ret_ty_requires_comptime = try bare_return_type.comptimeOnlySema(pt);
+ var is_generic = bare_return_type.isGenericPoison() or ret_ty_requires_comptime;
var comptime_bits: u32 = 0;
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
@@ -9933,16 +9569,21 @@ fn funcCommon(
.fn_proto_node_offset = src_node_offset,
.param_index = @intCast(i),
} });
- const requires_comptime = try param_ty.comptimeOnlySema(pt);
- if (param_is_comptime or requires_comptime) {
+ const param_ty_comptime = try param_ty.comptimeOnlySema(pt);
+ const param_ty_generic = param_ty.isGenericPoison();
+ if (param_ty_generic and sema.no_partial_func_ty) {
+ return error.GenericPoison;
+ }
+ if (param_is_comptime or param_ty_comptime or param_ty_generic) {
+ is_generic = true;
+ }
+ if (param_is_comptime) {
comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error
}
- const this_generic = param_ty.isGenericPoison();
- is_generic = is_generic or this_generic;
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(cc)) {
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
- if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(cc)) {
+ if (param_ty_generic and !target_util.fnCallConvAllowsZigTypes(cc)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (!param_ty.isValidParamType(zcu)) {
@@ -9951,7 +9592,7 @@ fn funcCommon(
opaque_str, param_ty.fmt(pt),
});
}
- if (!this_generic and !target_util.fnCallConvAllowsZigTypes(cc) and !try sema.validateExternType(param_ty, .param_ty)) {
+ if (!param_ty_generic and !target_util.fnCallConvAllowsZigTypes(cc) and !try sema.validateExternType(param_ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
param_ty.fmt(pt), @tagName(cc),
@@ -9965,7 +9606,7 @@ fn funcCommon(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- if (is_source_decl and requires_comptime and !param_is_comptime and has_body and !block.isComptime()) {
+ if (param_ty_comptime and !param_is_comptime and has_body and !block.isComptime()) {
const msg = msg: {
const msg = try sema.errMsg(param_src, "parameter of type '{}' must be declared comptime", .{
param_ty.fmt(pt),
@@ -9979,7 +9620,7 @@ fn funcCommon(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- if (is_source_decl and !this_generic and is_noalias and
+ if (!param_ty_generic and is_noalias and
!(param_ty.zigTypeTag(zcu) == .pointer or param_ty.isPtrLikeOptional(zcu)))
{
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
@@ -10007,48 +9648,17 @@ fn funcCommon(
}
}
- const ret_ty_requires_comptime = try bare_return_type.comptimeOnlySema(pt);
+ if (var_args) {
+ if (is_generic) {
+ return sema.fail(block, func_src, "generic function cannot be variadic", .{});
+ }
+ try sema.checkCallConvSupportsVarArgs(block, cc_src, cc);
+ }
+
const ret_poison = bare_return_type.isGenericPoison();
- const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
const param_types = block.params.items(.ty);
- if (!is_source_decl) {
- assert(has_body);
- assert(!is_generic);
- assert(comptime_bits == 0);
- assert(!var_args);
- if (inferred_error_set) {
- try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
- }
- const func_index = try ip.getFuncInstance(gpa, pt.tid, .{
- .param_types = param_types,
- .noalias_bits = noalias_bits,
- .bare_return_type = bare_return_type.toIntern(),
- .is_noinline = is_noinline,
- .inferred_error_set = inferred_error_set,
- .generic_owner = sema.generic_owner,
- .comptime_args = sema.comptime_args,
- });
- return finishFunc(
- sema,
- block,
- func_index,
- .none,
- ret_poison,
- bare_return_type,
- ret_ty_src,
- cc,
- is_source_decl,
- ret_ty_requires_comptime,
- func_inst,
- cc_src,
- is_noinline,
- is_generic,
- final_is_generic,
- );
- }
-
if (inferred_error_set) {
assert(has_body);
if (!ret_poison)
@@ -10062,7 +9672,7 @@ fn funcCommon(
.bare_return_type = bare_return_type.toIntern(),
.cc = cc,
.is_var_args = var_args,
- .is_generic = final_is_generic,
+ .is_generic = is_generic,
.is_noinline = is_noinline,
.zir_body_inst = try block.trackZir(func_inst),
@@ -10080,13 +9690,11 @@ fn funcCommon(
bare_return_type,
ret_ty_src,
cc,
- is_source_decl,
ret_ty_requires_comptime,
func_inst,
cc_src,
is_noinline,
is_generic,
- final_is_generic,
);
}
@@ -10097,7 +9705,7 @@ fn funcCommon(
.return_type = bare_return_type.toIntern(),
.cc = cc,
.is_var_args = var_args,
- .is_generic = final_is_generic,
+ .is_generic = is_generic,
.is_noinline = is_noinline,
});
@@ -10122,13 +9730,11 @@ fn funcCommon(
bare_return_type,
ret_ty_src,
cc,
- is_source_decl,
ret_ty_requires_comptime,
func_inst,
cc_src,
is_noinline,
is_generic,
- final_is_generic,
);
}
@@ -10141,13 +9747,11 @@ fn funcCommon(
bare_return_type,
ret_ty_src,
cc,
- is_source_decl,
ret_ty_requires_comptime,
func_inst,
cc_src,
is_noinline,
is_generic,
- final_is_generic,
);
}
@@ -10160,13 +9764,11 @@ fn finishFunc(
bare_return_type: Type,
ret_ty_src: LazySrcLoc,
cc_resolved: std.builtin.CallingConvention,
- is_source_decl: bool,
ret_ty_requires_comptime: bool,
func_inst: Zir.Inst.Index,
cc_src: LazySrcLoc,
is_noinline: bool,
is_generic: bool,
- final_is_generic: bool,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
@@ -10203,7 +9805,7 @@ fn finishFunc(
// If the return type is comptime-only but not dependent on parameters then
// all parameter types also need to be comptime.
- if (is_source_decl and opt_func_index != .none and ret_ty_requires_comptime and !block.isComptime()) comptime_check: {
+ if (opt_func_index != .none and ret_ty_requires_comptime and !block.isComptime()) comptime_check: {
for (block.params.items(.is_comptime)) |is_comptime| {
if (!is_comptime) break;
} else break :comptime_check;
@@ -10300,8 +9902,7 @@ fn finishFunc(
}),
}
- if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
- if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
+ if (!is_generic and sema.wantErrorReturnTracing(return_type)) {
// Make sure that StackTrace's fields are resolved so that the backend can
// lower this fn type.
const unresolved_stack_trace_ty = try sema.getBuiltinType(block.nodeOffset(0), .StackTrace);
@@ -10328,17 +9929,11 @@ fn zirParam(
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
const prev_no_partial_func_type = sema.no_partial_func_ty;
- const prev_generic_owner = sema.generic_owner;
- const prev_generic_call_src = sema.generic_call_src;
block.params = .{};
sema.no_partial_func_ty = true;
- sema.generic_owner = .none;
- sema.generic_call_src = LazySrcLoc.unneeded;
defer {
block.params = prev_params;
sema.no_partial_func_ty = prev_no_partial_func_type;
- sema.generic_owner = prev_generic_owner;
- sema.generic_call_src = prev_generic_call_src;
}
if (sema.resolveInlineBody(block, body, inst)) |param_ty_inst| {
@@ -26646,11 +26241,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :blk try sema.analyzeValueAsCallconv(block, cc_src, cc_val);
} else cc: {
if (has_body) {
- const func_decl_nav = if (sema.generic_owner != .none) nav: {
- // Generic instance -- use the original function declaration to
- // look for the `export` syntax.
- break :nav zcu.funcInfo(sema.generic_owner).owner_nav;
- } else sema.owner.unwrap().nav_val;
+ const func_decl_nav = sema.owner.unwrap().nav_val;
const func_decl_inst = ip.getNav(func_decl_nav).analysis.?.zir_index.resolve(&zcu.intern_pool) orelse return error.AnalysisFail;
const zir_decl = sema.code.getDeclaration(func_decl_inst);
if (zir_decl.linkage == .@"export") {
@@ -31068,9 +30659,16 @@ fn coerceInMemoryAllowedFns(
const dest_param_ty: Type = .fromInterned(dest_info.param_types.get(ip)[param_i]);
const src_param_ty: Type = .fromInterned(src_info.param_types.get(ip)[param_i]);
- const src_is_comptime = src_info.paramIsComptime(@intCast(param_i));
- const dest_is_comptime = dest_info.paramIsComptime(@intCast(param_i));
- if (src_is_comptime != dest_is_comptime) {
+ comptime_param: {
+ const src_is_comptime = src_info.paramIsComptime(@intCast(param_i));
+ const dest_is_comptime = dest_info.paramIsComptime(@intCast(param_i));
+ if (src_is_comptime == dest_is_comptime) break :comptime_param;
+ if (!dest_is_mut and src_is_comptime and !dest_is_comptime and try dest_param_ty.comptimeOnlySema(pt)) {
+ // A parameter which is marked `comptime` can drop that annotation if the type is comptime-only.
+ // The function remains generic, and the parameter is going to be comptime-resolved either way,
+ // so this just affects whether or not the argument is comptime-evaluated at the call site.
+ break :comptime_param;
+ }
return .{ .fn_param_comptime = .{
.index = param_i,
.wanted = dest_is_comptime,
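As a hedged illustration of the comment in the `comptime_param` branch above, these are the two parameter spellings the in-memory check now tolerates mixing (the type names are mine, and this only sketches the shapes involved, not a full coercion test):

// Source side: the parameter carries an explicit `comptime` annotation.
const Src = fn (comptime type) void;
// Destination side: the same comptime-only parameter type without the annotation.
// Per the branch above, this mismatch alone no longer fails the check (provided
// the destination is not behind a mutable pointer), since the argument is
// comptime-resolved either way; only call-site evaluation timing differs.
const Dest = fn (type) void;

comptime {
    _ = Src;
    _ = Dest;
}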
src/translate_c.zig
@@ -160,6 +160,7 @@ pub fn translate(
context.pattern_list.deinit(gpa);
}
+ @setEvalBranchQuota(2000);
inline for (@typeInfo(std.zig.c_builtins).@"struct".decls) |decl| {
const builtin = try Tag.pub_var_simple.create(arena, .{
.name = decl.name,
src/Zcu.zig
@@ -1928,6 +1928,24 @@ pub const SrcLoc = struct {
},
}
},
+ .func_decl_param_comptime => |param_idx| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ var buf: [1]Ast.Node.Index = undefined;
+ const full = tree.fullFnProto(&buf, src_loc.base_node).?;
+ var param_it = full.iterate(tree);
+ for (0..param_idx) |_| assert(param_it.next() != null);
+ const param = param_it.next().?;
+ return tree.tokenToSpan(param.comptime_noalias.?);
+ },
+ .func_decl_param_ty => |param_idx| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ var buf: [1]Ast.Node.Index = undefined;
+ const full = tree.fullFnProto(&buf, src_loc.base_node).?;
+ var param_it = full.iterate(tree);
+ for (0..param_idx) |_| assert(param_it.next() != null);
+ const param = param_it.next().?;
+ return tree.nodeToSpan(param.type_expr);
+ },
}
}
};
@@ -2235,6 +2253,12 @@ pub const LazySrcLoc = struct {
/// The source location points to the "tag" capture (second capture) of
/// a specific case of a `switch`.
switch_tag_capture: SwitchCapture,
+ /// The source location points to the `comptime` token on the given comptime parameter,
+ /// where the base node is a function declaration. The value is the parameter index.
+ func_decl_param_comptime: u32,
+ /// The source location points to the type annotation on the given function parameter,
+ /// where the base node is a function declaration. The value is the parameter index.
+ func_decl_param_ty: u32,
pub const FnProtoParam = struct {
/// The offset of the function prototype AST node.
test/behavior/eval.zig
@@ -363,7 +363,7 @@ test "comptime modification of const struct field" {
}
test "refer to the type of a generic function" {
- const Func = fn (type) void;
+ const Func = fn (comptime type) void;
const f: Func = doNothingWithType;
f(i32);
}
test/behavior/generics.zig
@@ -427,7 +427,7 @@ test "generic function passed as comptime argument" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
- fn doMath(comptime f: fn (type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void {
+ fn doMath(comptime f: fn (comptime type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void {
const result = try f(i32, a, b);
try expect(result == 11);
}
test/behavior/struct.zig
@@ -1511,7 +1511,7 @@ test "if inside struct init inside if" {
test "optional generic function label struct field" {
const Options = struct {
- isFoo: ?fn (type) u8 = defaultIsFoo,
+ isFoo: ?fn (comptime type) u8 = defaultIsFoo,
fn defaultIsFoo(comptime _: type) u8 {
return 123;
}
test/behavior/typename.zig
@@ -238,7 +238,7 @@ test "comptime parameters not converted to anytype in function type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- const T = fn (fn (type) void, void) void;
+ const T = fn (comptime fn (comptime type) void, void) void;
try expectEqualStrings("fn (comptime fn (comptime type) void, void) void", @typeName(T));
}
test/cases/compile_errors/arg_to_non_comptime_param_with_comptime_only_type_is_not_evaluated_at_comptime.zig
@@ -0,0 +1,36 @@
+//! Whew, that filename is a bit of a mouthful!
+//! To maximise consistency with other parts of the language, function argument expressions are
+//! only *evaluated* at comptime if the parameter is declared `comptime`. If the parameter type is
+//! comptime-only, but the parameter is not declared `comptime`, the evaluation happens at runtime,
+//! and the value is just comptime-resolved.
+
+export fn foo() void {
+ // This function is itself generic: the type of its comptime-only parameter is the generic `T`.
+ simpleGeneric(type, if (cond()) u8 else u16);
+}
+
+export fn bar() void {
+ // This function is not generic; once `Wrapper` is called, its parameter type is immediately known.
+ Wrapper(type).inner(if (cond()) u8 else u16);
+}
+
+fn simpleGeneric(comptime T: type, _: T) void {}
+
+fn Wrapper(comptime T: type) type {
+ return struct {
+ fn inner(_: T) void {}
+ };
+}
+
+fn cond() bool {
+ return true;
+}
+
+// error
+//
+// :9:25: error: value with comptime-only type 'type' depends on runtime control flow
+// :9:33: note: runtime control flow here
+// :9:25: note: types are not available at runtime
+// :14:25: error: value with comptime-only type 'type' depends on runtime control flow
+// :14:33: note: runtime control flow here
+// :14:25: note: types are not available at runtime
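For contrast with the rejected calls above, a hedged sketch of the accepted counterparts, reusing the test's own declarations and assuming the reading in the doc comment (a directly comptime-known argument resolves fine even though the parameter is not declared `comptime`):

export fn ok() void {
    // No runtime control flow in either argument, so comptime resolution of the
    // comptime-only parameter succeeds and no error is emitted.
    simpleGeneric(type, u8);
    Wrapper(type).inner(u16);
}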
test/cases/compile_errors/bad_usage_of_call.zig
@@ -42,8 +42,8 @@ noinline fn dummy2() void {}
// :2:23: error: expected a tuple, found 'void'
// :5:21: error: unable to perform 'never_inline' call at compile-time
// :8:21: error: unable to perform 'never_tail' call at compile-time
-// :11:5: error: 'never_inline' call of inline function
+// :11:5: error: cannot perform inline call with 'never_inline' modifier
// :15:26: error: modifier 'compile_time' requires a comptime-known function
-// :18:9: error: 'always_inline' call of noinline function
-// :21:9: error: 'always_inline' call of noinline function
+// :18:9: error: inline call of noinline function
+// :21:9: error: inline call of noinline function
// :26:27: error: modifier 'always_inline' requires a comptime-known function
test/cases/compile_errors/comptime_call_of_function_pointer.zig
@@ -4,7 +4,6 @@ export fn entry() void {
}
// error
-// backend=stage2
-// target=native
//
-// :3:20: error: comptime call of function pointer
+// :3:14: error: unable to resolve comptime value
+// :3:14: note: function being called at comptime must be comptime-known
test/cases/compile_errors/condition_comptime_reason_explained.zig
@@ -36,11 +36,13 @@ pub export fn entry2() void {
//
// :8:9: error: unable to resolve comptime value
// :19:15: note: called at comptime from here
-// :7:13: note: function with comptime-only return type 'tmp.S' is evaluated at comptime
+// :19:15: note: call to function with comptime-only return type 'tmp.S' is evaluated at comptime
+// :7:13: note: return type declared here
// :2:12: note: struct requires comptime because of this field
// :2:12: note: use '*const fn () void' for a function pointer type
// :22:13: error: unable to resolve comptime value
// :32:19: note: called at comptime from here
-// :21:17: note: function with comptime-only return type 'tmp.S' is evaluated at comptime
+// :32:19: note: call to function with comptime-only return type 'tmp.S' is evaluated at comptime
+// :21:17: note: return type declared here
// :2:12: note: struct requires comptime because of this field
// :2:12: note: use '*const fn () void' for a function pointer type
test/cases/compile_errors/dereference_anyopaque.zig
@@ -1,54 +1,7 @@
-const std = @import("std");
-
-const Error = error{Something};
-
-fn next() Error!void {
- return;
-}
-
-fn parse(comptime T: type, allocator: std.mem.Allocator) !void {
- parseFree(T, undefined, allocator);
- _ = (try next()) != null;
-}
-
-fn parseFree(comptime T: type, value: T, allocator: std.mem.Allocator) void {
- switch (@typeInfo(T)) {
- .@"struct" => |structInfo| {
- inline for (structInfo.fields) |field| {
- if (!field.is_comptime)
- parseFree(field.type, undefined, allocator);
- }
- },
- .pointer => |ptrInfo| {
- switch (ptrInfo.size) {
- .One => {
- parseFree(ptrInfo.child, value.*, allocator);
- },
- .Slice => {
- for (value) |v|
- parseFree(ptrInfo.child, v, allocator);
- },
- else => unreachable,
- }
- },
- else => unreachable,
- }
-}
-
-pub export fn entry() void {
- const allocator = std.testing.failing_allocator;
- _ = parse(std.StringArrayHashMap(bool), allocator) catch return;
+export fn foo(ptr: *anyopaque) void {
+ _ = ptr.*;
}
// error
-// target=native
-// backend=llvm
//
-// :11:22: error: comparison of 'void' with null
-// :25:51: error: cannot load opaque type 'anyopaque'
-// :25:51: error: values of type 'fn (*anyopaque, usize, u8, usize) ?[*]u8' must be comptime-known, but operand value is runtime-known
-// :25:51: note: use '*const fn (*anyopaque, usize, u8, usize) ?[*]u8' for a function pointer type
-// :25:51: error: values of type 'fn (*anyopaque, []u8, u8, usize, usize) bool' must be comptime-known, but operand value is runtime-known
-// :25:51: note: use '*const fn (*anyopaque, []u8, u8, usize, usize) bool' for a function pointer type
-// :25:51: error: values of type 'fn (*anyopaque, []u8, u8, usize) void' must be comptime-known, but operand value is runtime-known
-// :25:51: note: use '*const fn (*anyopaque, []u8, u8, usize) void' for a function pointer type
+// :2:12: error: cannot load opaque type 'anyopaque'
test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig
@@ -15,6 +15,7 @@ pub export fn entry() void {
// error
//
// :12:13: error: unable to resolve comptime value
-// :7:16: note: function with comptime-only return type 'tmp.S' is evaluated at comptime
+// :12:12: note: call to function with comptime-only return type 'tmp.S' is evaluated at comptime
+// :7:16: note: return type declared here
// :2:12: note: struct requires comptime because of this field
// :2:12: note: use '*const fn () void' for a function pointer type
test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig
@@ -17,6 +17,7 @@ pub export fn entry() void {
// error
//
// :15:13: error: unable to resolve comptime value
-// :9:38: note: generic function instantiated with comptime-only return type 'tmp.S(fn () void)' is evaluated at comptime
+// :15:12: note: call to generic function instantiated with comptime-only return type 'tmp.S(fn () void)' is evaluated at comptime
+// :9:38: note: return type declared here
// :3:16: note: struct requires comptime because of this field
// :3:16: note: use '*const fn () void' for a function pointer type
test/cases/compile_errors/generic_function_instance_with_non-constant_expression.zig
@@ -10,8 +10,7 @@ export fn entry() usize {
}
// error
-// backend=stage2
-// target=native
//
-// :5:16: error: runtime-known argument passed to comptime parameter
-// :1:17: note: declared comptime here
+// :5:16: error: unable to resolve comptime value
+// :5:16: note: argument to comptime parameter must be comptime-known
+// :1:8: note: parameter declared comptime here
test/cases/compile_errors/generic_function_instantiation_inherits_parent_branch_quota.zig
@@ -22,9 +22,8 @@ fn Type(comptime n: usize) type {
}
// error
-// backend=stage2
-// target=native
//
// :21:16: error: evaluation exceeded 1001 backwards branches
// :21:16: note: use @setEvalBranchQuota() to raise the branch limit from 1001
// :16:34: note: called from here
+// :8:15: note: called from here
test/cases/compile_errors/generic_instantiation_failure_in_generic_function_return_type.zig
@@ -40,3 +40,4 @@ pub fn is(comptime id: std.builtin.TypeId) TraitFn {
// target=native
//
// :8:48: error: expected type 'type', found 'bool'
+// :5:21: note: called from here
test/cases/compile_errors/generic_method_call_with_invalid_param.zig
@@ -22,12 +22,11 @@ const S = struct {
};
// error
-// backend=stage2
-// target=native
//
// :3:18: error: expected type 'bool', found 'void'
// :19:43: note: parameter type declared here
// :8:18: error: expected type 'void', found 'bool'
// :20:43: note: parameter type declared here
-// :15:26: error: runtime-known argument passed to comptime parameter
-// :21:57: note: declared comptime here
+// :15:26: error: unable to resolve comptime value
+// :15:26: note: argument to comptime parameter must be comptime-known
+// :21:48: note: parameter declared comptime here
test/cases/compile_errors/global_variable_initializer_must_be_constant_expression.zig
@@ -5,7 +5,5 @@ export fn entry() i32 {
}
// error
-// backend=stage2
-// target=native
//
// :2:14: error: comptime call of extern function
test/cases/compile_errors/inline_call_runtime_value_to_comptime_param.zig
@@ -10,8 +10,7 @@ pub export fn entry() void {
}
// error
-// backend=stage2
-// target=native
//
// :5:18: error: unable to resolve comptime value
// :5:18: note: argument to comptime parameter must be comptime-known
+// :1:24: note: parameter declared comptime here
test/cases/compile_errors/invalid_extern_function_call.zig
@@ -9,8 +9,6 @@ export fn entry1() void {
}
// error
-// backend=stage2
-// target=native
//
-// :4:15: error: comptime call of extern function pointer
-// :8:5: error: inline call of extern function pointer
+// :4:15: error: comptime call of extern function
+// :8:5: error: inline call of extern function
test/cases/compile_errors/invalid_pointer_for_var_type.zig
@@ -7,7 +7,5 @@ export fn f() void {
}
// error
-// backend=stage2
-// target=native
//
// :2:16: error: comptime call of extern function
test/cases/compile_errors/nested_generic_function_param_type_mismatch.zig
@@ -19,6 +19,6 @@ pub export fn entry() void {
// backend=llvm
// target=native
//
-// :15:28: error: expected type '*const fn (comptime type, u8, u8) u32', found '*const fn (void, u8, u8) u32'
-// :15:28: note: pointer type child 'fn (void, u8, u8) u32' cannot cast into pointer type child 'fn (comptime type, u8, u8) u32'
+// :15:28: error: expected type '*const fn (type, u8, u8) u32', found '*const fn (void, u8, u8) u32'
+// :15:28: note: pointer type child 'fn (void, u8, u8) u32' cannot cast into pointer type child 'fn (type, u8, u8) u32'
// :15:28: note: non-generic function cannot cast into a generic function
test/cases/compile_errors/never_inline_call_of_inline_fn_with_comptime_param.zig
@@ -19,5 +19,5 @@ export fn entry2() void {
// error
//
-// :14:5: error: 'never_inline' call of inline function
-// :17:5: error: 'never_inline' call of inline function
+// :14:5: error: cannot perform inline call with 'never_inline' modifier
+// :17:5: error: cannot perform inline call with 'never_inline' modifier
test/cases/compile_errors/non-const_expression_in_struct_literal_outside_function.zig
@@ -9,7 +9,5 @@ export fn entry() usize {
}
// error
-// backend=stage2
-// target=native
//
// :4:27: error: comptime call of extern function
test/cases/compile_errors/non_comptime_param_in_comptime_function.zig
@@ -11,5 +11,6 @@ export fn entry() void {
// error
//
// :8:11: error: unable to resolve comptime value
-// :1:20: note: function with comptime-only return type 'type' is evaluated at comptime
-// :1:20: note: types are not available at runtime
+// :8:10: note: call to function with comptime-only return type 'type' is evaluated at comptime
+// :1:20: note: return type declared here
+// :8:10: note: types are not available at runtime
test/cases/compile_errors/recursive_inline_fn.zig
@@ -29,8 +29,10 @@ pub export fn entry2() void {
}
// error
-// backend=stage2
-// target=native
//
// :5:27: error: inline call is recursive
+// :12:12: note: called from here
// :24:10: error: inline call is recursive
+// :20:10: note: called from here
+// :16:11: note: called from here
+// :28:10: note: called from here
test/cases/compile_errors/runtime_operation_in_comptime_scope.zig
@@ -27,8 +27,9 @@ var rt: u32 = undefined;
// :19:5: note: operation is runtime due to this operand
// :14:8: note: called at comptime from here
// :10:12: note: called at comptime from here
-// :13:10: note: function with comptime-only return type 'type' is evaluated at comptime
-// :13:10: note: types are not available at runtime
+// :10:12: note: call to function with comptime-only return type 'type' is evaluated at comptime
+// :13:10: note: return type declared here
+// :10:12: note: types are not available at runtime
// :2:8: note: called from here
// :19:8: error: unable to evaluate comptime expression
// :19:5: note: operation is runtime due to this operand
test/standalone/simple/std_enums_big_enums.zig
@@ -31,6 +31,7 @@ pub fn main() void {
var bounded_multiset = std.enums.BoundedEnumMultiset(big.Big, u8).init(.{});
_ = &bounded_multiset;
+ @setEvalBranchQuota(3000);
var array = std.enums.EnumArray(big.Big, u8).init(undefined);
array = std.enums.EnumArray(big.Big, u8).initDefault(123, .{});
}
test/compile_errors.zig
@@ -57,8 +57,9 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
\\}
, &[_][]const u8{
":3:12: error: unable to resolve comptime value",
- ":2:55: note: generic function instantiated with comptime-only return type '?fn () void' is evaluated at comptime",
- ":2:55: note: use '*const fn () void' for a function pointer type",
+ ":3:19: note: call to generic function instantiated with comptime-only return type '?fn () void' is evaluated at comptime",
+ ":2:55: note: return type declared here",
+ ":3:19: note: use '*const fn () void' for a function pointer type",
});
case.addSourceFile("b.zig",
\\pub const ElfDynLib = struct {
@@ -193,10 +194,12 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
\\ import.anytypeFunction(S{ .x = x, .y = u32 });
\\}
, &[_][]const u8{
- ":4:33: error: runtime-known argument passed to comptime parameter",
- ":1:38: note: declared comptime here",
- ":8:36: error: runtime-known argument passed to comptime parameter",
- ":2:41: note: declared comptime here",
+ ":4:33: error: unable to resolve comptime value",
+ ":4:33: note: argument to comptime parameter must be comptime-known",
+ ":1:29: note: parameter declared comptime here",
+ ":8:36: error: unable to resolve comptime value",
+ ":8:36: note: argument to comptime parameter must be comptime-known",
+ ":2:32: note: parameter declared comptime here",
":13:32: error: unable to resolve comptime value",
":13:32: note: initializer of comptime-only struct 'tmp.callAnytypeFunctionWithRuntimeComptimeOnlyType.S' must be comptime-known",
":12:35: note: struct requires comptime because of this field",