Commit db33ee45b7
Changed files (30)
src
arch
aarch64
arm
riscv64
sparc64
wasm
x86_64
src/arch/aarch64/CodeGen.zig
@@ -13,6 +13,7 @@ const Value = @import("../../value.zig").Value;
const TypedValue = @import("../../TypedValue.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const Compilation = @import("../../Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
@@ -49,7 +50,8 @@ liveness: Liveness,
bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
-mod_fn: *const Module.Fn,
+func_index: InternPool.Index,
+owner_decl: Module.Decl.Index,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@@ -199,7 +201,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -245,7 +247,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -328,7 +330,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -339,8 +341,8 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
- const module_fn = mod.funcPtr(module_fn_index);
- const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+ const func = mod.funcInfo(func_index);
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -359,7 +361,8 @@ pub fn generate(
.debug_output = debug_output,
.target = &bin_file.options.target,
.bin_file = bin_file,
- .mod_fn = module_fn,
+ .func_index = func_index,
+ .owner_decl = func.owner_decl,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@@ -368,8 +371,8 @@ pub fn generate(
.branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
- .end_di_line = module_fn.rbrace_line,
- .end_di_column = module_fn.rbrace_column,
+ .end_di_line = func.rbrace_line,
+ .end_di_column = func.rbrace_column,
};
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
@@ -416,8 +419,8 @@ pub fn generate(
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
- .prev_di_line = module_fn.lbrace_line,
- .prev_di_column = module_fn.lbrace_column,
+ .prev_di_line = func.lbrace_line,
+ .prev_di_column = func.lbrace_column,
.stack_size = function.max_end_stack,
.saved_regs_stack_space = function.saved_regs_stack_space,
};
@@ -4011,12 +4014,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
- const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
- const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
@@ -4190,10 +4193,11 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
+ const mod = self.bin_file.options.module.?;
const ty = self.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[inst];
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
+ const name = mod.getParamName(self.func_index, src_index);
try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
@@ -4348,7 +4352,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name);
- const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl);
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
@@ -4617,9 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = self.bin_file.options.module.?;
- const function = mod.funcPtr(ty_fn.func);
+ const func = mod.funcInfo(ty_fn.func);
// TODO emit debug info for function change
- _ = function;
+ _ = func;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
}
@@ -5529,12 +5533,12 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
- const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
- const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
@@ -5650,12 +5654,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
- const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
- const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
@@ -5847,12 +5851,12 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const atom_index = switch (self.bin_file.tag) {
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
- const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
- const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
@@ -6164,7 +6168,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
self.bin_file,
self.src_loc,
arg_tv,
- self.mod_fn.owner_decl,
+ self.owner_decl,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -6198,6 +6202,7 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallMCValues = .{
@@ -6240,10 +6245,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (fn_info.param_types, 0..) |ty, i| {
+ for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (param_size == 0) {
- result.args[i] = .{ .none = {} };
+ result_arg.* = .{ .none = {} };
continue;
}
@@ -6256,7 +6261,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
if (param_size <= 8) {
- result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) };
+ result_arg.* = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) };
ncrn += 1;
} else {
return self.fail("TODO MCValues with multiple registers", .{});
@@ -6273,7 +6278,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- result.args[i] = .{ .stack_argument_offset = nsaa };
+ result_arg.* = .{ .stack_argument_offset = nsaa };
nsaa += param_size;
}
}
@@ -6305,16 +6310,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (fn_info.param_types, 0..) |ty, i| {
+ for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
if (ty.toType().abiSize(mod) > 0) {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
- result.args[i] = .{ .stack_argument_offset = stack_offset };
+ result_arg.* = .{ .stack_argument_offset = stack_offset };
stack_offset += param_size;
} else {
- result.args[i] = .{ .none = {} };
+ result_arg.* = .{ .none = {} };
}
}
src/arch/arm/CodeGen.zig
@@ -13,6 +13,7 @@ const Value = @import("../../value.zig").Value;
const TypedValue = @import("../../TypedValue.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const Compilation = @import("../../Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
@@ -50,7 +51,7 @@ liveness: Liveness,
bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
-mod_fn: *const Module.Fn,
+func_index: InternPool.Index,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@@ -258,6 +259,7 @@ const DbgInfoReloc = struct {
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
+ const mod = function.bin_file.options.module.?;
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) {
@@ -278,7 +280,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), loc);
},
.plan9 => {},
.none => {},
@@ -286,6 +288,7 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
+ const mod = function.bin_file.options.module.?;
const is_ptr = switch (reloc.tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
@@ -321,7 +324,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -334,7 +337,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -345,8 +348,8 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
- const module_fn = mod.funcPtr(module_fn_index);
- const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+ const func = mod.funcInfo(func_index);
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -365,7 +368,7 @@ pub fn generate(
.target = &bin_file.options.target,
.bin_file = bin_file,
.debug_output = debug_output,
- .mod_fn = module_fn,
+ .func_index = func_index,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@@ -374,8 +377,8 @@ pub fn generate(
.branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
- .end_di_line = module_fn.rbrace_line,
- .end_di_column = module_fn.rbrace_column,
+ .end_di_line = func.rbrace_line,
+ .end_di_column = func.rbrace_column,
};
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
@@ -422,8 +425,8 @@ pub fn generate(
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
- .prev_di_line = module_fn.lbrace_line,
- .prev_di_column = module_fn.lbrace_column,
+ .prev_di_line = func.lbrace_line,
+ .prev_di_column = func.lbrace_column,
.stack_size = function.max_end_stack,
.saved_regs_stack_space = function.saved_regs_stack_space,
};
@@ -4163,10 +4166,11 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
+ const mod = self.bin_file.options.module.?;
const ty = self.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[inst];
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
+ const name = mod.getParamName(self.func_index, src_index);
try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
@@ -4569,9 +4573,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = self.bin_file.options.module.?;
- const function = mod.funcPtr(ty_fn.func);
+ const func = mod.funcInfo(ty_fn.func);
// TODO emit debug info for function change
- _ = function;
+ _ = func;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
}
@@ -6113,11 +6117,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
arg_tv,
- self.mod_fn.owner_decl,
+ mod.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -6149,6 +6154,7 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallMCValues = .{
@@ -6194,14 +6200,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (fn_info.param_types, 0..) |ty, i| {
+ for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
if (ty.toType().abiAlignment(mod) == 8)
ncrn = std.mem.alignForward(usize, ncrn, 2);
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
- result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
+ result_arg.* = .{ .register = c_abi_int_param_regs[ncrn] };
ncrn += 1;
} else {
return self.fail("TODO MCValues with multiple registers", .{});
@@ -6213,7 +6219,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ty.toType().abiAlignment(mod) == 8)
nsaa = std.mem.alignForward(u32, nsaa, 8);
- result.args[i] = .{ .stack_argument_offset = nsaa };
+ result_arg.* = .{ .stack_argument_offset = nsaa };
nsaa += param_size;
}
}
@@ -6244,16 +6250,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (fn_info.param_types, 0..) |ty, i| {
+ for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
if (ty.toType().abiSize(mod) > 0) {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
- result.args[i] = .{ .stack_argument_offset = stack_offset };
+ result_arg.* = .{ .stack_argument_offset = stack_offset };
stack_offset += param_size;
} else {
- result.args[i] = .{ .none = {} };
+ result_arg.* = .{ .none = {} };
}
}
src/arch/riscv64/CodeGen.zig
@@ -12,6 +12,7 @@ const Value = @import("../../value.zig").Value;
const TypedValue = @import("../../TypedValue.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const Compilation = @import("../../Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
@@ -43,7 +44,7 @@ air: Air,
liveness: Liveness,
bin_file: *link.File,
target: *const std.Target,
-mod_fn: *const Module.Fn,
+func_index: InternPool.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
@@ -217,7 +218,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -228,8 +229,8 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
- const module_fn = mod.funcPtr(module_fn_index);
- const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+ const func = mod.funcInfo(func_index);
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -247,7 +248,7 @@ pub fn generate(
.liveness = liveness,
.target = &bin_file.options.target,
.bin_file = bin_file,
- .mod_fn = module_fn,
+ .func_index = func_index,
.code = code,
.debug_output = debug_output,
.err_msg = null,
@@ -258,8 +259,8 @@ pub fn generate(
.branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
- .end_di_line = module_fn.rbrace_line,
- .end_di_column = module_fn.rbrace_column,
+ .end_di_line = func.rbrace_line,
+ .end_di_column = func.rbrace_column,
};
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
@@ -301,8 +302,8 @@ pub fn generate(
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
- .prev_di_line = module_fn.lbrace_line,
- .prev_di_column = module_fn.lbrace_column,
+ .prev_di_line = func.lbrace_line,
+ .prev_di_column = func.lbrace_column,
};
defer emit.deinit();
@@ -1627,13 +1628,15 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
+ const mod = self.bin_file.options.module.?;
const arg = self.air.instructions.items(.data)[inst].arg;
const ty = self.air.getRefType(arg.ty);
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index);
+ const owner_decl = mod.funcOwnerDeclIndex(self.func_index);
+ const name = mod.getParamName(self.func_index, arg.src_index);
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{
.register = reg.dwarfLocOp(),
}),
.stack_offset => {},
@@ -1742,24 +1745,28 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
if (try self.air.value(callee, mod)) |func_value| {
- if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
- const atom = elf_file.getAtom(atom_index);
- _ = try atom.getOrCreateOffsetTableEntry(elf_file);
- const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
- try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
- _ = try self.addInst(.{
- .tag = .jalr,
- .data = .{ .i_type = .{
- .rd = .ra,
- .rs1 = .ra,
- .imm12 = 0,
- } },
- });
- } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) {
- return self.fail("TODO implement calling extern functions", .{});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
+ switch (mod.intern_pool.indexToKey(func_value.ip_index)) {
+ .func => |func| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
+ const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
+ try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
+ _ = try self.addInst(.{
+ .tag = .jalr,
+ .data = .{ .i_type = .{
+ .rd = .ra,
+ .rs1 = .ra,
+ .imm12 = 0,
+ } },
+ });
+ },
+ .extern_func => {
+ return self.fail("TODO implement calling extern functions", .{});
+ },
+ else => {
+ return self.fail("TODO implement calling bitcasted functions", .{});
+ },
}
} else {
return self.fail("TODO implement calling runtime-known function pointer", .{});
@@ -1876,9 +1883,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = self.bin_file.options.module.?;
- const function = mod.funcPtr(ty_fn.func);
+ const func = mod.funcInfo(ty_fn.func);
// TODO emit debug info for function change
- _ = function;
+ _ = func;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
}
@@ -2569,11 +2576,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
typed_value,
- self.mod_fn.owner_decl,
+ mod.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -2605,6 +2613,7 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallMCValues = .{
@@ -2636,14 +2645,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var next_stack_offset: u32 = 0;
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
- for (fn_info.param_types, 0..) |ty, i| {
+ for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
- result.args[i] = .{ .register = argument_registers[next_register] };
+ result_arg.* = .{ .register = argument_registers[next_register] };
next_register += 1;
} else {
- result.args[i] = .{ .stack_offset = next_stack_offset };
+ result_arg.* = .{ .stack_offset = next_stack_offset };
next_register += next_stack_offset;
}
} else if (param_size <= 16) {
@@ -2652,11 +2661,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
} else if (next_register < argument_registers.len) {
return self.fail("TODO MCValues split register + stack", .{});
} else {
- result.args[i] = .{ .stack_offset = next_stack_offset };
+ result_arg.* = .{ .stack_offset = next_stack_offset };
next_register += next_stack_offset;
}
} else {
- result.args[i] = .{ .stack_offset = next_stack_offset };
+ result_arg.* = .{ .stack_offset = next_stack_offset };
next_register += next_stack_offset;
}
}
src/arch/sparc64/CodeGen.zig
@@ -11,6 +11,7 @@ const Allocator = mem.Allocator;
const builtin = @import("builtin");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const TypedValue = @import("../../TypedValue.zig");
const ErrorMsg = Module.ErrorMsg;
const codegen = @import("../../codegen.zig");
@@ -52,7 +53,7 @@ air: Air,
liveness: Liveness,
bin_file: *link.File,
target: *const std.Target,
-mod_fn: *const Module.Fn,
+func_index: InternPool.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
@@ -260,7 +261,7 @@ const BigTomb = struct {
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -271,8 +272,8 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
- const module_fn = mod.funcPtr(module_fn_index);
- const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+ const func = mod.funcInfo(func_index);
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -289,8 +290,8 @@ pub fn generate(
.air = air,
.liveness = liveness,
.target = &bin_file.options.target,
+ .func_index = func_index,
.bin_file = bin_file,
- .mod_fn = module_fn,
.code = code,
.debug_output = debug_output,
.err_msg = null,
@@ -301,8 +302,8 @@ pub fn generate(
.branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
- .end_di_line = module_fn.rbrace_line,
- .end_di_column = module_fn.rbrace_column,
+ .end_di_line = func.rbrace_line,
+ .end_di_column = func.rbrace_column,
};
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
@@ -344,8 +345,8 @@ pub fn generate(
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
- .prev_di_line = module_fn.lbrace_line,
- .prev_di_column = module_fn.lbrace_column,
+ .prev_di_line = func.lbrace_line,
+ .prev_di_column = func.lbrace_column,
};
defer emit.deinit();
@@ -1345,37 +1346,41 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// on linking.
if (try self.air.value(callee, mod)) |func_value| {
if (self.bin_file.tag == link.File.Elf.base_tag) {
- if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| {
- const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
- const atom = elf_file.getAtom(atom_index);
- _ = try atom.getOrCreateOffsetTableEntry(elf_file);
- break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
- } else unreachable;
+ switch (mod.intern_pool.indexToKey(func_value.ip_index)) {
+ .func => |func| {
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ _ = try atom.getOrCreateOffsetTableEntry(elf_file);
+ break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
+ } else unreachable;
- try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });
+ try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });
- _ = try self.addInst(.{
- .tag = .jmpl,
- .data = .{
- .arithmetic_3op = .{
- .is_imm = false,
- .rd = .o7,
- .rs1 = .o7,
- .rs2_or_imm = .{ .rs2 = .g0 },
+ _ = try self.addInst(.{
+ .tag = .jmpl,
+ .data = .{
+ .arithmetic_3op = .{
+ .is_imm = false,
+ .rd = .o7,
+ .rs1 = .o7,
+ .rs2_or_imm = .{ .rs2 = .g0 },
+ },
},
- },
- });
+ });
- // TODO Find a way to fill this delay slot
- _ = try self.addInst(.{
- .tag = .nop,
- .data = .{ .nop = {} },
- });
- } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) {
- return self.fail("TODO implement calling extern functions", .{});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
+ // TODO Find a way to fill this delay slot
+ _ = try self.addInst(.{
+ .tag = .nop,
+ .data = .{ .nop = {} },
+ });
+ },
+ .extern_func => {
+ return self.fail("TODO implement calling extern functions", .{});
+ },
+ else => {
+ return self.fail("TODO implement calling bitcasted functions", .{});
+ },
}
} else @panic("TODO SPARCv9 currently does not support non-ELF binaries");
} else {
@@ -1660,9 +1665,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = self.bin_file.options.module.?;
- const function = mod.funcPtr(ty_fn.func);
+ const func = mod.funcInfo(ty_fn.func);
// TODO emit debug info for function change
- _ = function;
+ _ = func;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
}
@@ -3595,13 +3600,15 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
}
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
+ const mod = self.bin_file.options.module.?;
const arg = self.air.instructions.items(.data)[inst].arg;
const ty = self.air.getRefType(arg.ty);
- const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index);
+ const owner_decl = mod.funcOwnerDeclIndex(self.func_index);
+ const name = mod.getParamName(self.func_index, arg.src_index);
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{
.register = reg.dwarfLocOp(),
}),
else => {},
@@ -4127,11 +4134,12 @@ fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Re
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
self.src_loc,
typed_value,
- self.mod_fn.owner_decl,
+ mod.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -4452,6 +4460,7 @@ fn realStackOffset(off: u32) u32 {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallMCValues = .{
@@ -4486,14 +4495,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
.callee => abi.c_abi_int_param_regs_callee_view,
};
- for (fn_info.param_types, 0..) |ty, i| {
+ for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
- result.args[i] = .{ .register = argument_registers[next_register] };
+ result_arg.* = .{ .register = argument_registers[next_register] };
next_register += 1;
} else {
- result.args[i] = .{ .stack_offset = next_stack_offset };
+ result_arg.* = .{ .stack_offset = next_stack_offset };
next_register += next_stack_offset;
}
} else if (param_size <= 16) {
@@ -4502,11 +4511,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
} else if (next_register < argument_registers.len) {
return self.fail("TODO MCValues split register + stack", .{});
} else {
- result.args[i] = .{ .stack_offset = next_stack_offset };
+ result_arg.* = .{ .stack_offset = next_stack_offset };
next_register += next_stack_offset;
}
} else {
- result.args[i] = .{ .stack_offset = next_stack_offset };
+ result_arg.* = .{ .stack_offset = next_stack_offset };
next_register += next_stack_offset;
}
}
src/arch/wasm/CodeGen.zig
@@ -650,7 +650,7 @@ air: Air,
liveness: Liveness,
gpa: mem.Allocator,
debug_output: codegen.DebugInfoOutput,
-mod_fn: *const Module.Fn,
+func_index: InternPool.Index,
/// Contains a list of current branches.
/// When we return from a branch, the branch will be popped from this list,
/// which means branches can only contain references from within its own branch,
@@ -1202,7 +1202,7 @@ fn genFunctype(
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- func_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -1210,7 +1210,7 @@ pub fn generate(
) codegen.CodeGenError!codegen.Result {
_ = src_loc;
const mod = bin_file.options.module.?;
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
.air = air,
@@ -1223,7 +1223,7 @@ pub fn generate(
.target = bin_file.options.target,
.bin_file = bin_file.cast(link.File.Wasm).?,
.debug_output = debug_output,
- .mod_fn = func,
+ .func_index = func_index,
};
defer code_gen.deinit();
@@ -1237,8 +1237,9 @@ pub fn generate(
fn genFunc(func: *CodeGen) InnerError!void {
const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(func.decl.ty).?;
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), fn_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1347,6 +1348,7 @@ const CallWValues = struct {
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallWValues = .{
@@ -1369,7 +1371,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
- for (fn_info.param_types) |ty| {
+ for (fn_info.param_types.get(ip)) |ty| {
if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
continue;
}
@@ -1379,7 +1381,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
}
},
.C => {
- for (fn_info.param_types) |ty| {
+ for (fn_info.param_types.get(ip)) |ty| {
const ty_classes = abi.classifyType(ty.toType(), mod);
for (ty_classes) |class| {
if (class == .none) continue;
@@ -2185,6 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const ty = func.typeOf(pl_op.operand);
const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(mod),
@@ -2203,7 +2206,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (func_val.getExternFunc(mod)) |extern_func| {
const ext_decl = mod.declPtr(extern_func.decl);
const ext_info = mod.typeToFunc(ext_decl.ty).?;
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod);
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), ext_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl);
const atom = func.bin_file.getAtomPtr(atom_index);
@@ -2253,7 +2256,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), fn_info.return_type.toType(), mod);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
@@ -2564,8 +2567,8 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (func.debug_output) {
.dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
- const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
- try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
+ const name = mod.getParamName(func.func_index, src_index);
+ try dwarf.genArgDbgInfo(name, arg_ty, mod.funcOwnerDeclIndex(func.func_index), .{
.wasm_local = arg.local.value,
});
},
@@ -6198,6 +6201,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
+ const mod = func.bin_file.base.options.module.?;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const ty = func.typeOf(pl_op.operand);
const operand = try func.resolveInst(pl_op.operand);
@@ -6214,7 +6218,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop;
},
};
- try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
+ try func.debug_output.dwarf.genVarDbgInfo(name, ty, mod.funcOwnerDeclIndex(func.func_index), is_ptr, loc);
func.finishAir(inst, .none, &.{});
}
src/arch/x86_64/CodeGen.zig
@@ -110,20 +110,21 @@ const FrameAddr = struct { index: FrameIndex, off: i32 = 0 };
const RegisterOffset = struct { reg: Register, off: i32 = 0 };
const Owner = union(enum) {
- mod_fn: *const Module.Fn,
+ func_index: InternPool.Index,
lazy_sym: link.File.LazySymbol,
fn getDecl(owner: Owner, mod: *Module) Module.Decl.Index {
return switch (owner) {
- .mod_fn => |mod_fn| mod_fn.owner_decl,
+ .func_index => |func_index| mod.funcOwnerDeclIndex(func_index),
.lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod),
};
}
fn getSymbolIndex(owner: Owner, ctx: *Self) !u32 {
switch (owner) {
- .mod_fn => |mod_fn| {
- const decl_index = mod_fn.owner_decl;
+ .func_index => |func_index| {
+ const mod = ctx.bin_file.options.module.?;
+ const decl_index = mod.funcOwnerDeclIndex(func_index);
if (ctx.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
return macho_file.getAtom(atom).getSymbolIndex().?;
@@ -638,7 +639,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -649,8 +650,8 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
- const module_fn = mod.funcPtr(module_fn_index);
- const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+ const func = mod.funcInfo(func_index);
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -662,15 +663,15 @@ pub fn generate(
.target = &bin_file.options.target,
.bin_file = bin_file,
.debug_output = debug_output,
- .owner = .{ .mod_fn = module_fn },
+ .owner = .{ .func_index = func_index },
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
.arg_index = 0,
.src_loc = src_loc,
- .end_di_line = module_fn.rbrace_line,
- .end_di_column = module_fn.rbrace_column,
+ .end_di_line = func.rbrace_line,
+ .end_di_column = func.rbrace_column,
};
defer {
function.frame_allocs.deinit(gpa);
@@ -687,17 +688,16 @@ pub fn generate(
if (builtin.mode == .Debug) function.mir_to_air_map.deinit(gpa);
}
- wip_mir_log.debug("{}:", .{function.fmtDecl(module_fn.owner_decl)});
+ wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)});
+
+ const ip = &mod.intern_pool;
try function.frame_allocs.resize(gpa, FrameIndex.named_count);
function.frame_allocs.set(
@intFromEnum(FrameIndex.stack_frame),
FrameAlloc.init(.{
.size = 0,
- .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack|
- @intCast(set_align_stack.alignment.toByteUnitsOptional().?)
- else
- 1,
+ .alignment = @intCast(func.analysis(ip).stack_alignment.toByteUnitsOptional() orelse 1),
}),
);
function.frame_allocs.set(
@@ -761,8 +761,8 @@ pub fn generate(
.debug_output = debug_output,
.code = code,
.prev_di_pc = 0,
- .prev_di_line = module_fn.lbrace_line,
- .prev_di_column = module_fn.lbrace_column,
+ .prev_di_line = func.lbrace_line,
+ .prev_di_column = func.lbrace_column,
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
@@ -7942,7 +7942,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.typeOfIndex(inst);
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
- const name = self.owner.mod_fn.getParamName(mod, src_index);
+ const name = mod.getParamName(self.owner.func_index, src_index);
try self.genArgDbgInfo(ty, name, dst_mcv);
break :result dst_mcv;
@@ -8139,7 +8139,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (try self.air.value(callee, mod)) |func_value| {
const func_key = mod.intern_pool.indexToKey(func_value.ip_index);
if (switch (func_key) {
- .func => |func| mod.funcPtr(func.index).owner_decl,
+ .func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| decl,
else => null,
@@ -8582,9 +8582,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = self.bin_file.options.module.?;
- const function = mod.funcPtr(ty_fn.func);
+ const func = mod.funcInfo(ty_fn.func);
// TODO emit debug info for function change
- _ = function;
+ _ = func;
return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
@@ -11719,11 +11719,12 @@ fn resolveCallingConventionValues(
stack_frame_base: FrameIndex,
) !CallMCValues {
const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
const cc = fn_info.cc;
const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
defer self.gpa.free(param_types);
- for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| {
+ for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| {
dest.* = src.toType();
}
// TODO: promote var arg types
src/codegen/c/type.zig
@@ -1722,6 +1722,7 @@ pub const CType = extern union {
.Fn => {
const info = mod.typeToFunc(ty).?;
+ const ip = &mod.intern_pool;
if (!info.is_generic) {
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
@@ -1730,7 +1731,7 @@ pub const CType = extern union {
.payload => unreachable,
};
_ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
- for (info.param_types) |param_type| {
+ for (info.param_types.get(ip)) |param_type| {
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(param_type.toType(), param_kind);
}
@@ -2014,6 +2015,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
+ const ip = &mod.intern_pool;
const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (kind) {
@@ -2023,14 +2025,14 @@ pub const CType = extern union {
};
var c_params_len: usize = 0;
- for (info.param_types) |param_type| {
+ for (info.param_types.get(ip)) |param_type| {
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
- for (info.param_types) |param_type| {
+ for (info.param_types.get(ip)) |param_type| {
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?;
c_param_i += 1;
@@ -2147,6 +2149,7 @@ pub const CType = extern union {
=> {
if (ty.zigTypeTag(mod) != .Fn) return false;
+ const ip = &mod.intern_pool;
const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const data = cty.cast(Payload.Function).?.data;
@@ -2160,7 +2163,7 @@ pub const CType = extern union {
return false;
var c_param_i: usize = 0;
- for (info.param_types) |param_type| {
+ for (info.param_types.get(ip)) |param_type| {
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
@@ -2202,6 +2205,7 @@ pub const CType = extern union {
autoHash(hasher, t);
const mod = self.lookup.getModule();
+ const ip = &mod.intern_pool;
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@@ -2270,7 +2274,7 @@ pub const CType = extern union {
};
self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind);
- for (info.param_types) |param_type| {
+ for (info.param_types.get(ip)) |param_type| {
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, param_type.toType(), param_kind);
}
src/codegen/c.zig
@@ -257,7 +257,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
return .{ .data = ident };
}
-/// This data is available when outputting .c code for a `Module.Fn.Index`.
+/// This data is available when outputting .c code for a `InternPool.Index`
+/// that corresponds to `func`.
/// It is not available when generating .h file.
pub const Function = struct {
air: Air,
@@ -268,7 +269,7 @@ pub const Function = struct {
next_block_index: usize = 0,
object: Object,
lazy_fns: LazyFnMap,
- func_index: Module.Fn.Index,
+ func_index: InternPool.Index,
/// All the locals, to be emitted at the top of the function.
locals: std.ArrayListUnmanaged(Local) = .{},
/// Which locals are available for reuse, based on Type.
@@ -1487,6 +1488,7 @@ pub const DeclGen = struct {
) !void {
const store = &dg.ctypes.set;
const mod = dg.module;
+ const ip = &mod.intern_pool;
const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind);
@@ -1499,7 +1501,7 @@ pub const DeclGen = struct {
else => unreachable,
}
}
- if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold ");
+ if (fn_decl.val.getFunction(mod)) |func| if (func.analysis(ip).is_cold) try w.writeAll("zig_cold ");
if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
const trailing = try renderTypePrefix(
@@ -1744,7 +1746,7 @@ pub const DeclGen = struct {
return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
.variable => |variable| mod.decl_exports.contains(variable.decl),
.extern_func => true,
- .func => |func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl),
+ .func => |func| mod.decl_exports.contains(func.owner_decl),
else => unreachable,
};
}
@@ -4161,7 +4163,7 @@ fn airCall(
const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known;
break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) {
.extern_func => |extern_func| extern_func.decl,
- .func => |func| mod.funcPtr(func.index).owner_decl,
+ .func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| decl,
else => break :known,
@@ -4238,9 +4240,9 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_fn = f.air.instructions.items(.data)[inst].ty_fn;
const mod = f.object.dg.module;
const writer = f.object.writer();
- const function = mod.funcPtr(ty_fn.func);
+ const owner_decl = mod.funcOwnerDeclPtr(ty_fn.func);
try writer.print("/* dbg func:{s} */\n", .{
- mod.intern_pool.stringToSlice(mod.declPtr(function.owner_decl).name),
+ mod.intern_pool.stringToSlice(owner_decl.name),
});
return .none;
}
src/codegen/llvm.zig
@@ -867,14 +867,15 @@ pub const Object = struct {
pub fn updateFunc(
o: *Object,
mod: *Module,
- func_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const target = mod.getTarget();
+ const ip = &mod.intern_pool;
var dg: DeclGen = .{
.object = o,
@@ -885,24 +886,23 @@ pub const Object = struct {
const llvm_func = try o.resolveLlvmFunction(decl_index);
- if (mod.align_stack_fns.get(func_index)) |align_info| {
- o.addFnAttrInt(llvm_func, "alignstack", align_info.alignment.toByteUnitsOptional().?);
+ if (func.analysis(ip).is_noinline) {
o.addFnAttr(llvm_func, "noinline");
} else {
- Object.removeFnAttr(llvm_func, "alignstack");
- if (!func.is_noinline) Object.removeFnAttr(llvm_func, "noinline");
+ Object.removeFnAttr(llvm_func, "noinline");
}
- if (func.is_cold) {
- o.addFnAttr(llvm_func, "cold");
+ if (func.analysis(ip).stack_alignment.toByteUnitsOptional()) |alignment| {
+ o.addFnAttrInt(llvm_func, "alignstack", alignment);
+ o.addFnAttr(llvm_func, "noinline");
} else {
- Object.removeFnAttr(llvm_func, "cold");
+ Object.removeFnAttr(llvm_func, "alignstack");
}
- if (func.is_noinline) {
- o.addFnAttr(llvm_func, "noinline");
+ if (func.analysis(ip).is_cold) {
+ o.addFnAttr(llvm_func, "cold");
} else {
- Object.removeFnAttr(llvm_func, "noinline");
+ Object.removeFnAttr(llvm_func, "cold");
}
// TODO: disable this if safety is off for the function scope
@@ -921,7 +921,7 @@ pub const Object = struct {
o.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
}
- if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
+ if (ip.stringToSliceUnwrap(decl.@"linksection")) |section|
llvm_func.setSection(section);
// Remove all the basic blocks of a function in order to start over, generating
@@ -968,7 +968,7 @@ pub const Object = struct {
.byval => {
assert(!it.byval_attr);
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index].toType();
+ const param_ty = fn_info.param_types.get(ip)[param_index].toType();
const param = llvm_func.getParam(llvm_arg_i);
try args.ensureUnusedCapacity(1);
@@ -987,7 +987,7 @@ pub const Object = struct {
llvm_arg_i += 1;
},
.byref => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
const alignment = param_ty.abiAlignment(mod);
@@ -1006,7 +1006,7 @@ pub const Object = struct {
}
},
.byref_mut => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
const alignment = param_ty.abiAlignment(mod);
@@ -1026,7 +1026,7 @@ pub const Object = struct {
},
.abi_sized_int => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1053,7 +1053,7 @@ pub const Object = struct {
},
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -1083,7 +1083,7 @@ pub const Object = struct {
.multiple_llvm_types => {
assert(!it.byval_attr);
const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
@@ -1114,7 +1114,7 @@ pub const Object = struct {
args.appendAssumeCapacity(casted);
},
.float_array => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1132,7 +1132,7 @@ pub const Object = struct {
}
},
.i32_array, .i64_array => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1168,7 +1168,7 @@ pub const Object = struct {
const decl_di_ty = try o.lowerDebugType(decl.ty, .full);
const subprogram = dib.createFunction(
di_file.?.toScope(),
- mod.intern_pool.stringToSlice(decl.name),
+ ip.stringToSlice(decl.name),
llvm_func.getValueName(),
di_file.?,
line_number,
@@ -1460,6 +1460,7 @@ pub const Object = struct {
const target = o.target;
const dib = o.di_builder.?;
const mod = o.module;
+ const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => {
const di_type = dib.createBasicType("void", 0, DW.ATE.signed);
@@ -1492,7 +1493,6 @@ pub const Object = struct {
return enum_di_ty;
}
- const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len);
@@ -1518,7 +1518,7 @@ pub const Object = struct {
if (@sizeOf(usize) == @sizeOf(u64)) {
enumerators[i] = dib.createEnumerator2(
field_name_z,
- @as(c_uint, @intCast(bigint.limbs.len)),
+ @intCast(bigint.limbs.len),
bigint.limbs.ptr,
int_info.bits,
int_info.signedness == .unsigned,
@@ -2320,8 +2320,8 @@ pub const Object = struct {
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
- for (0..mod.typeToFunc(ty).?.param_types.len) |i| {
- const param_ty = mod.typeToFunc(ty).?.param_types[i].toType();
+ for (0..fn_info.param_types.len) |i| {
+ const param_ty = fn_info.param_types.get(ip)[i].toType();
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (isByRef(param_ty, mod)) {
@@ -2475,9 +2475,10 @@ pub const Object = struct {
const fn_type = try o.lowerType(zig_fn_type);
const fqn = try decl.getFullyQualifiedName(mod);
+ const ip = &mod.intern_pool;
const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_fn = o.llvm_module.addFunctionInAddressSpace(mod.intern_pool.stringToSlice(fqn), fn_type, llvm_addrspace);
+ const llvm_fn = o.llvm_module.addFunctionInAddressSpace(ip.stringToSlice(fqn), fn_type, llvm_addrspace);
gop.value_ptr.* = llvm_fn;
const is_extern = decl.isExtern(mod);
@@ -2486,8 +2487,8 @@ pub const Object = struct {
llvm_fn.setUnnamedAddr(.True);
} else {
if (target.isWasm()) {
- o.addFnAttrString(llvm_fn, "wasm-import-name", mod.intern_pool.stringToSlice(decl.name));
- if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
+ o.addFnAttrString(llvm_fn, "wasm-import-name", ip.stringToSlice(decl.name));
+ if (ip.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
if (!std.mem.eql(u8, lib_name, "c")) {
o.addFnAttrString(llvm_fn, "wasm-import-module", lib_name);
}
@@ -2546,13 +2547,13 @@ pub const Object = struct {
while (it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index].toType();
+ const param_ty = fn_info.param_types.get(ip)[param_index].toType();
if (!isByRef(param_ty, mod)) {
o.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1];
const param_llvm_ty = try o.lowerType(param_ty.toType());
const alignment = param_ty.toType().abiAlignment(mod);
o.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
@@ -3031,6 +3032,7 @@ pub const Object = struct {
fn lowerTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type {
const mod = o.module;
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const llvm_ret_ty = try lowerFnRetTy(o, fn_info);
@@ -3052,19 +3054,19 @@ pub const Object = struct {
while (it.next()) |lowering| switch (lowering) {
.no_bits => continue,
.byval => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
try llvm_params.append(try o.lowerType(param_ty));
},
.byref, .byref_mut => {
try llvm_params.append(o.context.pointerType(0));
},
.abi_sized_int => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
try llvm_params.append(o.context.intType(abi_size * 8));
},
.slice => {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
param_ty.optionalChild(mod).slicePtrFieldType(mod)
else
@@ -3083,7 +3085,7 @@ pub const Object = struct {
try llvm_params.append(o.context.intType(16));
},
.float_array => |count| {
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
const field_count = @as(c_uint, @intCast(count));
const arr_ty = float_ty.arrayType(field_count);
@@ -3137,8 +3139,7 @@ pub const Object = struct {
return llvm_type.getUndef();
}
- const val_key = mod.intern_pool.indexToKey(tv.val.toIntern());
- switch (val_key) {
+ switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
@@ -3175,12 +3176,14 @@ pub const Object = struct {
.enum_literal,
.empty_enum_value,
=> unreachable, // non-runtime values
- .extern_func, .func => {
- const fn_decl_index = switch (val_key) {
- .extern_func => |extern_func| extern_func.decl,
- .func => |func| mod.funcPtr(func.index).owner_decl,
- else => unreachable,
- };
+ .extern_func => |extern_func| {
+ const fn_decl_index = extern_func.decl;
+ const fn_decl = mod.declPtr(fn_decl_index);
+ try mod.markDeclAlive(fn_decl);
+ return o.resolveLlvmFunction(fn_decl_index);
+ },
+ .func => |func| {
+ const fn_decl_index = func.owner_decl;
const fn_decl = mod.declPtr(fn_decl_index);
try mod.markDeclAlive(fn_decl);
return o.resolveLlvmFunction(fn_decl_index);
@@ -4598,6 +4601,7 @@ pub const FuncGen = struct {
const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const o = self.dg.object;
const mod = o.module;
+ const ip = &mod.intern_pool;
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
@@ -4801,14 +4805,14 @@ pub const FuncGen = struct {
while (it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index].toType();
+ const param_ty = fn_info.param_types.get(ip)[param_index].toType();
if (!isByRef(param_ty, mod)) {
o.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index].toType();
+ const param_ty = fn_info.param_types.get(ip)[param_index].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const alignment = param_ty.abiAlignment(mod);
o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
@@ -4828,7 +4832,7 @@ pub const FuncGen = struct {
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const ptr_info = param_ty.ptrInfo(mod);
const llvm_arg_i = it.llvm_index - 2;
@@ -4930,7 +4934,7 @@ pub const FuncGen = struct {
fg.context.pointerType(0).constNull(),
null_opt_addr_global,
};
- const panic_func = mod.funcPtrUnwrap(mod.panic_func_index).?;
+ const panic_func = mod.funcInfo(mod.panic_func_index);
const panic_decl = mod.declPtr(panic_func.owner_decl);
const fn_info = mod.typeToFunc(panic_decl.ty).?;
const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl);
@@ -6030,7 +6034,7 @@ pub const FuncGen = struct {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = o.module;
- const func = mod.funcPtr(ty_fn.func);
+ const func = mod.funcInfo(ty_fn.func);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const di_file = try o.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
@@ -6039,7 +6043,7 @@ pub const FuncGen = struct {
const cur_debug_location = self.builder.getCurrentDebugLocation2();
try self.dbg_inlined.append(self.gpa, .{
- .loc = @as(*llvm.DILocation, @ptrCast(cur_debug_location)),
+ .loc = @ptrCast(cur_debug_location),
.scope = self.di_scope.?,
.base_line = self.base_line,
});
@@ -6090,8 +6094,7 @@ pub const FuncGen = struct {
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = o.module;
- const func = mod.funcPtr(ty_fn.func);
- const decl = mod.declPtr(func.owner_decl);
+ const decl = mod.funcOwnerDeclPtr(ty_fn.func);
const di_file = try o.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
self.di_file = di_file;
const old = self.dbg_inlined.pop();
@@ -8137,12 +8140,13 @@ pub const FuncGen = struct {
}
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
- const func = self.dg.decl.getOwnedFunction(mod).?;
+ const func_index = self.dg.decl.getOwnedFunctionIndex();
+ const func = mod.funcInfo(func_index);
const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
self.di_scope.?,
- func.getParamName(mod, src_index).ptr, // TODO test 0 bit args
+ mod.getParamName(func_index, src_index).ptr, // TODO test 0 bit args
self.di_file.?,
lbrace_line,
try o.lowerDebugType(inst_ty, .full),
@@ -10888,13 +10892,17 @@ const ParamTypeIterator = struct {
pub fn next(it: *ParamTypeIterator) ?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
- const ty = it.fn_info.param_types[it.zig_index];
+ const mod = it.object.module;
+ const ip = &mod.intern_pool;
+ const ty = it.fn_info.param_types.get(ip)[it.zig_index];
it.byval_attr = false;
return nextInner(it, ty.toType());
}
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) ?Lowering {
+ const mod = it.object.module;
+ const ip = &mod.intern_pool;
if (it.zig_index >= it.fn_info.param_types.len) {
if (it.zig_index >= args.len) {
return null;
@@ -10902,7 +10910,7 @@ const ParamTypeIterator = struct {
return nextInner(it, fg.typeOf(args[it.zig_index]));
}
} else {
- return nextInner(it, it.fn_info.param_types[it.zig_index].toType());
+ return nextInner(it, it.fn_info.param_types.get(ip)[it.zig_index].toType());
}
}
src/codegen/spirv.zig
@@ -238,7 +238,7 @@ pub const DeclGen = struct {
if (ty.zigTypeTag(mod) == .Fn) {
const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
.extern_func => |extern_func| extern_func.decl,
- .func => |func| mod.funcPtr(func.index).owner_decl,
+ .func => |func| func.owner_decl,
else => unreachable,
};
const spv_decl_index = try self.resolveDecl(fn_decl_index);
@@ -255,13 +255,14 @@ pub const DeclGen = struct {
/// Fetch or allocate a result id for decl index. This function also marks the decl as alive.
/// Note: Function does not actually generate the decl.
fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index {
- const decl = self.module.declPtr(decl_index);
- try self.module.markDeclAlive(decl);
+ const mod = self.module;
+ const decl = mod.declPtr(decl_index);
+ try mod.markDeclAlive(decl);
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
// TODO: Extern fn?
- const kind: SpvModule.DeclKind = if (decl.val.getFunctionIndex(self.module) != .none)
+ const kind: SpvModule.DeclKind = if (decl.val.isFuncBody(mod))
.func
else
.global;
@@ -1268,6 +1269,7 @@ pub const DeclGen = struct {
},
.Fn => switch (repr) {
.direct => {
+ const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(ty).?;
// TODO: Put this somewhere in Sema.zig
if (fn_info.is_var_args)
@@ -1275,8 +1277,8 @@ pub const DeclGen = struct {
const param_ty_refs = try self.gpa.alloc(CacheRef, fn_info.param_types.len);
defer self.gpa.free(param_ty_refs);
- for (param_ty_refs, 0..) |*param_type, i| {
- param_type.* = try self.resolveType(fn_info.param_types[i].toType(), .direct);
+ for (param_ty_refs, fn_info.param_types.get(ip)) |*param_type, fn_param_type| {
+ param_type.* = try self.resolveType(fn_param_type.toType(), .direct);
}
const return_ty_ref = try self.resolveType(fn_info.return_type.toType(), .direct);
@@ -1576,6 +1578,7 @@ pub const DeclGen = struct {
fn genDecl(self: *DeclGen) !void {
const mod = self.module;
+ const ip = &mod.intern_pool;
const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.resolveDecl(self.decl_index);
@@ -1594,7 +1597,8 @@ pub const DeclGen = struct {
const fn_info = mod.typeToFunc(decl.ty).?;
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
- for (fn_info.param_types) |param_type| {
+ for (0..fn_info.param_types.len) |i| {
+ const param_type = fn_info.param_types.get(ip)[i];
const param_type_id = try self.resolveTypeId(param_type.toType());
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
@@ -1621,7 +1625,7 @@ pub const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
- const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(self.module));
+ const fqn = ip.stringToSlice(try decl.getFullyQualifiedName(self.module));
try self.spv.sections.debug_names.emit(self.gpa, .OpName, .{
.target = decl_id,
src/link/C.zig
@@ -88,13 +88,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
}
}
-pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *C, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
- const func = module.funcPtr(func_index);
+ const func = module.funcInfo(func_index);
const decl_index = func.owner_decl;
const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
src/link/Coff.zig
@@ -1032,7 +1032,7 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
self.getAtomPtr(atom_index).sym_index = 0;
}
-pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -1044,7 +1044,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: A
const tracy = trace(@src());
defer tracy.end();
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -1424,7 +1424,7 @@ pub fn updateDeclExports(
// detect the default subsystem.
for (exports) |exp| {
const exported_decl = mod.declPtr(exp.exported_decl);
- if (exported_decl.getOwnedFunctionIndex(mod) == .none) continue;
+ if (exported_decl.getOwnedFunctionIndex() == .none) continue;
const winapi_cc = switch (self.base.options.target.cpu.arch) {
.x86 => std.builtin.CallingConvention.Stdcall,
else => std.builtin.CallingConvention.C,
src/link/Elf.zig
@@ -2575,7 +2575,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
return local_sym;
}
-pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Elf, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -2586,7 +2586,7 @@ pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Ai
const tracy = trace(@src());
defer tracy.end();
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
src/link/MachO.zig
@@ -1845,7 +1845,7 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void {
self.markRelocsDirtyByTarget(target);
}
-pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *MachO, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -1855,7 +1855,7 @@ pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air:
const tracy = trace(@src());
defer tracy.end();
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
src/link/NvPtx.zig
@@ -13,6 +13,7 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);
const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
@@ -68,7 +69,7 @@ pub fn deinit(self: *NvPtx) void {
self.base.allocator.free(self.ptx_file_name);
}
-pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *NvPtx, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (!build_options.have_llvm) return;
try self.llvm_object.updateFunc(module, func_index, air, liveness);
}
src/link/Plan9.zig
@@ -4,6 +4,7 @@
const Plan9 = @This();
const link = @import("../link.zig");
const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const aout = @import("Plan9/aout.zig");
const codegen = @import("../codegen.zig");
@@ -344,12 +345,12 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}
-pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
@@ -908,7 +909,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
// in the deleteUnusedDecl function.
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const is_fn = decl.val.getFunctionIndex(mod) != .none;
+ const is_fn = decl.val.isFuncBody(mod);
if (is_fn) {
var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?;
var submap = symidx_and_submap.functions;
src/link/SpirV.zig
@@ -29,6 +29,7 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);
const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
const codegen = @import("../codegen/spirv.zig");
@@ -103,12 +104,12 @@ pub fn deinit(self: *SpirV) void {
self.decl_link.deinit();
}
-pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *SpirV, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
- const func = module.funcPtr(func_index);
+ const func = module.funcInfo(func_index);
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link);
defer decl_gen.deinit();
@@ -138,7 +139,7 @@ pub fn updateDeclExports(
exports: []const *Module.Export,
) !void {
const decl = mod.declPtr(decl_index);
- if (decl.val.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) {
+ if (decl.val.isFuncBody(mod) and decl.ty.fnCallingConvention(mod) == .Kernel) {
// TODO: Unify with resolveDecl in spirv.zig.
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
src/link/Wasm.zig
@@ -12,6 +12,7 @@ const log = std.log.scoped(.link);
pub const Atom = @import("Wasm/Atom.zig");
const Dwarf = @import("Dwarf.zig");
const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const CodeGen = @import("../arch/wasm/CodeGen.zig");
const codegen = @import("../codegen.zig");
@@ -1338,7 +1339,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 {
return index;
}
-pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -1349,7 +1350,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: A
const tracy = trace(@src());
defer tracy.end();
- const func = mod.funcPtr(func_index);
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
src/Air.zig
@@ -1003,7 +1003,7 @@ pub const Inst = struct {
},
ty_fn: struct {
ty: Ref,
- func: Module.Fn.Index,
+ func: InternPool.Index,
},
br: struct {
block_inst: Index,
src/codegen.zig
@@ -67,7 +67,7 @@ pub const DebugInfoOutput = union(enum) {
pub fn generateFunction(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- func_index: Module.Fn.Index,
+ func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
src/Compilation.zig
@@ -29,6 +29,7 @@ const wasi_libc = @import("wasi_libc.zig");
const fatal = @import("main.zig").fatal;
const clangMain = @import("main.zig").clangMain;
const Module = @import("Module.zig");
+const InternPool = @import("InternPool.zig");
const BuildId = std.Build.CompileStep.BuildId;
const Cache = std.Build.Cache;
const translate_c = @import("translate_c.zig");
@@ -227,7 +228,8 @@ const Job = union(enum) {
/// Write the constant value for a Decl to the output file.
codegen_decl: Module.Decl.Index,
/// Write the machine code for a function to the output file.
- codegen_func: Module.Fn.Index,
+ /// This will either be a non-generic `func_decl` or a `func_instance`.
+ codegen_func: InternPool.Index,
/// Render the .h file snippet for the Decl.
emit_h_decl: Module.Decl.Index,
/// The Decl needs to be analyzed and possibly export itself.
@@ -3216,8 +3218,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
// Tests are always emitted in test binaries. The decl_refs are created by
// Module.populateTestFunctions, but this will not queue body analysis, so do
// that now.
- const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?;
- try module.ensureFuncBodyAnalysisQueued(func_index);
+ try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
}
},
.update_embed_file => |embed_file| {
src/InternPool.zig
@@ -34,19 +34,13 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
/// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
-/// Fn objects are stored in this data structure because:
-/// * They need to be mutated after creation.
-allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{},
-/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack.
-funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{},
-
/// InferredErrorSet objects are stored in this data structure because:
/// * They contain pointers such as the errors map and the set of other inferred error sets.
/// * They need to be mutated after creation.
-allocated_inferred_error_sets: std.SegmentedList(Module.Fn.InferredErrorSet, 0) = .{},
+allocated_inferred_error_sets: std.SegmentedList(Module.InferredErrorSet, 0) = .{},
/// When a Struct object is freed from `allocated_inferred_error_sets`, it is
/// pushed into this stack.
-inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.Fn.InferredErrorSet.Index) = .{},
+inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.InferredErrorSet.Index) = .{},
/// Some types such as enums, structs, and unions need to store mappings from field names
/// to field index, or value to field index. In such cases, they will store the underlying
@@ -73,6 +67,7 @@ const Hash = std.hash.Wyhash;
const InternPool = @This();
const Module = @import("Module.zig");
+const Zir = @import("Zir.zig");
const Sema = @import("Sema.zig");
const KeyAdapter = struct {
@@ -224,7 +219,7 @@ pub const Key = union(enum) {
enum_type: EnumType,
func_type: FuncType,
error_set_type: ErrorSetType,
- inferred_error_set_type: Module.Fn.InferredErrorSet.Index,
+ inferred_error_set_type: Module.InferredErrorSet.Index,
/// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
/// via `simple_value` and has a named `Index` tag for it.
@@ -487,7 +482,7 @@ pub const Key = union(enum) {
};
pub const FuncType = struct {
- param_types: []Index,
+ param_types: Index.Slice,
return_type: Index,
/// Tells whether a parameter is comptime. See `paramIsComptime` helper
/// method for accessing this.
@@ -541,10 +536,61 @@ pub const Key = union(enum) {
lib_name: OptionalNullTerminatedString,
};
- /// Extern so it can be hashed by reinterpreting memory.
- pub const Func = extern struct {
+ pub const Func = struct {
+ /// In the case of a generic function, this type will potentially have fewer parameters
+ /// than the generic owner's type, because the comptime parameters will be deleted.
ty: Index,
- index: Module.Fn.Index,
+ /// Index into extra array of the `FuncAnalysis` corresponding to this function.
+ /// Used for mutating that data.
+ analysis_extra_index: u32,
+ /// Index into extra array of the `zir_body_inst` corresponding to this function.
+ /// Used for mutating that data.
+ zir_body_inst_extra_index: u32,
+ /// When a generic function is instantiated, branch_quota is inherited from the
+ /// active Sema context. Importantly, this value is also updated when an existing
+ /// generic function instantiation is found and called.
+ /// This field contains the index into the extra array of this value,
+ /// so that it can be mutated.
+ /// This will be 0 when the function is not a generic function instantiation.
+ branch_quota_extra_index: u32,
+ /// The Decl that corresponds to the function itself.
+ owner_decl: Module.Decl.Index,
+ /// The ZIR instruction that is a function instruction. Use this to find
+ /// the body. We store this rather than the body directly so that when ZIR
+ /// is regenerated on update(), we can map this to the new corresponding
+ /// ZIR instruction.
+ zir_body_inst: Zir.Inst.Index,
+ /// Relative to owner Decl.
+ lbrace_line: u32,
+ /// Relative to owner Decl.
+ rbrace_line: u32,
+ lbrace_column: u32,
+ rbrace_column: u32,
+
+ /// The `func_decl` which is the generic function from whence this instance was spawned.
+ /// If this is `none` it means the function is not a generic instantiation.
+ generic_owner: Index,
+ /// If this is a generic function instantiation, this will be non-empty.
+ /// Corresponds to the parameters of the `generic_owner` type, which
+ /// may have more parameters than `ty`.
+ /// Each element is the comptime-known value the generic function was instantiated with,
+ /// or `none` if the element is runtime-known.
+ /// TODO: as a follow-up optimization, don't store `none` values here since that data
+ /// is redundant with `comptime_bits` stored elsewhere.
+ comptime_args: Index.Slice,
+
+ /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
+ pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis {
+ return @ptrCast(&ip.extra.items[func.analysis_extra_index]);
+ }
+
+ pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *Zir.Inst.Index {
+ return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]);
+ }
+
+        /// Returns a mutable pointer to this function's branch quota in `extra`.
+        /// Invalidated by any additions to the `InternPool`. Only meaningful for
+        /// generic instantiations (`branch_quota_extra_index` is 0 otherwise).
+        pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 {
+            return &ip.extra.items[func.branch_quota_extra_index];
+        }
};
pub const Int = struct {
@@ -679,7 +725,7 @@ pub const Key = union(enum) {
};
pub const MemoizedCall = struct {
- func: Module.Fn.Index,
+ func: Index,
arg_values: []const Index,
result: Index,
};
@@ -695,7 +741,6 @@ pub const Key = union(enum) {
return switch (key) {
// TODO: assert no padding in these types
inline .ptr_type,
- .func,
.array_type,
.vector_type,
.opt_type,
@@ -723,20 +768,11 @@ pub const Key = union(enum) {
},
.runtime_value => |x| Hash.hash(seed, asBytes(&x.val)),
- .opaque_type => |x| Hash.hash(seed, asBytes(&x.decl)),
-
- .enum_type => |enum_type| {
- var hasher = Hash.init(seed);
- std.hash.autoHash(&hasher, enum_type.decl);
- return hasher.final();
- },
- .variable => |variable| {
- var hasher = Hash.init(seed);
- std.hash.autoHash(&hasher, variable.decl);
- return hasher.final();
- },
- .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)),
+ inline .opaque_type,
+ .enum_type,
+ .variable,
+ => |x| Hash.hash(seed, asBytes(&x.decl)),
.int => |int| {
var hasher = Hash.init(seed);
@@ -875,7 +911,9 @@ pub const Key = union(enum) {
.func_type => |func_type| {
var hasher = Hash.init(seed);
- for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type);
+ for (func_type.param_types.get(ip)) |param_type| {
+ std.hash.autoHash(&hasher, param_type);
+ }
std.hash.autoHash(&hasher, func_type.return_type);
std.hash.autoHash(&hasher, func_type.comptime_bits);
std.hash.autoHash(&hasher, func_type.noalias_bits);
@@ -893,6 +931,19 @@ pub const Key = union(enum) {
for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg);
return hasher.final();
},
+
+ .func => |func| {
+ if (func.generic_owner == .none)
+ return Hash.hash(seed, asBytes(&func.owner_decl) ++ asBytes(&func.ty));
+
+ var hasher = Hash.init(seed);
+ std.hash.autoHash(&hasher, func.generic_owner);
+ for (func.comptime_args.get(ip)) |arg| std.hash.autoHash(&hasher, arg);
+ std.hash.autoHash(&hasher, func.ty);
+ return hasher.final();
+ },
+
+ .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)),
};
}
@@ -993,7 +1044,17 @@ pub const Key = union(enum) {
},
.func => |a_info| {
const b_info = b.func;
- return a_info.ty == b_info.ty and a_info.index == b_info.index;
+
+ if (a_info.generic_owner != b_info.generic_owner)
+ return false;
+
+ if (a_info.ty != b_info.ty)
+ return false;
+
+ if (a_info.generic_owner == .none)
+ return a_info.owner_decl == b_info.owner_decl;
+
+ return std.mem.eql(Index, a_info.comptime_args.get(ip), b_info.comptime_args.get(ip));
},
.ptr => |a_info| {
@@ -1155,7 +1216,7 @@ pub const Key = union(enum) {
.func_type => |a_info| {
const b_info = b.func_type;
- return std.mem.eql(Index, a_info.param_types, b_info.param_types) and
+ return std.mem.eql(Index, a_info.param_types.get(ip), b_info.param_types.get(ip)) and
a_info.return_type == b_info.return_type and
a_info.comptime_bits == b_info.comptime_bits and
a_info.noalias_bits == b_info.noalias_bits and
@@ -1360,6 +1421,18 @@ pub const Index = enum(u32) {
_,
+ /// An array of `Index` existing within the `extra` array.
+ /// This type exists to provide a struct with lifetime that is
+ /// not invalidated when items are added to the `InternPool`.
+ pub const Slice = struct {
+ start: u32,
+ len: u32,
+
+ pub fn get(slice: Slice, ip: *const InternPool) []Index {
+ return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
+ }
+ };
+
pub fn toType(i: Index) @import("type.zig").Type {
assert(i != .none);
return .{ .ip_index = i };
@@ -1390,6 +1463,7 @@ pub const Index = enum(u32) {
/// This function is used in the debugger pretty formatters in tools/ to fetch the
/// Tag to encoding mapping to facilitate fancy debug printing for this type.
+ /// TODO merge this with `Tag.Payload`.
fn dbHelper(self: *Index, tag_to_encoding_map: *struct {
const DataIsIndex = struct { data: Index };
const DataIsExtraIndexOfEnumExplicit = struct {
@@ -1427,11 +1501,11 @@ pub const Index = enum(u32) {
type_error_union: struct { data: *Key.ErrorUnionType },
type_error_set: struct {
const @"data.names_len" = opaque {};
- data: *ErrorSet,
+ data: *Tag.ErrorSet,
@"trailing.names.len": *@"data.names_len",
trailing: struct { names: []NullTerminatedString },
},
- type_inferred_error_set: struct { data: Module.Fn.InferredErrorSet.Index },
+ type_inferred_error_set: struct { data: Module.InferredErrorSet.Index },
type_enum_auto: struct {
const @"data.fields_len" = opaque {};
data: *EnumAuto,
@@ -1451,7 +1525,7 @@ pub const Index = enum(u32) {
type_union_safety: struct { data: Module.Union.Index },
type_function: struct {
const @"data.params_len" = opaque {};
- data: *TypeFunction,
+ data: *Tag.TypeFunction,
@"trailing.param_types.len": *@"data.params_len",
trailing: struct { param_types: []Index },
},
@@ -1497,7 +1571,8 @@ pub const Index = enum(u32) {
float_comptime_float: struct { data: *Float128 },
variable: struct { data: *Tag.Variable },
extern_func: struct { data: *Key.ExternFunc },
- func: struct { data: *Tag.Func },
+ func_decl: struct { data: *Tag.FuncDecl },
+ func_instance: struct { data: *Tag.FuncInstance },
only_possible_value: DataIsIndex,
union_value: struct { data: *Key.Union },
bytes: struct { data: *Bytes },
@@ -1826,7 +1901,7 @@ pub const Tag = enum(u8) {
/// data is payload to `ErrorSet`.
type_error_set,
/// The inferred error set type of a function.
- /// data is `Module.Fn.InferredErrorSet.Index`.
+ /// data is `Module.InferredErrorSet.Index`.
type_inferred_error_set,
/// An enum type with auto-numbered tag values.
/// The enum is exhaustive.
@@ -2005,11 +2080,16 @@ pub const Tag = enum(u8) {
/// data is extra index to Variable.
variable,
/// An extern function.
- /// data is extra index to Key.ExternFunc.
+ /// data is extra index to ExternFunc.
extern_func,
- /// A regular function.
- /// data is extra index to Func.
- func,
+ /// A non-extern function corresponding directly to the AST node from whence it originated.
+ /// data is extra index to `FuncDecl`.
+    /// Hashing and equality use only the owner Decl and the type; the other
+    /// fields can get patched up during incremental compilation.
+ func_decl,
+ /// A generic function instantiation.
+ /// data is extra index to `FuncInstance`.
+ func_instance,
/// This represents the only possible value for *some* types which have
/// only one possible value. Not all only-possible-values are encoded this way;
/// for example structs which have all comptime fields are not encoded this way.
@@ -2114,7 +2194,8 @@ pub const Tag = enum(u8) {
.float_comptime_float => unreachable,
.variable => Variable,
.extern_func => ExternFunc,
- .func => Func,
+ .func_decl => FuncDecl,
+ .func_instance => FuncInstance,
.only_possible_value => unreachable,
.union_value => Union,
.bytes => Bytes,
@@ -2150,36 +2231,93 @@ pub const Tag = enum(u8) {
/// The type of the aggregate.
ty: Index,
};
-};
-/// Trailing:
-/// 0. name: NullTerminatedString for each names_len
-pub const ErrorSet = struct {
- names_len: u32,
- /// Maps error names to declaration index.
- names_map: MapIndex,
-};
+ pub const FuncDecl = struct {
+ analysis: FuncAnalysis,
+ owner_decl: Module.Decl.Index,
+ ty: Index,
+ zir_body_inst: Zir.Inst.Index,
+ lbrace_line: u32,
+ rbrace_line: u32,
+ lbrace_column: u32,
+ rbrace_column: u32,
+ };
-/// Trailing:
-/// 0. param_type: Index for each params_len
-pub const TypeFunction = struct {
- params_len: u32,
- return_type: Index,
- comptime_bits: u32,
- noalias_bits: u32,
- flags: Flags,
+ /// Trailing:
+ /// 0. For each parameter of generic_owner: Index
+ /// - comptime parameter: the comptime-known value
+ /// - anytype parameter: the type of the runtime-known value
+ /// - otherwise: `none`
+ pub const FuncInstance = struct {
+ analysis: FuncAnalysis,
+ // Needed by the linker for codegen. Not part of hashing or equality.
+ owner_decl: Module.Decl.Index,
+ ty: Index,
+ branch_quota: u32,
+ /// Points to a `FuncDecl`.
+ generic_owner: Index,
+ };
- pub const Flags = packed struct(u32) {
- alignment: Alignment,
- cc: std.builtin.CallingConvention,
- is_var_args: bool,
- is_generic: bool,
- is_noinline: bool,
- align_is_generic: bool,
- cc_is_generic: bool,
- section_is_generic: bool,
- addrspace_is_generic: bool,
- _: u11 = 0,
+ /// Trailing:
+ /// 0. name: NullTerminatedString for each names_len
+ pub const ErrorSet = struct {
+ names_len: u32,
+ /// Maps error names to declaration index.
+ names_map: MapIndex,
+ };
+
+ /// Trailing:
+ /// 0. comptime_bits: u32, // if has_comptime_bits
+ /// 1. noalias_bits: u32, // if has_noalias_bits
+ /// 2. param_type: Index for each params_len
+ pub const TypeFunction = struct {
+ params_len: u32,
+ return_type: Index,
+ flags: Flags,
+
+ pub const Flags = packed struct(u32) {
+ alignment: Alignment,
+ cc: std.builtin.CallingConvention,
+ is_var_args: bool,
+ is_generic: bool,
+ has_comptime_bits: bool,
+ has_noalias_bits: bool,
+ is_noinline: bool,
+ align_is_generic: bool,
+ cc_is_generic: bool,
+ section_is_generic: bool,
+ addrspace_is_generic: bool,
+ _: u9 = 0,
+ };
+ };
+};
+
+/// State that is mutable during semantic analysis. This data is not used for
+/// equality or hashing.
+pub const FuncAnalysis = packed struct(u32) {
+ state: State,
+ is_cold: bool,
+ is_noinline: bool,
+ calls_or_awaits_errorable_fn: bool,
+ stack_alignment: Alignment,
+ _: u15 = 0,
+
+ pub const State = enum(u8) {
+ /// This function has not yet undergone analysis, because we have not
+ /// seen a potential runtime call. It may be analyzed in future.
+ none,
+ /// Analysis for this function has been queued, but not yet completed.
+ queued,
+ /// This function intentionally only has ZIR generated because it is marked
+ /// inline, which means no runtime version of the function will be generated.
+ inline_only,
+ in_progress,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls
+ sema_failure,
+ /// This function might be OK but it depends on another Decl which did not
+ /// successfully complete semantic analysis.
+ dependency_failure,
+ success,
};
};
@@ -2499,7 +2637,7 @@ pub const Float128 = struct {
/// Trailing:
/// 0. arg value: Index for each args_len
pub const MemoizedCall = struct {
- func: Module.Fn.Index,
+ func: Index,
args_len: u32,
result: Index,
};
@@ -2553,9 +2691,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.unions_free_list.deinit(gpa);
ip.allocated_unions.deinit(gpa);
- ip.funcs_free_list.deinit(gpa);
- ip.allocated_funcs.deinit(gpa);
-
ip.inferred_error_sets_free_list.deinit(gpa);
ip.allocated_inferred_error_sets.deinit(gpa);
@@ -2625,21 +2760,21 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) },
.type_error_set => {
- const error_set = ip.extraDataTrail(ErrorSet, data);
+ const error_set = ip.extraDataTrail(Tag.ErrorSet, data);
const names_len = error_set.data.names_len;
const names = ip.extra.items[error_set.end..][0..names_len];
return .{ .error_set_type = .{
- .names = @as([]const NullTerminatedString, @ptrCast(names)),
+ .names = @ptrCast(names),
.names_map = error_set.data.names_map.toOptional(),
} };
},
.type_inferred_error_set => .{
- .inferred_error_set_type = @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(data)),
+ .inferred_error_set_type = @enumFromInt(data),
},
.type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
.type_struct => {
- const struct_index = @as(Module.Struct.OptionalIndex, @enumFromInt(data));
+ const struct_index: Module.Struct.OptionalIndex = @enumFromInt(data);
const namespace = if (struct_index.unwrap()) |i|
ip.structPtrConst(i).namespace.toOptional()
else
@@ -2661,9 +2796,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len];
return .{ .anon_struct_type = .{
- .types = @as([]const Index, @ptrCast(types)),
- .values = @as([]const Index, @ptrCast(values)),
- .names = @as([]const NullTerminatedString, @ptrCast(names)),
+ .types = @ptrCast(types),
+ .values = @ptrCast(values),
+ .names = @ptrCast(names),
} };
},
.type_tuple_anon => {
@@ -2672,8 +2807,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
return .{ .anon_struct_type = .{
- .types = @as([]const Index, @ptrCast(types)),
- .values = @as([]const Index, @ptrCast(values)),
+ .types = @ptrCast(types),
+ .values = @ptrCast(values),
.names = &.{},
} };
},
@@ -2957,7 +3092,12 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} };
},
.extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) },
- .func => .{ .func = ip.extraData(Tag.Func, data) },
+ .func_instance => {
+ @panic("TODO");
+ },
+ .func_decl => {
+ @panic("TODO");
+ },
.only_possible_value => {
const ty = @as(Index, @enumFromInt(data));
const ty_item = ip.items.get(@intFromEnum(ty));
@@ -3063,25 +3203,39 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
}
fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType {
- const type_function = ip.extraDataTrail(TypeFunction, data);
- const param_types = @as(
- []Index,
- @ptrCast(ip.extra.items[type_function.end..][0..type_function.data.params_len]),
- );
+ const type_function = ip.extraDataTrail(Tag.TypeFunction, data);
+ var index: usize = type_function.end;
+ const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: {
+ const x = ip.extra.items[index];
+ index += 1;
+ break :b x;
+ };
+ const noalias_bits: u32 = if (!type_function.data.flags.has_noalias_bits) 0 else b: {
+ const x = ip.extra.items[index];
+ index += 1;
+ break :b x;
+ };
return .{
- .param_types = param_types,
+ .param_types = .{
+ .start = @intCast(index),
+ .len = type_function.data.params_len,
+ },
.return_type = type_function.data.return_type,
- .comptime_bits = type_function.data.comptime_bits,
- .noalias_bits = type_function.data.noalias_bits,
+ .comptime_bits = comptime_bits,
+ .noalias_bits = noalias_bits,
.alignment = type_function.data.flags.alignment,
.cc = type_function.data.flags.cc,
.is_var_args = type_function.data.flags.is_var_args,
- .is_generic = type_function.data.flags.is_generic,
.is_noinline = type_function.data.flags.is_noinline,
.align_is_generic = type_function.data.flags.align_is_generic,
.cc_is_generic = type_function.data.flags.cc_is_generic,
.section_is_generic = type_function.data.flags.section_is_generic,
.addrspace_is_generic = type_function.data.flags.addrspace_is_generic,
+ .is_generic = comptime_bits != 0 or
+ type_function.data.flags.align_is_generic or
+ type_function.data.flags.cc_is_generic or
+ type_function.data.flags.section_is_generic or
+ type_function.data.flags.addrspace_is_generic,
};
}
@@ -3224,10 +3378,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const names_map = try ip.addMap(gpa);
try addStringsToMap(ip, gpa, names_map, error_set_type.names);
const names_len = @as(u32, @intCast(error_set_type.names.len));
- try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len);
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len);
ip.items.appendAssumeCapacity(.{
.tag = .type_error_set,
- .data = ip.addExtraAssumeCapacity(ErrorSet{
+ .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{
.names_len = names_len,
.names_map = names_map,
}),
@@ -3369,36 +3523,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
},
- .func_type => |func_type| {
- assert(func_type.return_type != .none);
- for (func_type.param_types) |param_type| assert(param_type != .none);
-
- const params_len = @as(u32, @intCast(func_type.param_types.len));
-
- try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len +
- params_len);
- ip.items.appendAssumeCapacity(.{
- .tag = .type_function,
- .data = ip.addExtraAssumeCapacity(TypeFunction{
- .params_len = params_len,
- .return_type = func_type.return_type,
- .comptime_bits = func_type.comptime_bits,
- .noalias_bits = func_type.noalias_bits,
- .flags = .{
- .alignment = func_type.alignment,
- .cc = func_type.cc,
- .is_var_args = func_type.is_var_args,
- .is_generic = func_type.is_generic,
- .is_noinline = func_type.is_noinline,
- .align_is_generic = func_type.align_is_generic,
- .cc_is_generic = func_type.cc_is_generic,
- .section_is_generic = func_type.section_is_generic,
- .addrspace_is_generic = func_type.addrspace_is_generic,
- },
- }),
- });
- ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(func_type.param_types)));
- },
+ .func_type => unreachable, // use getFuncType() instead
+ .extern_func => unreachable, // use getExternFunc() instead
+ .func => unreachable, // use getFuncInstance() or getFuncDecl() instead
.variable => |variable| {
const has_init = variable.init != .none;
@@ -3420,16 +3547,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
- .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{
- .tag = .extern_func,
- .data = try ip.addExtra(gpa, @as(Tag.ExternFunc, extern_func)),
- }),
-
- .func => |func| ip.items.appendAssumeCapacity(.{
- .tag = .func,
- .data = try ip.addExtra(gpa, @as(Tag.Func, func)),
- }),
-
.ptr => |ptr| {
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
switch (ptr.len) {
@@ -4068,6 +4185,147 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
return @as(Index, @enumFromInt(ip.items.len - 1));
}
+/// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`.
+pub const GetFuncTypeKey = struct {
+    param_types: []Index,
+    return_type: Index,
+    /// Nonzero when any parameter is marked `comptime`; stored as trailing
+    /// extra data only when nonzero (see `getFuncType`).
+    comptime_bits: u32,
+    /// Nonzero when any parameter is marked `noalias`; stored as trailing
+    /// extra data only when nonzero (see `getFuncType`).
+    noalias_bits: u32,
+    alignment: Alignment,
+    cc: std.builtin.CallingConvention,
+    is_var_args: bool,
+    is_generic: bool,
+    is_noinline: bool,
+    // The `*_is_generic` flags record which attributes are still unresolved
+    // (generic) rather than concrete values.
+    align_is_generic: bool,
+    cc_is_generic: bool,
+    section_is_generic: bool,
+    addrspace_is_generic: bool,
+};
+
+/// Interns a function type described by `key`, deduplicating against existing
+/// entries. Appends speculatively to `items`/`extra`, then asks the map whether
+/// an identical type already exists; if so, the appends are rolled back and the
+/// existing index is returned.
+pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index {
+    // Validate input parameters.
+    assert(key.return_type != .none);
+    for (key.param_types) |param_type| assert(param_type != .none);
+
+    // The strategy here is to add the function type unconditionally, then to
+    // ask if it already exists, and if so, revert the lengths of the mutated
+    // arrays. This is similar to what `getOrPutTrailingString` does.
+    const prev_extra_len = ip.extra.items.len;
+    const params_len: u32 = @intCast(key.param_types.len);
+
+    // Reserve space for the fixed fields plus the optional comptime/noalias
+    // words and the parameter list, so all appends below are infallible.
+    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeFunction).Struct.fields.len +
+        @intFromBool(key.comptime_bits != 0) +
+        @intFromBool(key.noalias_bits != 0) +
+        params_len);
+    try ip.items.ensureUnusedCapacity(gpa, 1);
+
+    ip.items.appendAssumeCapacity(.{
+        .tag = .type_function,
+        .data = ip.addExtraAssumeCapacity(Tag.TypeFunction{
+            .params_len = params_len,
+            .return_type = key.return_type,
+            .flags = .{
+                .alignment = key.alignment,
+                .cc = key.cc,
+                .is_var_args = key.is_var_args,
+                // These flags tell `indexToKeyFuncType` whether trailing
+                // comptime/noalias words are present.
+                .has_comptime_bits = key.comptime_bits != 0,
+                .has_noalias_bits = key.noalias_bits != 0,
+                .is_generic = key.is_generic,
+                .is_noinline = key.is_noinline,
+                .align_is_generic = key.align_is_generic,
+                .cc_is_generic = key.cc_is_generic,
+                .section_is_generic = key.section_is_generic,
+                .addrspace_is_generic = key.addrspace_is_generic,
+            },
+        }),
+    });
+    // Trailing data: optional comptime bits, optional noalias bits, then params.
+    if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits);
+    if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits);
+    ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types));
+
+    const adapter: KeyAdapter = .{ .intern_pool = ip };
+    const gop = try ip.map.getOrPutAdapted(gpa, Key{
+        .func_type = indexToKeyFuncType(ip, @intCast(ip.items.len - 1)),
+    }, adapter);
+    if (!gop.found_existing) return @enumFromInt(ip.items.len - 1);
+
+    // An existing function type was found; undo the additions to our two arrays.
+    ip.items.len -= 1;
+    ip.extra.items.len = prev_extra_len;
+    return @enumFromInt(gop.index);
+}
+
+/// Parameters for `getExternFunc`. Describes an extern function declaration
+/// together with its function-type attributes.
+pub const GetExternFuncKey = struct {
+    param_types: []const Index,
+    noalias_bits: u32,
+    return_type: Index,
+    cc: std.builtin.CallingConvention,
+    alignment: Alignment,
+    is_var_args: bool,
+    /// The Decl that declares this extern function.
+    decl: Module.Decl.Index,
+    /// Library to link against, or `.none`.
+    lib_name: OptionalNullTerminatedString,
+};
+
+/// Interns an extern function value. Not yet implemented; callers of the old
+/// `get(.{ .extern_func = ... })` path are expected to migrate here.
+pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: GetExternFuncKey) Allocator.Error!Index {
+    _ = ip;
+    _ = gpa;
+    _ = key;
+    @panic("TODO");
+}
+
+/// Parameters for `getFuncDecl`. Describes a source-declared (non-instantiated)
+/// function body together with its type and source-location metadata.
+pub const GetFuncDeclKey = struct {
+    param_types: []const Index,
+    noalias_bits: u32,
+    comptime_bits: u32,
+    return_type: Index,
+    inferred_error_set: bool,
+    /// null means generic.
+    cc: ?std.builtin.CallingConvention,
+    /// null means generic.
+    alignment: ?Alignment,
+    section: Section,
+    /// null means generic
+    address_space: ?std.builtin.AddressSpace,
+    is_var_args: bool,
+    is_generic: bool,
+    is_noinline: bool,
+    /// The ZIR function instruction that owns this function's body.
+    zir_body_inst: Zir.Inst.Index,
+    // Source locations of the function body braces, relative to the owner Decl.
+    lbrace_line: u32,
+    rbrace_line: u32,
+    lbrace_column: u32,
+    rbrace_column: u32,
+
+    /// Three-state section attribute: still generic, the default section, or an
+    /// explicitly named section.
+    pub const Section = union(enum) {
+        generic,
+        default,
+        explicit: InternPool.NullTerminatedString,
+    };
+};
+
+/// Interns a source-declared function body (tag `.func_decl`). Not yet
+/// implemented; replaces the removed `get(.{ .func = ... })` path.
+pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index {
+    _ = ip;
+    _ = gpa;
+    _ = key;
+    @panic("TODO");
+}
+
+/// Parameters for `getFuncInstance`. Describes a generic function
+/// instantiation derived from `generic_owner`.
+pub const GetFuncInstanceKey = struct {
+    param_types: []const Index,
+    noalias_bits: u32,
+    return_type: Index,
+    cc: std.builtin.CallingConvention,
+    alignment: Alignment,
+    is_noinline: bool,
+    /// The `.func_decl` this instance was instantiated from.
+    generic_owner: Index,
+};
+
+/// Interns a generic function instantiation (tag `.func_instance`). Not yet
+/// implemented; replaces the removed monomorphed-funcs machinery in Module.zig.
+pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, key: GetFuncInstanceKey) Allocator.Error!Index {
+    _ = ip;
+    _ = gpa;
+    _ = key;
+    @panic("TODO");
+}
+
/// Provides API for completing an enum type after calling `getIncompleteEnum`.
pub const IncompleteEnumType = struct {
index: Index,
@@ -4347,7 +4605,6 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
Module.Decl.Index => @intFromEnum(@field(extra, field.name)),
Module.Namespace.Index => @intFromEnum(@field(extra, field.name)),
Module.Namespace.OptionalIndex => @intFromEnum(@field(extra, field.name)),
- Module.Fn.Index => @intFromEnum(@field(extra, field.name)),
MapIndex => @intFromEnum(@field(extra, field.name)),
OptionalMapIndex => @intFromEnum(@field(extra, field.name)),
RuntimeIndex => @intFromEnum(@field(extra, field.name)),
@@ -4356,7 +4613,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)),
i32 => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))),
- TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))),
+ Tag.TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)),
Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))),
@@ -4411,23 +4668,28 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
const int32 = ip.extra.items[i + index];
@field(result, field.name) = switch (field.type) {
u32 => int32,
- Index => @as(Index, @enumFromInt(int32)),
- Module.Decl.Index => @as(Module.Decl.Index, @enumFromInt(int32)),
- Module.Namespace.Index => @as(Module.Namespace.Index, @enumFromInt(int32)),
- Module.Namespace.OptionalIndex => @as(Module.Namespace.OptionalIndex, @enumFromInt(int32)),
- Module.Fn.Index => @as(Module.Fn.Index, @enumFromInt(int32)),
- MapIndex => @as(MapIndex, @enumFromInt(int32)),
- OptionalMapIndex => @as(OptionalMapIndex, @enumFromInt(int32)),
- RuntimeIndex => @as(RuntimeIndex, @enumFromInt(int32)),
- String => @as(String, @enumFromInt(int32)),
- NullTerminatedString => @as(NullTerminatedString, @enumFromInt(int32)),
- OptionalNullTerminatedString => @as(OptionalNullTerminatedString, @enumFromInt(int32)),
- i32 => @as(i32, @bitCast(int32)),
- Tag.TypePointer.Flags => @as(Tag.TypePointer.Flags, @bitCast(int32)),
- TypeFunction.Flags => @as(TypeFunction.Flags, @bitCast(int32)),
- Tag.TypePointer.PackedOffset => @as(Tag.TypePointer.PackedOffset, @bitCast(int32)),
- Tag.TypePointer.VectorIndex => @as(Tag.TypePointer.VectorIndex, @enumFromInt(int32)),
- Tag.Variable.Flags => @as(Tag.Variable.Flags, @bitCast(int32)),
+
+ Index,
+ Module.Decl.Index,
+ Module.Namespace.Index,
+ Module.Namespace.OptionalIndex,
+ MapIndex,
+ OptionalMapIndex,
+ RuntimeIndex,
+ String,
+ NullTerminatedString,
+ OptionalNullTerminatedString,
+ Tag.TypePointer.VectorIndex,
+ => @enumFromInt(int32),
+
+ i32,
+ Tag.TypePointer.Flags,
+ Tag.TypeFunction.Flags,
+ Tag.TypePointer.PackedOffset,
+ Tag.Variable.Flags,
+ FuncAnalysis,
+ => @bitCast(int32),
+
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
}
@@ -4627,11 +4889,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.decl = extern_func.decl,
.lib_name = extern_func.lib_name,
} }),
- .func => |func| if (ip.isFunctionType(new_ty))
- return ip.get(gpa, .{ .func = .{
- .ty = new_ty,
- .index = func.index,
- } }),
+
+ .func => |func| {
+ if (func.generic_owner == .none) {
+ @panic("TODO");
+ } else {
+ @panic("TODO");
+ }
+ },
+
.int => |int| switch (ip.indexToKey(new_ty)) {
.enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
@@ -4886,20 +5152,12 @@ pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
}
}
-pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex {
- assert(val != .none);
- const tags = ip.items.items(.tag);
- if (tags[@intFromEnum(val)] != .func) return .none;
- const datas = ip.items.items(.data);
- return ip.extraData(Tag.Func, datas[@intFromEnum(val)]).index.toOptional();
-}
-
-pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex {
+pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.InferredErrorSet.OptionalIndex {
assert(val != .none);
const tags = ip.items.items(.tag);
if (tags[@intFromEnum(val)] != .type_inferred_error_set) return .none;
const datas = ip.items.items(.data);
- return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
+ return @as(Module.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
}
/// includes .comptime_int_type
@@ -4994,12 +5252,10 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
(@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
const unions_size = ip.allocated_unions.len *
(@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
- const funcs_size = ip.allocated_funcs.len *
- (@sizeOf(Module.Fn) + @sizeOf(Module.Decl));
// TODO: map overhead size is not taken into account
const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size +
- structs_size + unions_size + funcs_size;
+ structs_size + unions_size;
std.debug.print(
\\InternPool size: {d} bytes
@@ -5008,7 +5264,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
\\ {d} limbs: {d} bytes
\\ {d} structs: {d} bytes
\\ {d} unions: {d} bytes
- \\ {d} funcs: {d} bytes
\\
, .{
total_size,
@@ -5022,8 +5277,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
structs_size,
ip.allocated_unions.len,
unions_size,
- ip.allocated_funcs.len,
- funcs_size,
});
const tags = ip.items.items(.tag);
@@ -5049,10 +5302,10 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.type_anyframe => 0,
.type_error_union => @sizeOf(Key.ErrorUnionType),
.type_error_set => b: {
- const info = ip.extraData(ErrorSet, data);
- break :b @sizeOf(ErrorSet) + (@sizeOf(u32) * info.names_len);
+ const info = ip.extraData(Tag.ErrorSet, data);
+ break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len);
},
- .type_inferred_error_set => @sizeOf(Module.Fn.InferredErrorSet),
+ .type_inferred_error_set => @sizeOf(Module.InferredErrorSet),
.type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit),
.type_enum_auto => @sizeOf(EnumAuto),
.type_opaque => @sizeOf(Key.OpaqueType),
@@ -5080,8 +5333,8 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
=> @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
.type_function => b: {
- const info = ip.extraData(TypeFunction, data);
- break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len);
+ const info = ip.extraData(Tag.TypeFunction, data);
+ break :b @sizeOf(Tag.TypeFunction) + (@sizeOf(Index) * info.params_len);
},
.undef => 0,
@@ -5130,7 +5383,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
},
.aggregate => b: {
const info = ip.extraData(Tag.Aggregate, data);
- const fields_len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)));
+ const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty));
break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len);
},
.repeated => @sizeOf(Repeated),
@@ -5145,7 +5398,14 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.float_comptime_float => @sizeOf(Float128),
.variable => @sizeOf(Tag.Variable) + @sizeOf(Module.Decl),
.extern_func => @sizeOf(Tag.ExternFunc) + @sizeOf(Module.Decl),
- .func => @sizeOf(Tag.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl),
+ .func_decl => @sizeOf(Tag.Func) + @sizeOf(Module.Decl),
+ .func_instance => b: {
+ const info = ip.extraData(Tag.FuncInstance, data);
+ const ty = ip.typeOf(info.generic_owner);
+ const params_len = ip.indexToKey(ty).func_type.param_types.len;
+ break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len +
+ @sizeOf(Module.Decl);
+ },
.only_possible_value => 0,
.union_value => @sizeOf(Key.Union),
@@ -5249,7 +5509,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.float_comptime_float,
.variable,
.extern_func,
- .func,
+ .func_decl,
+ .func_instance,
.union_value,
.memoized_call,
=> try w.print("{d}", .{data}),
@@ -5284,19 +5545,11 @@ pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Mo
return ip.allocated_unions.at(@intFromEnum(index));
}
-pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn {
- return ip.allocated_funcs.at(@intFromEnum(index));
-}
-
-pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn {
- return ip.allocated_funcs.at(@intFromEnum(index));
-}
-
-pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet {
+pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.InferredErrorSet.Index) *Module.InferredErrorSet {
return ip.allocated_inferred_error_sets.at(@intFromEnum(index));
}
-pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet {
+pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.InferredErrorSet.Index) *const Module.InferredErrorSet {
return ip.allocated_inferred_error_sets.at(@intFromEnum(index));
}
@@ -5344,43 +5597,21 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index)
};
}
-pub fn createFunc(
- ip: *InternPool,
- gpa: Allocator,
- initialization: Module.Fn,
-) Allocator.Error!Module.Fn.Index {
- if (ip.funcs_free_list.popOrNull()) |index| {
- ip.allocated_funcs.at(@intFromEnum(index)).* = initialization;
- return index;
- }
- const ptr = try ip.allocated_funcs.addOne(gpa);
- ptr.* = initialization;
- return @as(Module.Fn.Index, @enumFromInt(ip.allocated_funcs.len - 1));
-}
-
-pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void {
- ip.funcPtr(index).* = undefined;
- ip.funcs_free_list.append(gpa, index) catch {
- // In order to keep `destroyFunc` a non-fallible function, we ignore memory
- // allocation failures here, instead leaking the Fn until garbage collection.
- };
-}
-
pub fn createInferredErrorSet(
ip: *InternPool,
gpa: Allocator,
- initialization: Module.Fn.InferredErrorSet,
-) Allocator.Error!Module.Fn.InferredErrorSet.Index {
+ initialization: Module.InferredErrorSet,
+) Allocator.Error!Module.InferredErrorSet.Index {
if (ip.inferred_error_sets_free_list.popOrNull()) |index| {
ip.allocated_inferred_error_sets.at(@intFromEnum(index)).* = initialization;
return index;
}
const ptr = try ip.allocated_inferred_error_sets.addOne(gpa);
ptr.* = initialization;
- return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 1));
+ return @as(Module.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 1));
}
-pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void {
+pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.InferredErrorSet.Index) void {
ip.inferredErrorSetPtr(index).* = undefined;
ip.inferred_error_sets_free_list.append(gpa, index) catch {
// In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory
@@ -5620,7 +5851,8 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.enum_tag,
.variable,
.extern_func,
- .func,
+ .func_decl,
+ .func_instance,
.union_value,
.bytes,
.aggregate,
@@ -5704,7 +5936,7 @@ pub fn funcReturnType(ip: *const InternPool, ty: Index) Index {
};
assert(child_item.tag == .type_function);
return @as(Index, @enumFromInt(ip.extra.items[
- child_item.data + std.meta.fieldIndex(TypeFunction, "return_type").?
+ child_item.data + std.meta.fieldIndex(Tag.TypeFunction, "return_type").?
]));
}
@@ -5712,7 +5944,7 @@ pub fn isNoReturn(ip: *const InternPool, ty: Index) bool {
return switch (ty) {
.noreturn_type => true,
else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) {
- .type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(ErrorSet, "names_len").?] == 0,
+ .type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0,
else => false,
},
};
@@ -5969,7 +6201,8 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.float_comptime_float,
.variable,
.extern_func,
- .func,
+ .func_decl,
+ .func_instance,
.only_possible_value,
.union_value,
.bytes,
@@ -5982,3 +6215,39 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.none => unreachable, // special tag
};
}
+
+/// Returns whether `i` refers to a function body — either a source-declared
+/// function (`.func_decl`) or a generic instantiation (`.func_instance`) — as
+/// opposed to e.g. a function type or an extern function.
+pub fn isFuncBody(ip: *const InternPool, i: Index) bool {
+    assert(i != .none);
+    return switch (ip.items.items(.tag)[@intFromEnum(i)]) {
+        .func_decl, .func_instance => true,
+        else => false,
+    };
+}
+
+/// Returns a pointer to the `analysis` word of a function body, aliasing
+/// `ip.extra` storage so callers can update analysis state in place.
+/// Asserts `i` is a `.func_decl` or `.func_instance`.
+/// NOTE(review): the returned pointer is presumably invalidated if `ip.extra`
+/// grows — callers should not hold it across interning calls; confirm.
+pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis {
+    assert(i != .none);
+    const item = ip.items.get(@intFromEnum(i));
+    // Locate the `analysis` field within the tag-specific extra struct.
+    const extra_index = switch (item.tag) {
+        .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?,
+        .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?,
+        else => unreachable,
+    };
+    // Reinterpret the u32 extra word as the packed `FuncAnalysis` struct.
+    return @ptrCast(&ip.extra.items[extra_index]);
+}
+
+/// Returns the ZIR body instruction of a function. For a `.func_instance`,
+/// chases `generic_owner` to the `.func_decl` it was instantiated from and
+/// reads the `zir_body_inst` stored there, since instances share the owner's
+/// ZIR body. Asserts `i` is a `.func_decl` or `.func_instance`.
+pub fn funcZirBodyInst(ip: *const InternPool, i: Index) Zir.Inst.Index {
+    assert(i != .none);
+    const item = ip.items.get(@intFromEnum(i));
+    const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?;
+    const extra_index = switch (item.tag) {
+        .func_decl => item.data + zir_body_inst_field_index,
+        .func_instance => b: {
+            // Resolve the generic owner, which must itself be a `.func_decl`,
+            // then index into *its* extra data.
+            const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?;
+            const func_decl_index = ip.extra.items[item.data + generic_owner_field_index];
+            assert(ip.items.items(.tag)[func_decl_index] == .func_decl);
+            break :b ip.items.items(.data)[func_decl_index] + zir_body_inst_field_index;
+        },
+        else => unreachable,
+    };
+    return ip.extra.items[extra_index];
+}
src/link.zig
@@ -16,6 +16,7 @@ const Compilation = @import("Compilation.zig");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const Liveness = @import("Liveness.zig");
const Module = @import("Module.zig");
+const InternPool = @import("InternPool.zig");
const Package = @import("Package.zig");
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
@@ -562,7 +563,7 @@ pub const File = struct {
}
/// May be called before or after updateDeclExports for any given Decl.
- pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void {
+ pub fn updateFunc(base: *File, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) UpdateDeclError!void {
if (build_options.only_c) {
assert(base.tag == .c);
return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness);
src/Module.zig
@@ -101,16 +101,6 @@ tmp_hack_arena: std.heap.ArenaAllocator,
/// This is currently only used for string literals.
memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{},
-monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{},
-/// The set of all the generic function instantiations. This is used so that when a generic
-/// function is called twice with the same comptime parameter arguments, both calls dispatch
-/// to the same function.
-monomorphed_funcs: MonomorphedFuncsSet = .{},
-/// Contains the values from `@setAlignStack`. A sparse table is used here
-/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
-/// functions are many.
-align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{},
-
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
@@ -189,7 +179,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
}) = .{},
panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len,
-panic_func_index: Fn.OptionalIndex = .none,
+/// The panic function body.
+panic_func_index: InternPool.Index = .none,
null_stack_trace: InternPool.Index = .none,
pub const PanicId = enum {
@@ -239,50 +230,6 @@ pub const CImportError = struct {
}
};
-pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 };
-
-pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index };
-
-pub const MonomorphedFuncsSet = std.HashMapUnmanaged(
- MonomorphedFuncKey,
- InternPool.Index,
- MonomorphedFuncsContext,
- std.hash_map.default_max_load_percentage,
-);
-
-pub const MonomorphedFuncsContext = struct {
- mod: *Module,
-
- pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool {
- return std.meta.eql(a, b);
- }
-
- pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 {
- const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len];
- return std.hash.Wyhash.hash(@intFromEnum(key.func), std.mem.sliceAsBytes(key_args));
- }
-};
-
-pub const MonomorphedFuncsAdaptedContext = struct {
- mod: *Module,
-
- pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool {
- const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len];
- return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args);
- }
-
- pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 {
- return std.hash.Wyhash.hash(@intFromEnum(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args));
- }
-};
-
-pub const SetAlignStack = struct {
- alignment: Alignment,
- /// TODO: This needs to store a non-lazy source location for the case of an inline function
- /// which does `@setAlignStack` (applying it to the caller).
- src: LazySrcLoc,
-};
-
/// A `Module` has zero or one of these depending on whether `-femit-h` is enabled.
pub const GlobalEmitH = struct {
/// Where to put the output.
@@ -625,13 +572,6 @@ pub const Decl = struct {
function_body,
};
- pub fn clearValues(decl: *Decl, mod: *Module) void {
- if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
- _ = mod.align_stack_fns.remove(func);
- mod.destroyFunc(func);
- }
- }
-
/// This name is relative to the containing namespace of the decl.
/// The memory is owned by the containing File ZIR.
pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 {
@@ -816,14 +756,17 @@ pub const Decl = struct {
return mod.typeToUnion(decl.val.toType());
}
- /// If the Decl owns its value and it is a function, return it,
- /// otherwise null.
- pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn {
- return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod));
+ pub fn getOwnedFunction(decl: Decl, mod: *Module) ?InternPool.Key.Func {
+ const i = decl.getOwnedFunctionIndex();
+ if (i == .none) return null;
+ return switch (mod.intern_pool.indexToKey(i)) {
+ .func => |func| func,
+ else => null,
+ };
}
- pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex {
- return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none;
+ pub fn getOwnedFunctionIndex(decl: Decl) InternPool.Index {
+ return if (decl.owns_tv) decl.val.toIntern() else .none;
}
/// If the Decl owns its value and it is an extern function, returns it,
@@ -1385,71 +1328,39 @@ pub const ExternFn = struct {
}
};
-/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
-/// Extern functions do not have this data structure; they are represented by `ExternFn`
-/// instead.
-pub const Fn = struct {
- /// The Decl that corresponds to the function itself.
- owner_decl: Decl.Index,
- /// The ZIR instruction that is a function instruction. Use this to find
- /// the body. We store this rather than the body directly so that when ZIR
- /// is regenerated on update(), we can map this to the new corresponding
- /// ZIR instruction.
- zir_body_inst: Zir.Inst.Index,
- /// If this is not null, this function is a generic function instantiation, and
- /// there is a `TypedValue` here for each parameter of the function.
- /// Non-comptime parameters are marked with a `generic_poison` for the value.
- /// Non-anytype parameters are marked with a `generic_poison` for the type.
- /// These never have .generic_poison for the Type
- /// because the Type is needed to pass to `Type.eql` and for inserting comptime arguments
- /// into the inst_map when analyzing the body of a generic function instantiation.
- /// Instead, the is_anytype knowledge is communicated via `isAnytypeParam`.
- comptime_args: ?[*]TypedValue,
-
- /// Precomputed hash for monomorphed_funcs.
- /// This is important because it may be accessed when resizing monomorphed_funcs
- /// while this Fn has already been added to the set, but does not have the
- /// owner_decl, comptime_args, or other fields populated yet.
- /// This field is undefined if comptime_args == null.
- hash: u64,
-
- /// Relative to owner Decl.
- lbrace_line: u32,
- /// Relative to owner Decl.
- rbrace_line: u32,
- lbrace_column: u16,
- rbrace_column: u16,
-
- /// When a generic function is instantiated, this value is inherited from the
- /// active Sema context. Importantly, this value is also updated when an existing
- /// generic function instantiation is found and called.
- branch_quota: u32,
-
- /// If this is not none, this function is a generic function instantiation, and
- /// this is the generic function decl from which the instance was derived.
- /// This information is redundant with a combination of checking if comptime_args is
- /// not null and looking at the first decl dependency of owner_decl. This redundant
- /// information is useful for three reasons:
- /// 1. Improved perf of monomorphed_funcs when checking the eql() function because it
- /// can do two fewer pointer chases by grabbing the info from this field directly
- /// instead of accessing the decl and then the dependencies set.
- /// 2. While a generic function instantiation is being initialized, we need hash()
- /// and eql() to work before the initialization is complete. Completing the
- /// insertion into the decl dependency set has more fallible operations than simply
- /// setting this field.
- /// 3. I forgot what the third thing was while typing up the other two.
- generic_owner_decl: Decl.OptionalIndex,
-
- state: Analysis,
- is_cold: bool = false,
- is_noinline: bool,
- calls_or_awaits_errorable_fn: bool = false,
+/// This struct is used to keep track of any dependencies related to functions instances
+/// that return inferred error sets. Note that a function may be associated to
+/// multiple different error sets, for example an inferred error set which
+/// this function returns, but also any inferred error sets of called inline
+/// or comptime functions.
+pub const InferredErrorSet = struct {
+ /// The function from which this error set originates.
+ func: InternPool.Index,
+
+ /// All currently known errors that this error set contains. This includes
+ /// direct additions via `return error.Foo;`, and possibly also errors that
+ /// are returned from any dependent functions. When the inferred error set is
+ /// fully resolved, this map contains all the errors that the function might return.
+ errors: NameMap = .{},
+
+ /// Other inferred error sets which this inferred error set should include.
+ inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{},
+
+ /// Whether the function returned anyerror. This is true if either of
+ /// the dependent functions returns anyerror.
+ is_anyerror: bool = false,
+
+ /// Whether this error set is already fully resolved. If true, resolving
+ /// can skip resolving any dependents of this inferred error set.
+ is_resolved: bool = false,
+
+ pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
pub const Index = enum(u32) {
_,
- pub fn toOptional(i: Index) OptionalIndex {
- return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
+ pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+ return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i)));
}
};
@@ -1457,159 +1368,37 @@ pub const Fn = struct {
none = std.math.maxInt(u32),
_,
- pub fn init(oi: ?Index) OptionalIndex {
- return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
+ pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+ return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
}
- pub fn unwrap(oi: OptionalIndex) ?Index {
+ pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
if (oi == .none) return null;
- return @as(Index, @enumFromInt(@intFromEnum(oi)));
+ return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi)));
}
};
- pub const Analysis = enum {
- /// This function has not yet undergone analysis, because we have not
- /// seen a potential runtime call. It may be analyzed in future.
- none,
- /// Analysis for this function has been queued, but not yet completed.
- queued,
- /// This function intentionally only has ZIR generated because it is marked
- /// inline, which means no runtime version of the function will be generated.
- inline_only,
- in_progress,
- /// There will be a corresponding ErrorMsg in Module.failed_decls
- sema_failure,
- /// This Fn might be OK but it depends on another Decl which did not
- /// successfully complete semantic analysis.
- dependency_failure,
- success,
- };
-
- /// This struct is used to keep track of any dependencies related to functions instances
- /// that return inferred error sets. Note that a function may be associated to
- /// multiple different error sets, for example an inferred error set which
- /// this function returns, but also any inferred error sets of called inline
- /// or comptime functions.
- pub const InferredErrorSet = struct {
- /// The function from which this error set originates.
- func: Fn.Index,
-
- /// All currently known errors that this error set contains. This includes
- /// direct additions via `return error.Foo;`, and possibly also errors that
- /// are returned from any dependent functions. When the inferred error set is
- /// fully resolved, this map contains all the errors that the function might return.
- errors: NameMap = .{},
-
- /// Other inferred error sets which this inferred error set should include.
- inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{},
-
- /// Whether the function returned anyerror. This is true if either of
- /// the dependent functions returns anyerror.
- is_anyerror: bool = false,
-
- /// Whether this error set is already fully resolved. If true, resolving
- /// can skip resolving any dependents of this inferred error set.
- is_resolved: bool = false,
-
- pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
-
- pub const Index = enum(u32) {
- _,
-
- pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
- return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i)));
- }
- };
-
- pub const OptionalIndex = enum(u32) {
- none = std.math.maxInt(u32),
- _,
-
- pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
- return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
- }
-
- pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
- if (oi == .none) return null;
- return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi)));
- }
- };
-
- pub fn addErrorSet(
- self: *InferredErrorSet,
- err_set_ty: Type,
- ip: *InternPool,
- gpa: Allocator,
- ) !void {
- switch (err_set_ty.toIntern()) {
- .anyerror_type => {
- self.is_anyerror = true;
+ pub fn addErrorSet(
+ self: *InferredErrorSet,
+ err_set_ty: Type,
+ ip: *InternPool,
+ gpa: Allocator,
+ ) !void {
+ switch (err_set_ty.toIntern()) {
+ .anyerror_type => {
+ self.is_anyerror = true;
+ },
+ else => switch (ip.indexToKey(err_set_ty.toIntern())) {
+ .error_set_type => |error_set_type| {
+ for (error_set_type.names) |name| {
+ try self.errors.put(gpa, name, {});
+ }
},
- else => switch (ip.indexToKey(err_set_ty.toIntern())) {
- .error_set_type => |error_set_type| {
- for (error_set_type.names) |name| {
- try self.errors.put(gpa, name, {});
- }
- },
- .inferred_error_set_type => |ies_index| {
- try self.inferred_error_sets.put(gpa, ies_index, {});
- },
- else => unreachable,
+ .inferred_error_set_type => |ies_index| {
+ try self.inferred_error_sets.put(gpa, ies_index, {});
},
- }
- }
- };
-
- pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool {
- const file = mod.declPtr(func.owner_decl).getFileScope(mod);
-
- const tags = file.zir.instructions.items(.tag);
-
- const param_body = file.zir.getParamBody(func.zir_body_inst);
- const param = param_body[index];
-
- return switch (tags[param]) {
- .param, .param_comptime => false,
- .param_anytype, .param_anytype_comptime => true,
- else => unreachable,
- };
- }
-
- pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 {
- const file = mod.declPtr(func.owner_decl).getFileScope(mod);
-
- const tags = file.zir.instructions.items(.tag);
- const data = file.zir.instructions.items(.data);
-
- const param_body = file.zir.getParamBody(func.zir_body_inst);
- const param = param_body[index];
-
- return switch (tags[param]) {
- .param, .param_comptime => blk: {
- const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index);
- break :blk file.zir.nullTerminatedString(extra.data.name);
- },
- .param_anytype, .param_anytype_comptime => blk: {
- const param_data = data[param].str_tok;
- break :blk param_data.get(file.zir);
- },
- else => unreachable,
- };
- }
-
- pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool {
- const owner_decl = mod.declPtr(func.owner_decl);
- const zir = owner_decl.getFileScope(mod).zir;
- const zir_tags = zir.instructions.items(.tag);
- switch (zir_tags[func.zir_body_inst]) {
- .func => return false,
- .func_inferred => return true,
- .func_fancy => {
- const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node;
- const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
- return extra.data.bits.is_inferred_error;
+ else => unreachable,
},
- else => unreachable,
}
}
};
@@ -2468,6 +2257,22 @@ pub const SrcLoc = struct {
}
} else unreachable;
},
+ .call_arg => |call_arg| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node = src_loc.declRelativeToNodeIndex(call_arg.call_node_offset);
+ var buf: [1]Ast.Node.Index = undefined;
+ const call_full = tree.fullCall(&buf, node).?;
+ const src_node = call_full.ast.params[call_arg.arg_index];
+ return nodeToSpan(tree, src_node);
+ },
+ .fn_proto_param => |fn_proto_param| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node = src_loc.declRelativeToNodeIndex(fn_proto_param.fn_proto_node_offset);
+ var buf: [1]Ast.Node.Index = undefined;
+ const fn_proto_full = tree.fullFnProto(&buf, node).?;
+ const src_node = fn_proto_full.ast.params[fn_proto_param.param_index];
+ return nodeToSpan(tree, src_node);
+ },
.node_offset_bin_lhs => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.declRelativeToNodeIndex(node_off);
@@ -3146,6 +2951,20 @@ pub const LazySrcLoc = union(enum) {
/// Next, navigate to the corresponding capture.
/// The Decl is determined contextually.
for_capture_from_input: i32,
+ /// The source location points to the argument node of a function call.
+ /// The Decl is determined contextually.
+ call_arg: struct {
+ /// Points to the function call AST node.
+ call_node_offset: i32,
+ /// The index of the argument the source location points to.
+ arg_index: u32,
+ },
+ fn_proto_param: struct {
+ /// Points to the function prototype AST node.
+ fn_proto_node_offset: i32,
+ /// The index of the parameter the source location points to.
+ param_index: u32,
+ },
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
@@ -3235,6 +3054,8 @@ pub const LazySrcLoc = union(enum) {
.node_offset_store_operand,
.for_input,
.for_capture_from_input,
+ .call_arg,
+ .fn_proto_param,
=> .{
.file_scope = decl.getFileScope(mod),
.parent_decl_node = decl.src_node,
@@ -3373,8 +3194,6 @@ pub fn deinit(mod: *Module) void {
mod.global_error_set.deinit(gpa);
mod.test_functions.deinit(gpa);
- mod.align_stack_fns.deinit(gpa);
- mod.monomorphed_funcs.deinit(gpa);
mod.decls_free_list.deinit(gpa);
mod.allocated_decls.deinit(gpa);
@@ -3407,7 +3226,6 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
}
}
if (decl.src_scope) |scope| scope.decRef(gpa);
- decl.clearValues(mod);
decl.dependants.deinit(gpa);
decl.dependencies.deinit(gpa);
decl.* = undefined;
@@ -3439,11 +3257,7 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
return mod.intern_pool.structPtr(index);
}
-pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn {
- return mod.intern_pool.funcPtr(index);
-}
-
-pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet {
+pub fn inferredErrorSetPtr(mod: *Module, index: InferredErrorSet.Index) *InferredErrorSet {
return mod.intern_pool.inferredErrorSetPtr(index);
}
@@ -3457,10 +3271,6 @@ pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
return mod.structPtr(index.unwrap() orelse return null);
}
-pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn {
- return mod.funcPtr(index.unwrap() orelse return null);
-}
-
/// Returns true if and only if the Decl is the top level struct associated with a File.
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
const decl = mod.declPtr(decl_index);
@@ -3881,6 +3691,8 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
// to re-generate ZIR for the File.
try file.outdated_decls.append(gpa, root_decl);
+ const ip = &mod.intern_pool;
+
while (decl_stack.popOrNull()) |decl_index| {
const decl = mod.declPtr(decl_index);
// Anonymous decls and the root decl have this set to 0. We still need
@@ -3918,7 +3730,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
}
if (decl.getOwnedFunction(mod)) |func| {
- func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse {
+ func.zirBodyInst(ip).* = inst_map.get(func.zir_body_inst) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
};
@@ -4101,11 +3913,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// prior to re-analysis.
try mod.deleteDeclExports(decl_index);
- // Similarly, `@setAlignStack` invocations will be re-discovered.
- if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
- _ = mod.align_stack_fns.remove(func);
- }
-
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
for (decl.dependencies.keys()) |dep_index| {
const dep = mod.declPtr(dep_index);
@@ -4189,11 +3996,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
}
}
-pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void {
+pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: InternPool.Index) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
- const func = mod.funcPtr(func_index);
+ const ip = &mod.intern_pool;
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -4211,7 +4019,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
=> return error.AnalysisFail,
.complete, .codegen_failure_retryable => {
- switch (func.state) {
+ switch (func.analysis(ip).state) {
.sema_failure, .dependency_failure => return error.AnalysisFail,
.none, .queued => {},
.in_progress => unreachable,
@@ -4227,11 +4035,11 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
- if (func.state == .in_progress) {
+ if (func.analysis(ip).state == .in_progress) {
// If this decl caused the compile error, the analysis field would
// be changed to indicate it was this Decl's fault. Because this
// did not happen, we infer here that it was a dependency failure.
- func.state = .dependency_failure;
+ func.analysis(ip).state = .dependency_failure;
}
return error.AnalysisFail;
},
@@ -4251,14 +4059,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
if (no_bin_file and !dump_air and !dump_llvm_ir) return;
- var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool);
+ var liveness = try Liveness.analyze(gpa, air, ip);
defer liveness.deinit(gpa);
if (dump_air) {
const fqn = try decl.getFullyQualifiedName(mod);
- std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)});
+ std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
@import("print_air.zig").dump(mod, air, liveness);
- std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)});
+ std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)});
}
if (std.debug.runtime_safety) {
@@ -4266,7 +4074,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
.gpa = gpa,
.air = air,
.liveness = liveness,
- .intern_pool = &mod.intern_pool,
+ .intern_pool = ip,
};
defer verify.deinit();
@@ -4321,8 +4129,9 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
/// analyzed, and for ensuring it can exist at runtime (see
/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
-pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {
- const func = mod.funcPtr(func_index);
+pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) !void {
+ const ip = &mod.intern_pool;
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -4348,7 +4157,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {
assert(decl.has_tv);
- switch (func.state) {
+ switch (func.analysis(ip).state) {
.none => {},
.queued => return,
// As above, we don't need to forward errors here.
@@ -4366,7 +4175,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {
// since the last update
try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
}
- func.state = .queued;
+ func.analysis(ip).state = .queued;
}
pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
@@ -4490,10 +4299,8 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.code = file.zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
- .func = null,
.func_index = .none,
.fn_ret_ty = Type.void,
- .owner_func = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -4573,10 +4380,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
- .func = null,
.func_index = .none,
.fn_ret_ty = Type.void,
- .owner_func = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -4658,48 +4463,49 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
return true;
}
- if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| {
- const func = mod.funcPtr(func_index);
- const owns_tv = func.owner_decl == decl_index;
- if (owns_tv) {
- var prev_type_has_bits = false;
- var prev_is_inline = false;
- var type_changed = true;
-
- if (decl.has_tv) {
- prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod);
- type_changed = !decl.ty.eql(decl_tv.ty, mod);
- if (decl.getOwnedFunction(mod)) |prev_func| {
- prev_is_inline = prev_func.state == .inline_only;
+ const ip = &mod.intern_pool;
+ switch (ip.indexToKey(decl_tv.val.toIntern())) {
+ .func => |func| {
+ const owns_tv = func.owner_decl == decl_index;
+ if (owns_tv) {
+ var prev_type_has_bits = false;
+ var prev_is_inline = false;
+ var type_changed = true;
+
+ if (decl.has_tv) {
+ prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod);
+ type_changed = !decl.ty.eql(decl_tv.ty, mod);
+ if (decl.getOwnedFunction(mod)) |prev_func| {
+ prev_is_inline = prev_func.analysis(ip).state == .inline_only;
+ }
}
- }
- decl.clearValues(mod);
-
- decl.ty = decl_tv.ty;
- decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
- // linksection, align, and addrspace were already set by Sema
- decl.has_tv = true;
- decl.owns_tv = owns_tv;
- decl.analysis = .complete;
- decl.generation = mod.generation;
-
- const is_inline = decl.ty.fnCallingConvention(mod) == .Inline;
- if (decl.is_exported) {
- const export_src: LazySrcLoc = .{ .token_offset = @intFromBool(decl.is_pub) };
- if (is_inline) {
- return sema.fail(&block_scope, export_src, "export of inline function", .{});
+
+ decl.ty = decl_tv.ty;
+ decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
+ // linksection, align, and addrspace were already set by Sema
+ decl.has_tv = true;
+ decl.owns_tv = owns_tv;
+ decl.analysis = .complete;
+ decl.generation = mod.generation;
+
+ const is_inline = decl.ty.fnCallingConvention(mod) == .Inline;
+ if (decl.is_exported) {
+ const export_src: LazySrcLoc = .{ .token_offset = @intFromBool(decl.is_pub) };
+ if (is_inline) {
+ return sema.fail(&block_scope, export_src, "export of inline function", .{});
+ }
+ // The scope needs to have the decl in it.
+ try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
}
- // The scope needs to have the decl in it.
- try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
+ return type_changed or is_inline != prev_is_inline;
}
- return type_changed or is_inline != prev_is_inline;
- }
+ },
+ else => {},
}
var type_changed = true;
if (decl.has_tv) {
type_changed = !decl.ty.eql(decl_tv.ty, mod);
}
- decl.clearValues(mod);
decl.owns_tv = false;
var queue_linker_work = false;
@@ -4707,7 +4513,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
switch (decl_tv.val.toIntern()) {
.generic_poison => unreachable,
.unreachable_value => unreachable,
- else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
+ else => switch (ip.indexToKey(decl_tv.val.toIntern())) {
.variable => |variable| if (variable.decl == decl_index) {
decl.owns_tv = true;
queue_linker_work = true;
@@ -4743,11 +4549,11 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
} else if (bytes.len == 0) {
return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
}
- const section = try mod.intern_pool.getOrPutString(gpa, bytes);
+ const section = try ip.getOrPutString(gpa, bytes);
break :blk section.toOptional();
};
decl.@"addrspace" = blk: {
- const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
+ const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_tv.val.toIntern())) {
.variable => .variable,
.extern_func, .func => .function,
else => .constant,
@@ -5309,7 +5115,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.has_align = has_align;
decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
decl.zir_decl_index = @as(u32, @intCast(decl_sub_index));
- if (decl.getOwnedFunctionIndex(mod) != .none) {
+ if (decl.getOwnedFunctionIndex() != .none) {
switch (comp.bin_file.tag) {
.coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
@@ -5386,7 +5192,6 @@ pub fn clearDecl(
try namespace.deleteAllDecls(mod, outdated_decls);
}
}
- decl.clearValues(mod);
if (decl.deletion_flag) {
decl.deletion_flag = false;
@@ -5497,19 +5302,26 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
export_owners.deinit(mod.gpa);
}
-pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
const gpa = mod.gpa;
- const func = mod.funcPtr(func_index);
+ const ip = &mod.intern_pool;
+ const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
+ // In the case of a generic function instance, this is the type of the
+ // instance, which has comptime parameters elided. In other words, it is
+ // the runtime-known parameters only, not to be confused with the
+ // generic_owner function type, which potentially has more parameters,
+ // including comptime parameters.
const fn_ty = decl.ty;
+ const fn_ty_info = mod.typeToFunc(fn_ty).?;
var sema: Sema = .{
.mod = mod,
@@ -5518,18 +5330,16 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
.code = decl.getFileScope(mod).zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
- .func = func,
- .func_index = func_index.toOptional(),
- .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(),
- .owner_func = func,
- .owner_func_index = func_index.toOptional(),
- .branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
+ .func_index = func_index,
+ .fn_ret_ty = fn_ty_info.return_type.toType(),
+ .owner_func_index = func_index,
+ .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
// reset in case calls to errorable functions are removed.
- func.calls_or_awaits_errorable_fn = false;
+ func.analysis(ip).calls_or_awaits_errorable_fn = false;
// First few indexes of extra are reserved and set at the end.
const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
@@ -5551,8 +5361,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
};
defer inner_block.instructions.deinit(gpa);
- const fn_info = sema.code.getFnInfo(func.zir_body_inst);
- const zir_tags = sema.code.instructions.items(.tag);
+ const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).*);
// Here we are performing "runtime semantic analysis" for a function body, which means
// we must map the parameter ZIR instructions to `arg` AIR instructions.
@@ -5560,35 +5369,36 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
// This could be a generic function instantiation, however, in which case we need to
// map the comptime parameters to constant values and only emit arg AIR instructions
// for the runtime ones.
- const runtime_params_len = @as(u32, @intCast(mod.typeToFunc(fn_ty).?.param_types.len));
+ const runtime_params_len = fn_ty_info.param_types.len;
try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
- try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`
+ try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len);
try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
- var runtime_param_index: usize = 0;
- var total_param_index: usize = 0;
- for (fn_info.param_body) |inst| {
- switch (zir_tags[inst]) {
- .param, .param_comptime, .param_anytype, .param_anytype_comptime => {},
- else => continue,
+ // In the case of a generic function instance, pre-populate all the comptime args.
+ if (func.comptime_args.len != 0) {
+ for (
+ fn_info.param_body[0..func.comptime_args.len],
+ func.comptime_args.get(ip),
+ ) |inst, comptime_arg| {
+ if (comptime_arg == .none) continue;
+ sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg));
}
- const param_ty = if (func.comptime_args) |comptime_args| t: {
- const arg_tv = comptime_args[total_param_index];
-
- const arg_val = if (!arg_tv.val.isGenericPoison())
- arg_tv.val
- else if (try arg_tv.ty.onePossibleValue(mod)) |opv|
- opv
- else
- break :t arg_tv.ty;
-
- const arg = try sema.addConstant(arg_val);
- sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
- total_param_index += 1;
- continue;
- } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType();
+ }
+
+ const src_params_len = if (func.comptime_args.len != 0)
+ func.comptime_args.len
+ else
+ runtime_params_len;
+
+ var runtime_param_index: usize = 0;
+ for (fn_info.param_body[0..src_params_len], 0..) |inst, src_param_index| {
+ const gop = sema.inst_map.getOrPutAssumeCapacity(inst);
+ if (gop.found_existing) continue; // provided above by comptime arg
- const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) {
+ const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index];
+ runtime_param_index += 1;
+
+ const opt_opv = sema.typeHasOnePossibleValue(param_ty.toType()) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@@ -5596,28 +5406,22 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
else => |e| return e,
};
if (opt_opv) |opv| {
- const arg = try sema.addConstant(opv);
- sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
- total_param_index += 1;
- runtime_param_index += 1;
+ gop.value_ptr.* = Air.internedToRef(opv.toIntern());
continue;
}
- const air_ty = try sema.addType(param_ty);
- const arg_index = @as(u32, @intCast(sema.air_instructions.len));
+ const arg_index: u32 = @intCast(sema.air_instructions.len);
+ gop.value_ptr.* = Air.indexToRef(arg_index);
inner_block.instructions.appendAssumeCapacity(arg_index);
sema.air_instructions.appendAssumeCapacity(.{
.tag = .arg,
.data = .{ .arg = .{
- .ty = air_ty,
- .src_index = @as(u32, @intCast(total_param_index)),
+ .ty = Air.internedToRef(param_ty),
+ .src_index = @intCast(src_param_index),
} },
});
- sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index));
- total_param_index += 1;
- runtime_param_index += 1;
}
- func.state = .in_progress;
+ func.analysis(ip).state = .in_progress;
const last_arg_index = inner_block.instructions.items.len;
@@ -5648,7 +5452,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
}
// If we don't get an error return trace from a caller, create our own.
- if (func.calls_or_awaits_errorable_fn and
+ if (func.analysis(ip).calls_or_awaits_errorable_fn and
mod.comp.bin_file.options.error_return_tracing and
!sema.fn_ret_ty.isError(mod))
{
@@ -5677,7 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items);
sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
- func.state = .success;
+ func.analysis(ip).state = .success;
// Finally we must resolve the return type and parameter types so that backends
// have full access to type information.
@@ -5716,7 +5520,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
};
}
- return Air{
+ return .{
.instructions = sema.air_instructions.toOwnedSlice(),
.extra = try sema.air_extra.toOwnedSlice(gpa),
};
@@ -5731,9 +5535,6 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| {
for (kv.value) |err| err.deinit(mod.gpa);
}
- if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
- _ = mod.align_stack_fns.remove(func);
- }
if (mod.emit_h) |emit_h| {
if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| {
kv.value.destroy(mod.gpa);
@@ -5777,14 +5578,6 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void {
return mod.intern_pool.destroyUnion(mod.gpa, index);
}
-pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index {
- return mod.intern_pool.createFunc(mod.gpa, initialization);
-}
-
-pub fn destroyFunc(mod: *Module, index: Fn.Index) void {
- return mod.intern_pool.destroyFunc(mod.gpa, index);
-}
-
pub fn allocateNewDecl(
mod: *Module,
namespace: Namespace.Index,
@@ -6578,7 +6371,6 @@ pub fn populateTestFunctions(
// Since we are replacing the Decl's value we must perform cleanup on the
// previous value.
- decl.clearValues(mod);
decl.ty = new_ty;
decl.val = new_val;
decl.has_tv = true;
@@ -6657,7 +6449,7 @@ pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| try mod.markDeclIndexAlive(variable.decl),
.extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl),
- .func => |func| try mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
+ .func => |func| try mod.markDeclIndexAlive(func.owner_decl),
.error_union => |error_union| switch (error_union.val) {
.err_name => {},
.payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()),
@@ -6851,8 +6643,8 @@ pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator
return mod.ptrType(info);
}
-pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
- return (try intern(mod, .{ .func_type = info })).toType();
+pub fn funcType(mod: *Module, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
+ return (try mod.intern_pool.getFuncType(mod.gpa, key)).toType();
}
/// Use this for `anyframe->T` only.
@@ -7231,16 +7023,28 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
return mod.intern_pool.indexToFuncType(ty.toIntern());
}
-pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet {
+pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*InferredErrorSet {
const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null;
return mod.inferredErrorSetPtr(index);
}
-pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex {
+pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) InferredErrorSet.OptionalIndex {
if (ty.ip_index == .none) return .none;
return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
}
+pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl {
+ return mod.declPtr(mod.funcOwnerDeclIndex(func_index));
+}
+
+pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index {
+ return mod.funcInfo(func_index).owner_decl;
+}
+
+pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func {
+ return mod.intern_pool.indexToKey(func_index).func;
+}
+
pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
@setCold(true);
const owner_decl = mod.declPtr(owner_decl_index);
@@ -7265,3 +7069,57 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu
pub fn toEnum(mod: *Module, comptime E: type, val: Value) E {
return mod.intern_pool.toEnum(E, val.toIntern());
}
+
+pub fn isAnytypeParam(mod: *Module, func: InternPool.Index, index: u32) bool {
+ const file = mod.declPtr(func.owner_decl).getFileScope(mod);
+
+ const tags = file.zir.instructions.items(.tag);
+
+ const param_body = file.zir.getParamBody(func.zir_body_inst);
+ const param = param_body[index];
+
+ return switch (tags[param]) {
+ .param, .param_comptime => false,
+ .param_anytype, .param_anytype_comptime => true,
+ else => unreachable,
+ };
+}
+
+pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]const u8 {
+ const func = mod.funcInfo(func_index);
+ const file = mod.declPtr(func.owner_decl).getFileScope(mod);
+
+ const tags = file.zir.instructions.items(.tag);
+ const data = file.zir.instructions.items(.data);
+
+ const param_body = file.zir.getParamBody(func.zir_body_inst);
+ const param = param_body[index];
+
+ return switch (tags[param]) {
+ .param, .param_comptime => blk: {
+ const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index);
+ break :blk file.zir.nullTerminatedString(extra.data.name);
+ },
+ .param_anytype, .param_anytype_comptime => blk: {
+ const param_data = data[param].str_tok;
+ break :blk param_data.get(file.zir);
+ },
+ else => unreachable,
+ };
+}
+
+pub fn hasInferredErrorSet(mod: *Module, func: InternPool.Key.Func) bool {
+ const owner_decl = mod.declPtr(func.owner_decl);
+ const zir = owner_decl.getFileScope(mod).zir;
+ const zir_tags = zir.instructions.items(.tag);
+ switch (zir_tags[func.zir_body_inst]) {
+ .func => return false,
+ .func_inferred => return true,
+ .func_fancy => {
+ const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node;
+ const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
+ return extra.data.bits.is_inferred_error;
+ },
+ else => unreachable,
+ }
+}
src/print_air.zig
@@ -665,7 +665,7 @@ const Writer = struct {
fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_fn = w.air.instructions.items(.data)[inst].ty_fn;
const func_index = ty_fn.func;
- const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl);
+ const owner_decl = w.module.funcOwnerDeclPtr(func_index);
try s.print("{}", .{owner_decl.name.fmt(&w.module.intern_pool)});
}
src/Sema.zig
@@ -23,13 +23,13 @@ owner_decl: *Decl,
owner_decl_index: Decl.Index,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
-owner_func: ?*Module.Fn,
-owner_func_index: Module.Fn.OptionalIndex,
+/// This could be `none`, a `func_decl`, or a `func_instance`.
+owner_func_index: InternPool.Index,
/// The function this ZIR code is the body of, according to the source code.
-/// This starts out the same as `owner_func` and then diverges in the case of
+/// This starts out the same as `owner_func_index` and then diverges in the case of
/// an inline or comptime function call.
-func: ?*Module.Fn,
-func_index: Module.Fn.OptionalIndex,
+/// This could be `none`, a `func_decl`, or a `func_instance`.
+func_index: InternPool.Index,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// When semantic analysis needs to know the return type of the function whose body
@@ -49,21 +49,16 @@ comptime_break_inst: Zir.Inst.Index = undefined,
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
decl_val_table: std.AutoHashMapUnmanaged(Decl.Index, Air.Inst.Ref) = .{},
-/// When doing a generic function instantiation, this array collects a
-/// `Value` object for each parameter that is comptime-known and thus elided
-/// from the generated function. This memory is allocated by a parent `Sema` and
-/// owned by the values arena of the Sema owner_decl.
-comptime_args: []TypedValue = &.{},
-/// Marks the function instruction that `comptime_args` applies to so that we
-/// don't accidentally apply it to a function prototype which is used in the
-/// type expression of a generic function parameter.
-comptime_args_fn_inst: Zir.Inst.Index = 0,
-/// When `comptime_args` is provided, this field is also provided. It was used as
-/// the key in the `monomorphed_funcs` set. The `func` instruction is supposed
-/// to use this instead of allocating a fresh one. This avoids an unnecessary
-/// extra hash table lookup in the `monomorphed_funcs` set.
-/// Sema will set this to null when it takes ownership.
-preallocated_new_func: Module.Fn.OptionalIndex = .none,
+/// When doing a generic function instantiation, this array collects a value
+/// for each parameter of the generic owner. `none` for non-comptime parameters.
+/// This is a separate array from `block.params` so that it can be passed
+/// directly to `comptime_args` when calling `InternPool.getFuncInstance`.
+/// This memory is allocated by a parent `Sema` in the temporary arena, and is
+/// used only to add a `func_instance` into the `InternPool`.
+comptime_args: []InternPool.Index = &.{},
+/// Used to communicate from a generic function instantiation to the logic that
+/// creates a generic function instantiation value in `funcCommon`.
+generic_owner: InternPool.Index = .none,
/// The key is types that must be fully resolved prior to machine code
/// generation pass. Types are added to this set when resolving them
/// immediately could cause a dependency loop, but they do need to be resolved
@@ -79,8 +74,6 @@ types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
/// Populated with the last compile error created.
err: ?*Module.ErrorMsg = null,
-/// True when analyzing a generic instantiation. Used to suppress some errors.
-is_generic_instantiation: bool = false,
/// Set to true when analyzing a func type instruction so that nested generic
/// function types will emit generic poison instead of a partial type.
no_partial_func_ty: bool = false,
@@ -97,6 +90,10 @@ unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAll
/// involve transitioning comptime-mutable memory away from using Decls at all.
comptime_mutable_decls: *std.ArrayList(Decl.Index),
+/// This is populated when `@setAlignStack` occurs so that if a duplicate
+/// is encountered, the conflicting source location can be shown.
+prev_stack_alignment_src: ?LazySrcLoc = null,
+
const std = @import("std");
const math = std.math;
const mem = std.mem;
@@ -243,7 +240,13 @@ pub const Block = struct {
/// The AIR instructions generated for this block.
instructions: std.ArrayListUnmanaged(Air.Inst.Index),
// `param` instructions are collected here to be used by the `func` instruction.
- params: std.ArrayListUnmanaged(Param) = .{},
+ /// When doing a generic function instantiation, this array collects a type
+ /// for each *runtime-known* parameter. This array corresponds to the instance
+ /// function type, while `Sema.comptime_args` corresponds to the generic owner
+ /// function type.
+ /// This memory is allocated by a parent `Sema` in the temporary arena, and is
+ /// used to add a `func_instance` into the `InternPool`.
+ params: std.MultiArrayList(Param) = .{},
wip_capture_scope: *CaptureScope,
@@ -323,10 +326,10 @@ pub const Block = struct {
};
const Param = struct {
- /// `noreturn` means `anytype`.
- ty: Type,
+ /// `none` means `anytype`.
+ ty: InternPool.Index,
is_comptime: bool,
- name: []const u8,
+ name: Zir.NullTerminatedString,
};
/// This `Block` maps a block ZIR instruction to the corresponding
@@ -342,7 +345,8 @@ pub const Block = struct {
/// It is shared among all the blocks in an inline or comptime called
/// function.
pub const Inlining = struct {
- func: ?*Module.Fn,
+ /// Might be `none`.
+ func: InternPool.Index,
comptime_result: Air.Inst.Ref,
merges: Merges,
};
@@ -906,7 +910,7 @@ fn analyzeBodyInner(
// We use a while (true) loop here to avoid a redundant way of breaking out of
// the loop. The only way to break out of the loop is with a `noreturn`
// instruction.
- var i: usize = 0;
+ var i: u32 = 0;
const result = while (true) {
crash_info.setBodyIndex(i);
const inst = body[i];
@@ -1338,22 +1342,22 @@ fn analyzeBodyInner(
continue;
},
.param => {
- try sema.zirParam(block, inst, false);
+ try sema.zirParam(block, inst, i, false);
i += 1;
continue;
},
.param_comptime => {
- try sema.zirParam(block, inst, true);
+ try sema.zirParam(block, inst, i, true);
i += 1;
continue;
},
.param_anytype => {
- try sema.zirParamAnytype(block, inst, false);
+ try sema.zirParamAnytype(block, inst, i, false);
i += 1;
continue;
},
.param_anytype_comptime => {
- try sema.zirParamAnytype(block, inst, true);
+ try sema.zirParamAnytype(block, inst, i, true);
i += 1;
continue;
},
@@ -1493,10 +1497,7 @@ fn analyzeBodyInner(
// Note: this probably needs to be resolved in a more general manner.
const prev_params = block.params;
block.params = .{};
- defer {
- block.params.deinit(sema.gpa);
- block.params = prev_params;
- }
+ defer block.params = prev_params;
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
@@ -1532,7 +1533,6 @@ fn analyzeBodyInner(
.merges = undefined,
};
child_block.label = &label;
- defer child_block.params.deinit(gpa);
// Write these instructions directly into the parent block
child_block.instructions = block.instructions;
@@ -2363,7 +2363,10 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
break :blk default_reference_trace_len;
};
- var referenced_by = if (sema.func) |some| some.owner_decl else sema.owner_decl_index;
+ var referenced_by = if (sema.func_index != .none)
+ mod.funcOwnerDeclIndex(sema.func_index)
+ else
+ sema.owner_decl_index;
var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
defer reference_stack.deinit();
@@ -2399,14 +2402,15 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
}
err_msg.reference_trace = try reference_stack.toOwnedSlice();
}
- if (sema.owner_func) |func| {
- func.state = .sema_failure;
+ const ip = &mod.intern_pool;
+ if (sema.owner_func_index != .none) {
+ ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
} else {
sema.owner_decl.analysis = .sema_failure;
sema.owner_decl.generation = mod.generation;
}
- if (sema.func) |func| {
- func.state = .sema_failure;
+ if (sema.func_index != .none) {
+ ip.funcAnalysis(sema.func_index).state = .sema_failure;
}
const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
if (gop.found_existing) {
@@ -2866,6 +2870,7 @@ fn createAnonymousDeclTypeNamed(
inst: ?Zir.Inst.Index,
) !Decl.Index {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const gpa = sema.gpa;
const namespace = block.namespace;
const src_scope = block.wip_capture_scope;
@@ -2895,7 +2900,7 @@ fn createAnonymousDeclTypeNamed(
return new_decl_index;
},
.func => {
- const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst);
+ const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index));
const zir_tags = sema.code.instructions.items(.tag);
var buf = std.ArrayList(u8).init(gpa);
@@ -3070,18 +3075,12 @@ fn zirEnumDecl(
sema.owner_decl_index = prev_owner_decl_index;
}
- const prev_owner_func = sema.owner_func;
const prev_owner_func_index = sema.owner_func_index;
- sema.owner_func = null;
sema.owner_func_index = .none;
- defer sema.owner_func = prev_owner_func;
defer sema.owner_func_index = prev_owner_func_index;
- const prev_func = sema.func;
const prev_func_index = sema.func_index;
- sema.func = null;
sema.func_index = .none;
- defer sema.func = prev_func;
defer sema.func_index = prev_func_index;
var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope);
@@ -3393,7 +3392,7 @@ fn zirErrorSetDecl(
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
- var names: Module.Fn.InferredErrorSet.NameMap = .{};
+ var names: Module.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);
var extra_index = @as(u32, @intCast(extra.end));
@@ -5379,7 +5378,10 @@ fn zirCompileLog(
}
try writer.print("\n", .{});
- const decl_index = if (sema.func) |some| some.owner_decl else sema.owner_decl_index;
+ const decl_index = if (sema.func_index != .none)
+ mod.funcOwnerDeclIndex(sema.func_index)
+ else
+ sema.owner_decl_index;
const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = src_node;
@@ -5967,11 +5969,11 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
alignment.toByteUnitsOptional().?,
});
}
- const func_index = sema.func_index.unwrap() orelse
+ if (sema.func_index == .none) {
return sema.fail(block, src, "@setAlignStack outside function body", .{});
- const func = mod.funcPtr(func_index);
+ }
- const fn_owner_decl = mod.declPtr(func.owner_decl);
+ const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
.Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
.Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
@@ -5980,25 +5982,34 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
},
}
- const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index);
- if (gop.found_existing) {
+ if (sema.prev_stack_alignment_src) |prev_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, gop.value_ptr.src, msg, "other instance here", .{});
+ try sema.errNote(block, prev_src, msg, "other instance here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
- gop.value_ptr.* = .{ .alignment = alignment, .src = src };
+
+    // Record this instruction's source location so a second @setAlignStack in
+    // the same function body can report "other instance here" (checked above).
+    sema.prev_stack_alignment_src = src;
+
+    const ip = &mod.intern_pool;
+    const a = ip.funcAnalysis(sema.func_index);
+    // `.none` is a sentinel (maxInt) and must not participate in @max;
+    // store the first requested alignment directly, otherwise keep the larger.
+    a.stack_alignment = if (a.stack_alignment == .none)
+        alignment
+    else
+        @enumFromInt(@max(@intFromEnum(alignment), @intFromEnum(a.stack_alignment)));
}
fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
+ const mod = sema.mod;
+ const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
- const func = sema.func orelse return; // does nothing outside a function
- func.is_cold = is_cold;
+ if (sema.func_index == .none) return; // does nothing outside a function
+ ip.funcAnalysis(sema.func_index).is_cold = is_cold;
}
fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
@@ -6308,7 +6319,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
if (func_val.isUndef(mod)) return null;
const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
.extern_func => |extern_func| extern_func.decl,
- .func => |func| mod.funcPtr(func.index).owner_decl,
+ .func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl,
else => return null,
@@ -6445,6 +6456,7 @@ fn zirCall(
defer tracy.end();
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
@@ -6493,9 +6505,10 @@ fn zirCall(
const args_body = sema.code.extra[extra.end..];
var input_is_error = false;
- const block_index = @as(Air.Inst.Index, @intCast(block.instructions.items.len));
+ const block_index: Air.Inst.Index = @intCast(block.instructions.items.len);
- const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len;
+ const func_ty_info = mod.typeToFunc(func_ty).?;
+ const fn_params_len = func_ty_info.param_types.len;
const parent_comptime = block.is_comptime;
// `extra_index` and `arg_index` are separate since the bound function is passed as the first argument.
var extra_index: usize = 0;
@@ -6504,13 +6517,12 @@ fn zirCall(
extra_index += 1;
arg_index += 1;
}) {
- const func_ty_info = mod.typeToFunc(func_ty).?;
const arg_end = sema.code.extra[extra.end + extra_index];
defer arg_start = arg_end;
// Generate args to comptime params in comptime block.
defer block.is_comptime = parent_comptime;
- if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@as(u5, @intCast(arg_index)))) {
+ if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) {
block.is_comptime = true;
// TODO set comptime_reason
}
@@ -6519,10 +6531,10 @@ fn zirCall(
if (arg_index >= fn_params_len)
break :inst Air.Inst.Ref.var_args_param_type;
- if (func_ty_info.param_types[arg_index] == .generic_poison_type)
+ if (func_ty_info.param_types.get(ip)[arg_index] == .generic_poison_type)
break :inst Air.Inst.Ref.generic_poison_type;
- break :inst try sema.addType(func_ty_info.param_types[arg_index].toType());
+ break :inst try sema.addType(func_ty_info.param_types.get(ip)[arg_index].toType());
});
const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
@@ -6535,7 +6547,9 @@ fn zirCall(
}
resolved_args[arg_index] = resolved;
}
- if (sema.owner_func == null or !sema.owner_func.?.calls_or_awaits_errorable_fn) {
+ if (sema.owner_func_index == .none or
+ !ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn)
+ {
input_is_error = false; // input was an error type, but no errorable fn's were actually called
}
@@ -6702,6 +6716,7 @@ fn analyzeCall(
call_dbg_node: ?Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const callee_ty = sema.typeOf(func);
const func_ty_info = mod.typeToFunc(func_ty).?;
@@ -6749,20 +6764,17 @@ fn analyzeCall(
var is_generic_call = func_ty_info.is_generic;
var is_comptime_call = block.is_comptime or modifier == .compile_time;
- var comptime_reason_buf: Block.ComptimeReason = undefined;
var comptime_reason: ?*const Block.ComptimeReason = null;
if (!is_comptime_call) {
if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| {
is_comptime_call = ct;
if (ct) {
- // stage1 can't handle doing this directly
- comptime_reason_buf = .{ .comptime_ret_ty = .{
+ comptime_reason = &.{ .comptime_ret_ty = .{
.block = block,
.func = func,
.func_src = func_src,
.return_ty = func_ty_info.return_type.toType(),
} };
- comptime_reason = &comptime_reason_buf;
}
} else |err| switch (err) {
error.GenericPoison => is_generic_call = true,
@@ -6778,7 +6790,6 @@ fn analyzeCall(
func,
func_src,
call_src,
- func_ty,
ensure_result_used,
uncasted_args,
call_tag,
@@ -6793,14 +6804,12 @@ fn analyzeCall(
error.ComptimeReturn => {
is_inline_call = true;
is_comptime_call = true;
- // stage1 can't handle doing this directly
- comptime_reason_buf = .{ .comptime_ret_ty = .{
+ comptime_reason = &.{ .comptime_ret_ty = .{
.block = block,
.func = func,
.func_src = func_src,
.return_ty = func_ty_info.return_type.toType(),
} };
- comptime_reason = &comptime_reason_buf;
},
else => |e| return e,
}
@@ -6819,9 +6828,9 @@ fn analyzeCall(
.extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
- .func => |function| function.index,
+ .func => func_val.toIntern(),
.ptr => |ptr| switch (ptr.addr) {
- .decl => |decl| mod.declPtr(decl).val.getFunctionIndex(mod).unwrap().?,
+ .decl => |decl| mod.declPtr(decl).val.toIntern(),
else => {
assert(callee_ty.isPtrAtRuntime(mod));
return sema.fail(block, call_src, "{s} call of function pointer", .{
@@ -6850,7 +6859,7 @@ fn analyzeCall(
// This one is shared among sub-blocks within the same callee, but not
// shared among the entire inline/comptime call stack.
var inlining: Block.Inlining = .{
- .func = null,
+ .func = .none,
.comptime_result = undefined,
.merges = .{
.src_locs = .{},
@@ -6862,7 +6871,7 @@ fn analyzeCall(
// In order to save a bit of stack space, directly modify Sema rather
// than create a child one.
const parent_zir = sema.code;
- const module_fn = mod.funcPtr(module_fn_index);
+ const module_fn = mod.funcInfo(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
sema.code = fn_owner_decl.getFileScope(mod).zir;
defer sema.code = parent_zir;
@@ -6877,11 +6886,8 @@ fn analyzeCall(
sema.inst_map = parent_inst_map;
}
- const parent_func = sema.func;
const parent_func_index = sema.func_index;
- sema.func = module_fn;
- sema.func_index = module_fn_index.toOptional();
- defer sema.func = parent_func;
+ sema.func_index = module_fn_index;
defer sema.func_index = parent_func_index;
const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry;
@@ -6913,16 +6919,30 @@ fn analyzeCall(
try sema.emitBackwardBranch(block, call_src);
- // Whether this call should be memoized, set to false if the call can mutate comptime state.
+ // Whether this call should be memoized, set to false if the call can
+ // mutate comptime state.
var should_memoize = true;
// If it's a comptime function call, we need to memoize it as long as no external
// comptime memory is mutated.
const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
- var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?;
- new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len);
- new_fn_info.comptime_bits = 0;
+ const owner_info = mod.typeToFunc(fn_owner_decl.ty).?;
+ var new_fn_info: InternPool.GetFuncTypeKey = .{
+ .param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len),
+ .return_type = owner_info.return_type,
+ .comptime_bits = 0,
+ .noalias_bits = owner_info.noalias_bits,
+ .alignment = owner_info.alignment,
+ .cc = owner_info.cc,
+ .is_var_args = owner_info.is_var_args,
+ .is_noinline = owner_info.is_noinline,
+ .align_is_generic = owner_info.align_is_generic,
+ .cc_is_generic = owner_info.cc_is_generic,
+ .section_is_generic = owner_info.section_is_generic,
+ .addrspace_is_generic = owner_info.addrspace_is_generic,
+ .is_generic = owner_info.is_generic,
+ };
// This will have return instructions analyzed as break instructions to
// the block_inst above. Here we are performing "comptime/inline semantic analysis"
@@ -6934,59 +6954,42 @@ fn analyzeCall(
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, fn_info.param_body);
var has_comptime_args = false;
- var arg_i: usize = 0;
+ var arg_i: u32 = 0;
for (fn_info.param_body) |inst| {
- sema.analyzeInlineCallArg(
+ const arg_src: LazySrcLoc = .{ .call_arg = .{
+ .call_node_offset = call_src.node_offset.x,
+ .arg_index = arg_i,
+ } };
+ try sema.analyzeInlineCallArg(
block,
&child_block,
- .unneeded,
+ arg_src,
inst,
- &new_fn_info,
+ new_fn_info.param_types,
&arg_i,
uncasted_args,
is_comptime_call,
&should_memoize,
memoized_arg_values,
- mod.typeToFunc(func_ty).?.param_types,
+ func_ty_info.param_types,
func,
&has_comptime_args,
- ) catch |err| switch (err) {
- error.NeededSourceLocation => {
- _ = sema.inst_map.remove(inst);
- const decl = mod.declPtr(block.src_decl);
- try sema.analyzeInlineCallArg(
- block,
- &child_block,
- mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src),
- inst,
- &new_fn_info,
- &arg_i,
- uncasted_args,
- is_comptime_call,
- &should_memoize,
- memoized_arg_values,
- mod.typeToFunc(func_ty).?.param_types,
- func,
- &has_comptime_args,
- );
- unreachable;
- },
- else => |e| return e,
- };
+ );
}
- if (!has_comptime_args and module_fn.state == .sema_failure) return error.AnalysisFail;
+ if (!has_comptime_args and module_fn.analysis(ip).state == .sema_failure)
+ return error.AnalysisFail;
const recursive_msg = "inline call is recursive";
var head = if (!has_comptime_args) block else null;
while (head) |some| {
const parent_inlining = some.inlining orelse break;
- if (parent_inlining.func == module_fn) {
+ if (parent_inlining.func == module_fn_index) {
return sema.fail(block, call_src, recursive_msg, .{});
}
head = some.parent;
}
- if (!has_comptime_args) inlining.func = module_fn;
+ if (!has_comptime_args) inlining.func = module_fn_index;
// In case it is a generic function with an expression for the return type that depends
// on parameters, we must now do the same for the return type as we just did with
@@ -7000,7 +7003,7 @@ fn analyzeCall(
const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
// Create a fresh inferred error set type for inline/comptime calls.
const fn_ret_ty = blk: {
- if (module_fn.hasInferredErrorSet(mod)) {
+ if (mod.hasInferredErrorSet(module_fn)) {
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
.func = module_fn_index,
});
@@ -7032,7 +7035,7 @@ fn analyzeCall(
const new_func_resolved_ty = try mod.funcType(new_fn_info);
if (!is_comptime_call and !block.is_typeof) {
- try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);
+ try sema.emitDbgInline(block, parent_func_index, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);
const zir_tags = sema.code.instructions.items(.tag);
for (fn_info.param_body) |param| switch (zir_tags[param]) {
@@ -7078,21 +7081,22 @@ fn analyzeCall(
try sema.emitDbgInline(
block,
module_fn_index,
- parent_func_index.unwrap().?,
- mod.declPtr(parent_func.?.owner_decl).ty,
+ parent_func_index,
+ mod.funcOwnerDeclPtr(parent_func_index).ty,
.dbg_inline_end,
);
}
if (should_memoize and is_comptime_call) {
const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
+ const result_interned = try result_val.intern(fn_ret_ty, mod);
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
_ = try mod.intern(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
- .result = try result_val.intern(fn_ret_ty, mod),
+ .result = result_interned,
} });
}
@@ -7112,7 +7116,7 @@ fn analyzeCall(
.func_inst = func,
.param_i = @as(u32, @intCast(i)),
} };
- const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType();
+ const param_ty = func_ty_info.param_types.get(ip)[i].toType();
args[i] = sema.analyzeCallArg(
block,
.unneeded,
@@ -7152,13 +7156,13 @@ fn analyzeCall(
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
- if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) {
- sema.owner_func.?.calls_or_awaits_errorable_fn = true;
+ if (sema.owner_func_index != .none and func_ty_info.return_type.toType().isError(mod)) {
+ ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
}
if (try sema.resolveMaybeUndefVal(func)) |func_val| {
- if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| {
- try mod.ensureFuncBodyAnalysisQueued(func_index);
+ if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
+ try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
}
}
@@ -7219,7 +7223,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ
@tagName(backend), @tagName(target.cpu.arch),
});
}
- const func_decl = mod.declPtr(sema.owner_func.?.owner_decl);
+ const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index);
if (!func_ty.eql(func_decl.ty, mod)) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
func_ty.fmt(mod), func_decl.ty.fmt(mod),
@@ -7235,17 +7239,18 @@ fn analyzeInlineCallArg(
param_block: *Block,
arg_src: LazySrcLoc,
inst: Zir.Inst.Index,
- new_fn_info: *InternPool.Key.FuncType,
- arg_i: *usize,
+ new_param_types: []InternPool.Index,
+ arg_i: *u32,
uncasted_args: []const Air.Inst.Ref,
is_comptime_call: bool,
should_memoize: *bool,
memoized_arg_values: []InternPool.Index,
- raw_param_types: []const InternPool.Index,
+ raw_param_types: InternPool.Index.Slice,
func_inst: Air.Inst.Ref,
has_comptime_args: *bool,
) !void {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
.param_comptime, .param_anytype_comptime => has_comptime_args.* = true,
@@ -7260,13 +7265,13 @@ fn analyzeInlineCallArg(
const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index);
const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const param_ty = param_ty: {
- const raw_param_ty = raw_param_types[arg_i.*];
+ const raw_param_ty = raw_param_types.get(ip)[arg_i.*];
if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
const param_ty_inst = try sema.resolveBody(param_block, param_body, inst);
const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst);
break :param_ty param_ty.toIntern();
};
- new_fn_info.param_types[arg_i.*] = param_ty;
+ new_param_types[arg_i.*] = param_ty;
const uncasted_arg = uncasted_args[arg_i.*];
if (try sema.typeRequiresComptime(param_ty.toType())) {
_ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| {
@@ -7317,7 +7322,7 @@ fn analyzeInlineCallArg(
.param_anytype, .param_anytype_comptime => {
// No coercion needed.
const uncasted_arg = uncasted_args[arg_i.*];
- new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern();
+ new_param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern();
if (is_comptime_call) {
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
@@ -7371,50 +7376,12 @@ fn analyzeCallArg(
};
}
-fn analyzeGenericCallArg(
- sema: *Sema,
- block: *Block,
- arg_src: LazySrcLoc,
- uncasted_arg: Air.Inst.Ref,
- comptime_arg: TypedValue,
- runtime_args: []Air.Inst.Ref,
- new_fn_info: InternPool.Key.FuncType,
- runtime_i: *u32,
-) !void {
- const mod = sema.mod;
- const is_runtime = comptime_arg.val.isGenericPoison() and
- comptime_arg.ty.hasRuntimeBits(mod) and
- !(try sema.typeRequiresComptime(comptime_arg.ty));
- if (is_runtime) {
- const param_ty = new_fn_info.param_types[runtime_i.*].toType();
- const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
- try sema.queueFullTypeResolution(param_ty);
- runtime_args[runtime_i.*] = casted_arg;
- runtime_i.* += 1;
- } else if (try sema.typeHasOnePossibleValue(comptime_arg.ty)) |_| {
- _ = try sema.coerce(block, comptime_arg.ty, uncasted_arg, arg_src);
- }
-}
-
-fn analyzeGenericCallArgVal(
- sema: *Sema,
- block: *Block,
- arg_src: LazySrcLoc,
- arg_ty: Type,
- uncasted_arg: Air.Inst.Ref,
- reason: []const u8,
-) !Value {
- const casted_arg = try sema.coerce(block, arg_ty, uncasted_arg, arg_src);
- return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, casted_arg, reason));
-}
-
fn instantiateGenericCall(
sema: *Sema,
block: *Block,
func: Air.Inst.Ref,
func_src: LazySrcLoc,
call_src: LazySrcLoc,
- generic_func_ty: Type,
ensure_result_used: bool,
uncasted_args: []const Air.Inst.Ref,
call_tag: Air.Inst.Tag,
@@ -7423,248 +7390,132 @@ fn instantiateGenericCall(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
+ const ip = &mod.intern_pool;
const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known");
- const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
- .func => |function| function.index,
- .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?,
+ const module_fn = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
+ .func => |x| x,
+ .ptr => |ptr| mod.intern_pool.indexToKey(mod.declPtr(ptr.addr.decl).val.toIntern()).func,
else => unreachable,
};
- const module_fn = mod.funcPtr(module_fn_index);
- // Check the Module's generic function map with an adapted context, so that we
- // can match against `uncasted_args` rather than doing the work below to create a
- // generic Scope only to junk it if it matches an existing instantiation.
+
+ // Even though there may already be a generic instantiation corresponding
+ // to this callsite, we must evaluate the expressions of the generic
+ // function signature with the values of the callsite plugged in.
+ // Importantly, this may include type coercions that determine whether the
+ // instantiation is a match of a previous instantiation.
+ // The actual monomorphization happens via adding `func_instance` to
+ // `InternPool`.
+
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
const namespace_index = fn_owner_decl.src_namespace;
const namespace = mod.namespacePtr(namespace_index);
const fn_zir = namespace.file_scope.zir;
const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
- const zir_tags = fn_zir.instructions.items(.tag);
-
- const monomorphed_args = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(generic_func_ty).?.param_types.len);
- const callee_index = callee: {
- var arg_i: usize = 0;
- var monomorphed_arg_i: u32 = 0;
- var known_unique = false;
- for (fn_info.param_body) |inst| {
- const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?;
- var is_comptime = false;
- var is_anytype = false;
- switch (zir_tags[inst]) {
- .param => {
- is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
- },
- .param_comptime => {
- is_comptime = true;
- },
- .param_anytype => {
- is_anytype = true;
- is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
- },
- .param_anytype_comptime => {
- is_anytype = true;
- is_comptime = true;
- },
- else => continue,
- }
-
- defer arg_i += 1;
- const param_ty = generic_func_ty_info.param_types[arg_i];
- const is_generic = !is_anytype and param_ty == .generic_poison_type;
-
- if (known_unique) {
- if (is_comptime or is_anytype or is_generic) {
- monomorphed_arg_i += 1;
- }
- continue;
- }
-
- const uncasted_arg = uncasted_args[arg_i];
- const arg_ty = if (is_generic) mod.monomorphed_funcs.getAdapted(
- Module.MonomorphedFuncAdaptedKey{
- .func = module_fn_index,
- .args = monomorphed_args[0..monomorphed_arg_i],
- },
- Module.MonomorphedFuncsAdaptedContext{ .mod = mod },
- ) orelse {
- known_unique = true;
- monomorphed_arg_i += 1;
- continue;
- } else if (is_anytype) sema.typeOf(uncasted_arg).toIntern() else param_ty;
- const was_comptime = is_comptime;
- if (!is_comptime and try sema.typeRequiresComptime(arg_ty.toType())) is_comptime = true;
- if (is_comptime or is_anytype) {
- // Tuple default values are a part of the type and need to be
- // resolved to hash the type.
- try sema.resolveTupleLazyValues(block, call_src, arg_ty.toType());
- }
-
- if (is_comptime) {
- const casted_arg = sema.analyzeGenericCallArgVal(block, .unneeded, arg_ty.toType(), uncasted_arg, "") catch |err| switch (err) {
- error.NeededSourceLocation => {
- const decl = mod.declPtr(block.src_decl);
- const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src);
- _ = try sema.analyzeGenericCallArgVal(
- block,
- arg_src,
- arg_ty.toType(),
- uncasted_arg,
- if (was_comptime)
- "parameter is comptime"
- else
- "argument to parameter with comptime-only type must be comptime-known",
- );
- unreachable;
- },
- else => |e| return e,
- };
- monomorphed_args[monomorphed_arg_i] = casted_arg.toIntern();
- monomorphed_arg_i += 1;
- } else if (is_anytype or is_generic) {
- monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty });
- monomorphed_arg_i += 1;
- }
- }
-
- if (!known_unique) {
- if (mod.monomorphed_funcs.getAdapted(
- Module.MonomorphedFuncAdaptedKey{
- .func = module_fn_index,
- .args = monomorphed_args[0..monomorphed_arg_i],
- },
- Module.MonomorphedFuncsAdaptedContext{ .mod = mod },
- )) |callee_func| break :callee mod.intern_pool.indexToKey(callee_func).func.index;
- }
-
- const new_module_func_index = try mod.createFunc(undefined);
- const new_module_func = mod.funcPtr(new_module_func_index);
- new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional();
- new_module_func.comptime_args = null;
+ const comptime_args = try sema.arena.alloc(InternPool.Index, uncasted_args.len);
+ @memset(comptime_args, .none);
- try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);
+ // Re-run the block that creates the function, with the comptime parameters
+ // pre-populated inside `inst_map`. This causes `param_comptime` and
+ // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
+ // new, monomorphized function, with the comptime parameters elided.
+ var child_sema: Sema = .{
+ .mod = mod,
+ .gpa = gpa,
+ .arena = sema.arena,
+ .code = fn_zir,
+ // We pass the generic callsite's owner decl here because whatever `Decl`
+ // dependencies are chased at this point should be attached to the
+ // callsite, not the `Decl` associated with the `func_instance`.
+ .owner_decl = sema.owner_decl,
+ .owner_decl_index = sema.owner_decl_index,
+ .func_index = sema.owner_func_index,
+ .fn_ret_ty = Type.void,
+ .owner_func_index = .none,
+ .comptime_args = comptime_args,
+ .generic_owner = module_fn.generic_owner,
+ .branch_quota = sema.branch_quota,
+ .branch_count = sema.branch_count,
+ .comptime_mutable_decls = sema.comptime_mutable_decls,
+ };
+ defer child_sema.deinit();
- // Create a Decl for the new function.
- const src_decl_index = namespace.getDeclIndex(mod);
- const src_decl = mod.declPtr(src_decl_index);
- const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope);
- const new_decl = mod.declPtr(new_decl_index);
- // TODO better names for generic function instantiations
- const decl_name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}__anon_{d}", .{
- fn_owner_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index),
- });
- new_decl.name = decl_name;
- new_decl.src_line = fn_owner_decl.src_line;
- new_decl.is_pub = fn_owner_decl.is_pub;
- new_decl.is_exported = fn_owner_decl.is_exported;
- new_decl.has_align = fn_owner_decl.has_align;
- new_decl.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace;
- new_decl.@"linksection" = fn_owner_decl.@"linksection";
- new_decl.@"addrspace" = fn_owner_decl.@"addrspace";
- new_decl.zir_decl_index = fn_owner_decl.zir_decl_index;
- new_decl.alive = true; // This Decl is called at runtime.
- new_decl.analysis = .in_progress;
- new_decl.generation = mod.generation;
+ var child_block: Block = .{
+ .parent = null,
+ .sema = &child_sema,
+ .src_decl = module_fn.owner_decl,
+ .namespace = namespace_index,
+ .wip_capture_scope = block.wip_capture_scope,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = true,
+ };
+ defer child_block.instructions.deinit(gpa);
- namespace.anon_decls.putAssumeCapacityNoClobber(new_decl_index, {});
+ try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
- // The generic function Decl is guaranteed to be the first dependency
- // of each of its instantiations.
- assert(new_decl.dependencies.keys().len == 0);
- try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body);
+ for (fn_info.param_body[0..uncasted_args.len], uncasted_args, 0..) |inst, arg, i| {
+ // `child_sema` will use a different `inst_map` which means we have to
+ // convert from parent-relative `Air.Inst.Ref` to child-relative here.
+ // Constants are simple; runtime-known values need a new instruction.
+ child_sema.inst_map.putAssumeCapacityNoClobber(inst, if (try sema.resolveMaybeUndefVal(arg)) |val|
+ Air.internedToRef(val.toIntern())
+ else
+ // We insert into the map an instruction which is runtime-known
+ // but has the type of the argument.
+ try child_block.addInst(.{
+ .tag = .arg,
+ .data = .{ .arg = .{
+ .ty = Air.internedToRef(sema.typeOf(arg).toIntern()),
+ .src_index = @intCast(i),
+ } },
+ }));
+ }
- const new_func = sema.resolveGenericInstantiationType(
- block,
- fn_zir,
- new_decl,
- new_decl_index,
- uncasted_args,
- monomorphed_arg_i,
- module_fn_index,
- new_module_func_index,
- namespace_index,
- generic_func_ty,
- call_src,
- bound_arg_src,
- ) catch |err| switch (err) {
- error.GenericPoison, error.ComptimeReturn => {
- // Resolving the new function type below will possibly declare more decl dependencies
- // and so we remove them all here in case of error.
- for (new_decl.dependencies.keys()) |dep_index| {
- const dep = mod.declPtr(dep_index);
- dep.removeDependant(new_decl_index);
- }
- assert(namespace.anon_decls.orderedRemove(new_decl_index));
- mod.destroyDecl(new_decl_index);
- mod.destroyFunc(new_module_func_index);
- return err;
- },
- else => {
- // TODO look up the compile error that happened here and attach a note to it
- // pointing here, at the generic instantiation callsite.
- if (sema.owner_func) |owner_func| {
- owner_func.state = .dependency_failure;
- } else {
- sema.owner_decl.analysis = .dependency_failure;
- }
- return err;
- },
- };
+ const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
+ const callee_index = (child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable).toIntern();
- break :callee new_func;
- };
- const callee = mod.funcPtr(callee_index);
- callee.branch_quota = @max(callee.branch_quota, sema.branch_quota);
+ const callee = mod.funcInfo(callee_index);
+ callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);
const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl);
// Make a runtime call to the new function, making sure to omit the comptime args.
- const comptime_args = callee.comptime_args.?;
- const func_ty = mod.declPtr(callee.owner_decl).ty;
- const runtime_args_len = @as(u32, @intCast(mod.typeToFunc(func_ty).?.param_types.len));
+ const func_ty = callee.ty.toType();
+ const func_ty_info = mod.typeToFunc(func_ty).?;
+ const runtime_args_len: u32 = func_ty_info.param_types.len;
const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len);
{
var runtime_i: u32 = 0;
- var total_i: u32 = 0;
- for (fn_info.param_body) |inst| {
- switch (zir_tags[inst]) {
- .param_comptime, .param_anytype_comptime, .param, .param_anytype => {},
- else => continue,
+ for (uncasted_args, 0..) |uncasted_arg, total_i| {
+ const arg_src: LazySrcLoc = if (total_i == 0 and bound_arg_src != null)
+ bound_arg_src.?
+ else
+ .{ .call_arg = .{
+ .call_node_offset = call_src.node_offset.x,
+ .arg_index = @intCast(total_i),
+ } };
+
+ const comptime_arg = callee.comptime_args.get(ip)[total_i];
+ if (comptime_arg == .none) {
+ const param_ty = func_ty_info.param_types.get(ip)[runtime_i].toType();
+ const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
+ try sema.queueFullTypeResolution(param_ty);
+ runtime_args[runtime_i] = casted_arg;
+ runtime_i += 1;
}
- sema.analyzeGenericCallArg(
- block,
- .unneeded,
- uncasted_args[total_i],
- comptime_args[total_i],
- runtime_args,
- mod.typeToFunc(func_ty).?,
- &runtime_i,
- ) catch |err| switch (err) {
- error.NeededSourceLocation => {
- const decl = mod.declPtr(block.src_decl);
- _ = try sema.analyzeGenericCallArg(
- block,
- mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src),
- uncasted_args[total_i],
- comptime_args[total_i],
- runtime_args,
- mod.typeToFunc(func_ty).?,
- &runtime_i,
- );
- unreachable;
- },
- else => |e| return e,
- };
- total_i += 1;
}
- try sema.queueFullTypeResolution(mod.typeToFunc(func_ty).?.return_type.toType());
+ try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
}
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
- if (sema.owner_func != null and mod.typeToFunc(func_ty).?.return_type.toType().isError(mod)) {
- sema.owner_func.?.calls_or_awaits_errorable_fn = true;
+ if (sema.owner_func_index != .none and
+ func_ty_info.return_type.toType().isError(mod))
+ {
+ ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
}
try mod.ensureFuncBodyAnalysisQueued(callee_index);
@@ -7695,238 +7546,6 @@ fn instantiateGenericCall(
return result;
}
-fn resolveGenericInstantiationType(
- sema: *Sema,
- block: *Block,
- fn_zir: Zir,
- new_decl: *Decl,
- new_decl_index: Decl.Index,
- uncasted_args: []const Air.Inst.Ref,
- monomorphed_args_len: u32,
- module_fn_index: Module.Fn.Index,
- new_module_func: Module.Fn.Index,
- namespace: Namespace.Index,
- generic_func_ty: Type,
- call_src: LazySrcLoc,
- bound_arg_src: ?LazySrcLoc,
-) !Module.Fn.Index {
- const mod = sema.mod;
- const gpa = sema.gpa;
-
- const zir_tags = fn_zir.instructions.items(.tag);
- const module_fn = mod.funcPtr(module_fn_index);
- const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
-
- // Re-run the block that creates the function, with the comptime parameters
- // pre-populated inside `inst_map`. This causes `param_comptime` and
- // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
- // new, monomorphized function, with the comptime parameters elided.
- var child_sema: Sema = .{
- .mod = mod,
- .gpa = gpa,
- .arena = sema.arena,
- .code = fn_zir,
- .owner_decl = new_decl,
- .owner_decl_index = new_decl_index,
- .func = null,
- .func_index = .none,
- .fn_ret_ty = Type.void,
- .owner_func = null,
- .owner_func_index = .none,
- // TODO: fully migrate functions into InternPool
- .comptime_args = try mod.tmp_hack_arena.allocator().alloc(TypedValue, uncasted_args.len),
- .comptime_args_fn_inst = module_fn.zir_body_inst,
- .preallocated_new_func = new_module_func.toOptional(),
- .is_generic_instantiation = true,
- .branch_quota = sema.branch_quota,
- .branch_count = sema.branch_count,
- .comptime_mutable_decls = sema.comptime_mutable_decls,
- };
- defer child_sema.deinit();
-
- var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope);
- defer wip_captures.deinit();
-
- var child_block: Block = .{
- .parent = null,
- .sema = &child_sema,
- .src_decl = new_decl_index,
- .namespace = namespace,
- .wip_capture_scope = wip_captures.scope,
- .instructions = .{},
- .inlining = null,
- .is_comptime = true,
- };
- defer {
- child_block.instructions.deinit(gpa);
- child_block.params.deinit(gpa);
- }
-
- try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
-
- var arg_i: usize = 0;
- for (fn_info.param_body) |inst| {
- const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?;
- var is_comptime = false;
- var is_anytype = false;
- switch (zir_tags[inst]) {
- .param => {
- is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
- },
- .param_comptime => {
- is_comptime = true;
- },
- .param_anytype => {
- is_anytype = true;
- is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
- },
- .param_anytype_comptime => {
- is_anytype = true;
- is_comptime = true;
- },
- else => continue,
- }
- const arg = uncasted_args[arg_i];
- if (is_comptime) {
- const arg_val = (try sema.resolveMaybeUndefVal(arg)).?;
- const child_arg = try child_sema.addConstant(arg_val);
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
- } else if (is_anytype) {
- const arg_ty = sema.typeOf(arg);
- if (try sema.typeRequiresComptime(arg_ty)) {
- const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) {
- error.NeededSourceLocation => {
- const decl = mod.declPtr(block.src_decl);
- const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src);
- _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known");
- unreachable;
- },
- else => |e| return e,
- };
- const child_arg = try child_sema.addConstant(arg_val);
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
- } else {
- // We insert into the map an instruction which is runtime-known
- // but has the type of the argument.
- const child_arg = try child_block.addInst(.{
- .tag = .arg,
- .data = .{ .arg = .{
- .ty = try child_sema.addType(arg_ty),
- .src_index = @as(u32, @intCast(arg_i)),
- } },
- });
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
- }
- }
- arg_i += 1;
- }
-
- // Save the error trace as our first action in the function.
- // If this is unnecessary after all, Liveness will clean it up for us.
- const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
- child_sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
- child_block.error_return_trace_index = error_return_trace_index;
-
- const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
- const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable;
- const new_func = new_func_val.getFunctionIndex(mod).unwrap().?;
- assert(new_func == new_module_func);
-
- const monomorphed_args_index = @as(u32, @intCast(mod.monomorphed_func_keys.items.len));
- const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len);
- var monomorphed_arg_i: u32 = 0;
- try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod });
-
- arg_i = 0;
- for (fn_info.param_body) |inst| {
- const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?;
- var is_comptime = false;
- var is_anytype = false;
- switch (zir_tags[inst]) {
- .param => {
- is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
- },
- .param_comptime => {
- is_comptime = true;
- },
- .param_anytype => {
- is_anytype = true;
- is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
- },
- .param_anytype_comptime => {
- is_anytype = true;
- is_comptime = true;
- },
- else => continue,
- }
-
- const param_ty = generic_func_ty_info.param_types[arg_i];
- const is_generic = !is_anytype and param_ty == .generic_poison_type;
-
- const arg = child_sema.inst_map.get(inst).?;
- const arg_ty = child_sema.typeOf(arg);
-
- if (is_generic) if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{
- .func = module_fn_index,
- .args_index = monomorphed_args_index,
- .args_len = monomorphed_arg_i,
- }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern());
- if (!is_comptime and try sema.typeRequiresComptime(arg_ty)) is_comptime = true;
-
- if (is_comptime) {
- const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?;
- monomorphed_args[monomorphed_arg_i] = arg_val.toIntern();
- monomorphed_arg_i += 1;
- child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = arg_val };
- } else {
- if (is_anytype or is_generic) {
- monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty.toIntern() });
- monomorphed_arg_i += 1;
- }
- child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = Value.generic_poison };
- }
-
- arg_i += 1;
- }
-
- try wip_captures.finalize();
-
- // Populate the Decl ty/val with the function and its type.
- new_decl.ty = child_sema.typeOf(new_func_inst);
- // If the call evaluated to a return type that requires comptime, never mind
- // our generic instantiation. Instead we need to perform a comptime call.
- const new_fn_info = mod.typeToFunc(new_decl.ty).?;
- if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) {
- return error.ComptimeReturn;
- }
- // Similarly, if the call evaluated to a generic type we need to instead
- // call it inline.
- if (new_fn_info.is_generic or new_fn_info.cc == .Inline) {
- return error.GenericPoison;
- }
-
- new_decl.val = (try mod.intern(.{ .func = .{
- .ty = new_decl.ty.toIntern(),
- .index = new_func,
- } })).toValue();
- new_decl.alignment = .none;
- new_decl.has_tv = true;
- new_decl.owns_tv = true;
- new_decl.analysis = .complete;
-
- mod.monomorphed_funcs.putAssumeCapacityNoClobberContext(.{
- .func = module_fn_index,
- .args_index = monomorphed_args_index,
- .args_len = monomorphed_arg_i,
- }, new_decl.val.toIntern(), .{ .mod = mod });
-
- // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
- // will be populated, ensuring it will have `analyzeBody` called with the ZIR
- // parameters mapped appropriately.
- try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
- return new_func;
-}
-
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
const mod = sema.mod;
const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
@@ -7944,8 +7563,8 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
fn emitDbgInline(
sema: *Sema,
block: *Block,
- old_func: Module.Fn.Index,
- new_func: Module.Fn.Index,
+ old_func: InternPool.Index,
+ new_func: InternPool.Index,
new_func_ty: Type,
tag: Air.Inst.Tag,
) CompileError!void {
@@ -8802,7 +8421,7 @@ fn zirFunc(
inst,
.none,
target_util.defaultAddressSpace(target, .function),
- FuncLinkSection.default,
+ .default,
cc,
ret_ty,
false,
@@ -8831,10 +8450,7 @@ fn resolveGenericBody(
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
block.params = .{};
- defer {
- block.params.deinit(sema.gpa);
- block.params = prev_params;
- }
+ defer block.params = prev_params;
const uncasted = sema.resolveBody(block, body, func_inst) catch |err| break :err err;
const result = sema.coerce(block, dest_ty, uncasted, src) catch |err| break :err err;
@@ -8952,12 +8568,6 @@ fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc:
}
}
-const FuncLinkSection = union(enum) {
- generic,
- default,
- explicit: InternPool.NullTerminatedString,
-};
-
fn funcCommon(
sema: *Sema,
block: *Block,
@@ -8967,8 +8577,7 @@ fn funcCommon(
alignment: ?Alignment,
/// null means generic poison
address_space: ?std.builtin.AddressSpace,
- /// outer null means generic poison; inner null means default link section
- section: FuncLinkSection,
+ section: InternPool.GetFuncDeclKey.Section,
/// null means generic poison
cc: ?std.builtin.CallingConvention,
/// this might be Type.generic_poison
@@ -8984,6 +8593,8 @@ fn funcCommon(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
+ const target = mod.getTarget();
+ const ip = &mod.intern_pool;
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
const func_src = LazySrcLoc.nodeOffset(src_node_offset);
@@ -9001,367 +8612,323 @@ fn funcCommon(
try sema.checkCallConvSupportsVarArgs(block, cc_src, cc.?);
}
- var destroy_fn_on_error = false;
- const new_func_index = new_func: {
- if (!has_body) break :new_func undefined;
- if (sema.comptime_args_fn_inst == func_inst) {
- const new_func_index = sema.preallocated_new_func.unwrap().?;
- sema.preallocated_new_func = .none; // take ownership
- break :new_func new_func_index;
- }
- destroy_fn_on_error = true;
- var new_func: Module.Fn = undefined;
- // Set this here so that the inferred return type can be printed correctly if it appears in an error.
- new_func.owner_decl = sema.owner_decl_index;
- const new_func_index = try mod.createFunc(new_func);
- break :new_func new_func_index;
- };
- errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index);
-
- const target = mod.getTarget();
- const fn_ty: Type = fn_ty: {
- // In the case of generic calling convention, or generic alignment, we use
- // default values which are only meaningful for the generic function, *not*
- // the instantiation, which can depend on comptime parameters.
- // Related proposal: https://github.com/ziglang/zig/issues/11834
- const cc_resolved = cc orelse .Unspecified;
- const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len);
- var comptime_bits: u32 = 0;
- for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| {
- const is_noalias = blk: {
- const index = std.math.cast(u5, i) orelse break :blk false;
- break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
- };
- dest_param_ty.* = param.ty.toIntern();
- sema.analyzeParameter(
- block,
- .unneeded,
- param,
- &comptime_bits,
- i,
- &is_generic,
- cc_resolved,
- has_body,
- is_noalias,
- ) catch |err| switch (err) {
- error.NeededSourceLocation => {
- const decl = mod.declPtr(block.src_decl);
- try sema.analyzeParameter(
- block,
- Module.paramSrc(src_node_offset, mod, decl, i),
- param,
- &comptime_bits,
- i,
- &is_generic,
- cc_resolved,
- has_body,
- is_noalias,
- );
- unreachable;
- },
- else => |e| return e,
- };
- }
-
- var ret_ty_requires_comptime = false;
- const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
- ret_ty_requires_comptime = ret_comptime;
- break :rp bare_return_type.isGenericPoison();
- } else |err| switch (err) {
- error.GenericPoison => rp: {
- is_generic = true;
- break :rp true;
- },
- else => |e| return e,
- };
+ const is_source_decl = sema.generic_owner == .none;
- const return_type: Type = if (!inferred_error_set or ret_poison)
- bare_return_type
- else blk: {
- try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
- const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
- .func = new_func_index,
- });
- const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
- break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
+ // In the case of generic calling convention, or generic alignment, we use
+ // default values which are only meaningful for the generic function, *not*
+ // the instantiation, which can depend on comptime parameters.
+ // Related proposal: https://github.com/ziglang/zig/issues/11834
+ const cc_resolved = cc orelse .Unspecified;
+ var comptime_bits: u32 = 0;
+ for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
+ const param_ty = param_ty_ip.toType();
+ const is_noalias = blk: {
+ const index = std.math.cast(u5, i) orelse break :blk false;
+ break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
};
-
- if (!return_type.isValidReturnType(mod)) {
- const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
+ const param_src: LazySrcLoc = .{ .fn_proto_param = .{
+ .fn_proto_node_offset = src_node_offset,
+ .param_index = @intCast(i),
+ } };
+ const requires_comptime = try sema.typeRequiresComptime(param_ty);
+ if (param_is_comptime or requires_comptime) {
+ comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error
+ }
+ const this_generic = param_ty.isGenericPoison();
+ is_generic = is_generic or this_generic;
+ if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
+ return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
+ }
+ if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
+ return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
+ }
+ if (!param_ty.isValidParamType(mod)) {
+ const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
const msg = msg: {
- const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
- opaque_str, return_type.fmt(mod),
+ const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
+ opaque_str, param_ty.fmt(mod),
});
- errdefer msg.destroy(gpa);
+ errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, return_type);
+ try sema.addDeclaredHereNote(msg, param_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
- !try sema.validateExternType(return_type, .ret_ty))
- {
+ if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
- return_type.fmt(mod), @tagName(cc_resolved),
+ const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
+ param_ty.fmt(mod), @tagName(cc_resolved),
});
- errdefer msg.destroy(gpa);
+ errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
- try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);
+ try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param_ty, .param_ty);
- try sema.addDeclaredHereNote(msg, return_type);
+ try sema.addDeclaredHereNote(msg, param_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
+ if (is_source_decl and requires_comptime and !param_is_comptime and has_body) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
+ param_ty.fmt(mod),
+ });
+ errdefer msg.destroy(sema.gpa);
- // If the return type is comptime-only but not dependent on parameters then all parameter types also need to be comptime
- if (!sema.is_generic_instantiation and has_body and ret_ty_requires_comptime) comptime_check: {
- for (block.params.items) |param| {
- if (!param.is_comptime) break;
- } else break :comptime_check;
+ const src_decl = mod.declPtr(block.src_decl);
+ try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param_ty);
- const msg = try sema.errMsg(
- block,
- ret_ty_src,
- "function with comptime-only return type '{}' requires all parameters to be comptime",
- .{return_type.fmt(mod)},
- );
- try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type);
-
- const tags = sema.code.instructions.items(.tag);
- const data = sema.code.instructions.items(.data);
- const param_body = sema.code.getParamBody(func_inst);
- for (block.params.items, 0..) |param, i| {
- if (!param.is_comptime) {
- const param_index = param_body[i];
- const param_src = switch (tags[param_index]) {
- .param => data[param_index].pl_tok.src(),
- .param_anytype => data[param_index].str_tok.src(),
- else => unreachable,
- };
- if (param.name.len != 0) {
- try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{param.name});
- } else {
- try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
- }
- }
- }
+ try sema.addDeclaredHereNote(msg, param_ty);
+ break :msg msg;
+ };
return sema.failWithOwnedErrorMsg(msg);
}
+ if (is_source_decl and !this_generic and is_noalias and
+ !(param_ty.zigTypeTag(mod) == .Pointer or param_ty.isPtrLikeOptional(mod)))
+ {
+ return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
+ }
+ }
- const arch = mod.getTarget().cpu.arch;
- if (switch (cc_resolved) {
- .Unspecified, .C, .Naked, .Async, .Inline => null,
- .Interrupt => switch (arch) {
- .x86, .x86_64, .avr, .msp430 => null,
- else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
- },
- .Signal => switch (arch) {
- .avr => null,
- else => @as([]const u8, "AVR"),
- },
- .Stdcall, .Fastcall, .Thiscall => switch (arch) {
- .x86 => null,
- else => @as([]const u8, "x86"),
- },
- .Vectorcall => switch (arch) {
- .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
- else => @as([]const u8, "x86 and AArch64"),
- },
- .APCS, .AAPCS, .AAPCSVFP => switch (arch) {
- .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
- else => @as([]const u8, "ARM"),
- },
- .SysV, .Win64 => switch (arch) {
- .x86_64 => null,
- else => @as([]const u8, "x86_64"),
- },
- .Kernel => switch (arch) {
- .nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
- else => @as([]const u8, "nvptx, amdgcn and SPIR-V"),
- },
- }) |allowed_platform| {
- return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
- @tagName(cc_resolved),
- allowed_platform,
- @tagName(arch),
+ var ret_ty_requires_comptime = false;
+ const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
+ ret_ty_requires_comptime = ret_comptime;
+ break :rp bare_return_type.isGenericPoison();
+ } else |err| switch (err) {
+ error.GenericPoison => rp: {
+ is_generic = true;
+ break :rp true;
+ },
+ else => |e| return e,
+ };
+ const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
+
+ const param_types = block.params.items(.ty);
+
+ const opt_func_index: InternPool.Index = i: {
+ if (is_extern) {
+ assert(comptime_bits == 0);
+ assert(cc != null);
+ assert(section != .generic);
+ assert(address_space != null);
+ assert(!is_generic);
+ break :i try ip.getExternFunc(gpa, .{
+ .param_types = param_types,
+ .noalias_bits = noalias_bits,
+ .return_type = bare_return_type.toIntern(),
+ .cc = cc_resolved,
+ .alignment = alignment.?,
+ .is_var_args = var_args,
+ .decl = sema.owner_decl_index,
+ .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString(
+ gpa,
+ try sema.handleExternLibName(block, .{
+ .node_offset_lib_name = src_node_offset,
+ }, lib_name),
+ )).toOptional() else .none,
});
}
- if (cc_resolved == .Inline and is_noinline) {
- return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
- }
- if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
- is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
+ if (!has_body) break :i .none;
- if (!is_generic and sema.wantErrorReturnTracing(return_type)) {
- // Make sure that StackTrace's fields are resolved so that the backend can
- // lower this fn type.
- const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
- _ = try sema.resolveTypeFields(unresolved_stack_trace_ty);
+ if (is_source_decl) {
+ if (inferred_error_set)
+ try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
+
+ break :i try ip.getFuncDecl(gpa, .{
+ .param_types = param_types,
+ .noalias_bits = noalias_bits,
+ .comptime_bits = comptime_bits,
+ .return_type = bare_return_type.toIntern(),
+ .inferred_error_set = inferred_error_set,
+ .cc = cc,
+ .alignment = alignment,
+ .section = section,
+ .address_space = address_space,
+ .is_var_args = var_args,
+ .is_generic = final_is_generic,
+ .is_noinline = is_noinline,
+
+ .zir_body_inst = func_inst,
+ .lbrace_line = src_locs.lbrace_line,
+ .rbrace_line = src_locs.rbrace_line,
+ .lbrace_column = @as(u16, @truncate(src_locs.columns)),
+ .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
+ });
}
- break :fn_ty try mod.funcType(.{
+ assert(!is_generic);
+ assert(comptime_bits == 0);
+ assert(cc != null);
+ assert(section != .generic);
+ assert(address_space != null);
+ assert(!var_args);
+
+ break :i try ip.getFuncInstance(gpa, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
- .comptime_bits = comptime_bits,
- .return_type = return_type.toIntern(),
+ .return_type = bare_return_type.toIntern(),
.cc = cc_resolved,
- .cc_is_generic = cc == null,
- .alignment = alignment orelse .none,
- .align_is_generic = alignment == null,
- .section_is_generic = section == .generic,
- .addrspace_is_generic = address_space == null,
- .is_var_args = var_args,
- .is_generic = is_generic,
+ .alignment = alignment.?,
.is_noinline = is_noinline,
- });
- };
- sema.owner_decl.@"linksection" = switch (section) {
- .generic => .none,
- .default => .none,
- .explicit => |section_name| section_name.toOptional(),
+ .generic_owner = sema.generic_owner,
+ });
};
- sema.owner_decl.alignment = alignment orelse .none;
- sema.owner_decl.@"addrspace" = address_space orelse .generic;
- if (is_extern) {
- return sema.addConstant((try mod.intern(.{ .extern_func = .{
- .ty = fn_ty.toIntern(),
- .decl = sema.owner_decl_index,
- .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString(
- gpa,
- try sema.handleExternLibName(block, .{
- .node_offset_lib_name = src_node_offset,
- }, lib_name),
- )).toOptional() else .none,
- } })).toValue());
- }
-
- if (!has_body) {
- return sema.addType(fn_ty);
- }
-
- const is_inline = fn_ty.fnCallingConvention(mod) == .Inline;
- const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none;
-
- const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: {
- break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr;
- } else null;
-
- const new_func = mod.funcPtr(new_func_index);
- const hash = new_func.hash;
- const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl;
- new_func.* = .{
- .state = anal_state,
- .zir_body_inst = func_inst,
- .owner_decl = sema.owner_decl_index,
- .generic_owner_decl = generic_owner_decl,
- .comptime_args = comptime_args,
- .hash = hash,
- .lbrace_line = src_locs.lbrace_line,
- .rbrace_line = src_locs.rbrace_line,
- .lbrace_column = @as(u16, @truncate(src_locs.columns)),
- .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
- .branch_quota = default_branch_quota,
- .is_noinline = is_noinline,
- };
- return sema.addConstant((try mod.intern(.{ .func = .{
- .ty = fn_ty.toIntern(),
- .index = new_func_index,
- } })).toValue());
-}
+ const return_type: Type = if (opt_func_index == .none or ret_poison)
+ bare_return_type
+ else
+ ip.funcReturnType(ip.typeOf(opt_func_index)).toType();
-fn analyzeParameter(
- sema: *Sema,
- block: *Block,
- param_src: LazySrcLoc,
- param: Block.Param,
- comptime_bits: *u32,
- i: usize,
- is_generic: *bool,
- cc: std.builtin.CallingConvention,
- has_body: bool,
- is_noalias: bool,
-) !void {
- const mod = sema.mod;
- const requires_comptime = try sema.typeRequiresComptime(param.ty);
- if (param.is_comptime or requires_comptime) {
- comptime_bits.* |= @as(u32, 1) << @as(u5, @intCast(i)); // TODO: handle cast error
- }
- const this_generic = param.ty.isGenericPoison();
- is_generic.* = is_generic.* or this_generic;
- const target = mod.getTarget();
- if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
- return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
- }
- if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
- return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
- }
- if (!param.ty.isValidParamType(mod)) {
- const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
+ if (!return_type.isValidReturnType(mod)) {
+ const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
const msg = msg: {
- const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
- opaque_str, param.ty.fmt(mod),
+ const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
+ opaque_str, return_type.fmt(mod),
});
- errdefer msg.destroy(sema.gpa);
+ errdefer msg.destroy(gpa);
- try sema.addDeclaredHereNote(msg, param.ty);
+ try sema.addDeclaredHereNote(msg, return_type);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
+ if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
+ !try sema.validateExternType(return_type, .ret_ty))
+ {
const msg = msg: {
- const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
- param.ty.fmt(mod), @tagName(cc),
+ const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
+ return_type.fmt(mod), @tagName(cc_resolved),
});
- errdefer msg.destroy(sema.gpa);
+ errdefer msg.destroy(gpa);
const src_decl = mod.declPtr(block.src_decl);
- try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty);
+ try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);
- try sema.addDeclaredHereNote(msg, param.ty);
+ try sema.addDeclaredHereNote(msg, return_type);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) {
- const msg = msg: {
- const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
- param.ty.fmt(mod),
- });
- errdefer msg.destroy(sema.gpa);
- const src_decl = mod.declPtr(block.src_decl);
- try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty);
+ // If the return type is comptime-only but not dependent on parameters then
+ // all parameter types also need to be comptime.
+ if (is_source_decl and opt_func_index != .none and ret_ty_requires_comptime) comptime_check: {
+ for (block.params.items(.is_comptime)) |is_comptime| {
+ if (!is_comptime) break;
+ } else break :comptime_check;
- try sema.addDeclaredHereNote(msg, param.ty);
- break :msg msg;
- };
+ const msg = try sema.errMsg(
+ block,
+ ret_ty_src,
+ "function with comptime-only return type '{}' requires all parameters to be comptime",
+ .{return_type.fmt(mod)},
+ );
+ try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type);
+
+ const tags = sema.code.instructions.items(.tag);
+ const data = sema.code.instructions.items(.data);
+ const param_body = sema.code.getParamBody(func_inst);
+ for (block.params.items(.is_comptime), block.params.items(.name), param_body) |is_comptime, name_nts, param_index| {
+ if (!is_comptime) {
+ const param_src = switch (tags[param_index]) {
+ .param => data[param_index].pl_tok.src(),
+ .param_anytype => data[param_index].str_tok.src(),
+ else => unreachable,
+ };
+ const name = sema.code.nullTerminatedString2(name_nts);
+ if (name.len != 0) {
+ try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{name});
+ } else {
+ try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
+ }
+ }
+ }
return sema.failWithOwnedErrorMsg(msg);
}
- if (!sema.is_generic_instantiation and !this_generic and is_noalias and
- !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod)))
- {
- return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
+
+ const arch = target.cpu.arch;
+ if (switch (cc_resolved) {
+ .Unspecified, .C, .Naked, .Async, .Inline => null,
+ .Interrupt => switch (arch) {
+ .x86, .x86_64, .avr, .msp430 => null,
+ else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
+ },
+ .Signal => switch (arch) {
+ .avr => null,
+ else => @as([]const u8, "AVR"),
+ },
+ .Stdcall, .Fastcall, .Thiscall => switch (arch) {
+ .x86 => null,
+ else => @as([]const u8, "x86"),
+ },
+ .Vectorcall => switch (arch) {
+ .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
+ else => @as([]const u8, "x86 and AArch64"),
+ },
+ .APCS, .AAPCS, .AAPCSVFP => switch (arch) {
+ .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
+ else => @as([]const u8, "ARM"),
+ },
+ .SysV, .Win64 => switch (arch) {
+ .x86_64 => null,
+ else => @as([]const u8, "x86_64"),
+ },
+ .Kernel => switch (arch) {
+ .nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
+ else => @as([]const u8, "nvptx, amdgcn and SPIR-V"),
+ },
+ }) |allowed_platform| {
+ return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
+ @tagName(cc_resolved),
+ allowed_platform,
+ @tagName(arch),
+ });
}
+
+ if (cc_resolved == .Inline and is_noinline) {
+ return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
+ }
+ if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
+
+ if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
+ // Make sure that StackTrace's fields are resolved so that the backend can
+ // lower this fn type.
+ const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
+ _ = try sema.resolveTypeFields(unresolved_stack_trace_ty);
+ }
+
+ return Air.internedToRef(if (opt_func_index == .none) try ip.getFuncType(gpa, .{
+ .param_types = param_types,
+ .noalias_bits = noalias_bits,
+ .comptime_bits = comptime_bits,
+ .return_type = return_type.toIntern(),
+ .cc = cc_resolved,
+ .cc_is_generic = cc == null,
+ .alignment = alignment orelse .none,
+ .align_is_generic = alignment == null,
+ .section_is_generic = section == .generic,
+ .addrspace_is_generic = address_space == null,
+ .is_var_args = var_args,
+ .is_generic = final_is_generic,
+ .is_noinline = is_noinline,
+ }) else opt_func_index);
}
fn zirParam(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
+ param_index: u32,
comptime_syntax: bool,
) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_tok;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
- const param_name = sema.code.nullTerminatedString(extra.data.name);
+ const param_name: Zir.NullTerminatedString = @enumFromInt(extra.data.name);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
// We could be in a generic function instantiation, or we could be evaluating a generic
@@ -9370,15 +8937,11 @@ fn zirParam(
const err = err: {
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
- const prev_preallocated_new_func = sema.preallocated_new_func;
const prev_no_partial_func_type = sema.no_partial_func_ty;
block.params = .{};
- sema.preallocated_new_func = .none;
sema.no_partial_func_ty = true;
defer {
- block.params.deinit(sema.gpa);
block.params = prev_params;
- sema.preallocated_new_func = prev_preallocated_new_func;
sema.no_partial_func_ty = prev_no_partial_func_type;
}
@@ -9390,7 +8953,7 @@ fn zirParam(
};
switch (err) {
error.GenericPoison => {
- if (sema.inst_map.get(inst)) |_| {
+ if (sema.inst_map.contains(inst)) {
// A generic function is about to evaluate to another generic function.
// Return an error instead.
return error.GenericPoison;
@@ -9398,8 +8961,8 @@ fn zirParam(
// The type is not available until the generic instantiation.
// We result the param instruction with a poison value and
// insert an anytype parameter.
- try block.params.append(sema.gpa, .{
- .ty = Type.generic_poison,
+ try block.params.append(sema.arena, .{
+ .ty = .generic_poison_type,
.is_comptime = comptime_syntax,
.name = param_name,
});
@@ -9409,9 +8972,10 @@ fn zirParam(
else => |e| return e,
}
};
+
const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
error.GenericPoison => {
- if (sema.inst_map.get(inst)) |_| {
+ if (sema.inst_map.contains(inst)) {
// A generic function is about to evaluate to another generic function.
// Return an error instead.
return error.GenericPoison;
@@ -9419,8 +8983,8 @@ fn zirParam(
// The type is not available until the generic instantiation.
// We result the param instruction with a poison value and
// insert an anytype parameter.
- try block.params.append(sema.gpa, .{
- .ty = Type.generic_poison,
+ try block.params.append(sema.arena, .{
+ .ty = .generic_poison_type,
.is_comptime = comptime_syntax,
.name = param_name,
});
@@ -9429,8 +8993,9 @@ fn zirParam(
},
else => |e| return e,
} or comptime_syntax;
+
if (sema.inst_map.get(inst)) |arg| {
- if (is_comptime and sema.preallocated_new_func != .none) {
+ if (is_comptime and sema.generic_owner != .none) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
@@ -9440,12 +9005,13 @@ fn zirParam(
// have the callee source location return `GenericPoison`
// so that the instantiation is failed and the coercion
// is handled by comptime call logic instead.
- assert(sema.is_generic_instantiation);
+ assert(sema.generic_owner != .none);
return error.GenericPoison;
},
- else => return err,
+ else => |e| return e,
};
sema.inst_map.putAssumeCapacity(inst, coerced_arg);
+ sema.comptime_args[param_index] = (try sema.resolveConstMaybeUndefVal(block, src, coerced_arg, "parameter is declared comptime")).toIntern();
return;
}
// Even though a comptime argument is provided, the generic function wants to treat
@@ -9453,19 +9019,19 @@ fn zirParam(
assert(sema.inst_map.remove(inst));
}
- if (sema.preallocated_new_func != .none) {
+ if (sema.generic_owner != .none) {
if (try sema.typeHasOnePossibleValue(param_ty)) |opv| {
// In this case we are instantiating a generic function call with a non-comptime
// non-anytype parameter that ended up being a one-possible-type.
// We don't want the parameter to be part of the instantiated function type.
- const result = try sema.addConstant(opv);
- sema.inst_map.putAssumeCapacity(inst, result);
+ sema.inst_map.putAssumeCapacity(inst, Air.internedToRef(opv.toIntern()));
+ sema.comptime_args[param_index] = opv.toIntern();
return;
}
}
- try block.params.append(sema.gpa, .{
- .ty = param_ty,
+ try block.params.append(sema.arena, .{
+ .ty = param_ty.toIntern(),
.is_comptime = comptime_syntax,
.name = param_name,
});
@@ -9473,17 +9039,15 @@ fn zirParam(
if (is_comptime) {
// If this is a comptime parameter we can add a constant generic_poison
// since this is also a generic parameter.
- const result = try sema.addConstant(Value.generic_poison);
- sema.inst_map.putAssumeCapacityNoClobber(inst, result);
+ sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
} else {
// Otherwise we need a dummy runtime instruction.
- const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
+ const result_index: Air.Inst.Index = @intCast(sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .alloc,
.data = .{ .ty = param_ty },
});
- const result = Air.indexToRef(result_index);
- sema.inst_map.putAssumeCapacityNoClobber(inst, result);
+ sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(result_index));
}
}
@@ -9491,24 +9055,34 @@ fn zirParamAnytype(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
+ param_index: u32,
comptime_syntax: bool,
) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
- const param_name = inst_data.get(sema.code);
+ const param_name: Zir.NullTerminatedString = @enumFromInt(inst_data.start);
+ const src = inst_data.src();
if (sema.inst_map.get(inst)) |air_ref| {
const param_ty = sema.typeOf(air_ref);
- if (comptime_syntax or try sema.typeRequiresComptime(param_ty)) {
- // We have a comptime value for this parameter so it should be elided from the
- // function type of the function instruction in this block.
+ // If we have a comptime value for this parameter, it should be elided
+ // from the function type of the function instruction in this block.
+ if (try sema.typeHasOnePossibleValue(param_ty)) |opv| {
+ sema.comptime_args[param_index] = opv.toIntern();
return;
}
- if (null != try sema.typeHasOnePossibleValue(param_ty)) {
+ if (comptime_syntax) {
+ sema.comptime_args[param_index] = (try sema.resolveConstMaybeUndefVal(block, src, air_ref, "parameter is declared comptime")).toIntern();
return;
}
+ if (try sema.typeRequiresComptime(param_ty)) {
+ sema.comptime_args[param_index] = (try sema.resolveConstMaybeUndefVal(block, src, air_ref, "parameter type requires comptime")).toIntern();
+ return;
+ }
+
+ // The parameter is runtime-known.
// The map is already populated but we do need to add a runtime parameter.
- try block.params.append(sema.gpa, .{
- .ty = param_ty,
+ try block.params.append(sema.arena, .{
+ .ty = param_ty.toIntern(),
.is_comptime = false,
.name = param_name,
});
@@ -9517,8 +9091,8 @@ fn zirParamAnytype(
// We are evaluating a generic function without any comptime args provided.
- try block.params.append(sema.gpa, .{
- .ty = Type.generic_poison,
+ try block.params.append(sema.arena, .{
+ .ty = .generic_poison_type,
.is_comptime = comptime_syntax,
.name = param_name,
});
@@ -10673,7 +10247,7 @@ const SwitchProngAnalysis = struct {
return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
}
- var names: Module.Fn.InferredErrorSet.NameMap = .{};
+ var names: Module.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
for (case_vals) |err| {
const err_val = sema.resolveConstValue(block, .unneeded, err, "") catch unreachable;
@@ -11122,7 +10696,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
const error_names = operand_ty.errorSetNames(mod);
- var names: Module.Fn.InferredErrorSet.NameMap = .{};
+ var names: Module.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names) |error_name| {
if (seen_errors.contains(error_name)) continue;
@@ -16295,6 +15869,7 @@ fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[inst].inst_node;
var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?;
// Note: The target closure must be in this scope list.
@@ -16305,8 +15880,8 @@ fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
// Fail this decl if a scope it depended on failed.
if (scope.failed()) {
- if (sema.owner_func) |owner_func| {
- owner_func.state = .dependency_failure;
+ if (sema.owner_func_index != .none) {
+ ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
@@ -16423,8 +15998,8 @@ fn zirBuiltinSrc(
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
- const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
- const fn_owner_decl = mod.declPtr(func.owner_decl);
+ if (sema.func_index == .none) return sema.fail(block, src, "@src outside function", .{});
+ const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
const func_name_val = blk: {
var anon_decl = try block.startAnonDecl();
@@ -16548,10 +16123,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const param_info_decl = mod.declPtr(param_info_decl_index);
const param_info_ty = param_info_decl.val.toType();
- const param_vals = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(ty).?.param_types.len);
+ const func_ty_info = mod.typeToFunc(ty).?;
+ const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
for (param_vals, 0..) |*param_val, i| {
- const info = mod.typeToFunc(ty).?;
- const param_ty = info.param_types[i];
+ const param_ty = func_ty_info.param_types.get(ip)[i];
const is_generic = param_ty == .generic_poison_type;
const param_ty_val = try ip.get(gpa, .{ .opt = .{
.ty = try ip.get(gpa, .{ .opt_type = .type_type }),
@@ -16560,7 +16135,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
- break :blk @as(u1, @truncate(info.noalias_bits >> index)) != 0;
+ break :blk @as(u1, @truncate(func_ty_info.noalias_bits >> index)) != 0;
};
const param_fields = .{
@@ -16603,23 +16178,25 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
- const info = mod.typeToFunc(ty).?;
const ret_ty_opt = try mod.intern(.{ .opt = .{
.ty = try ip.get(gpa, .{ .opt_type = .type_type }),
- .val = if (info.return_type == .generic_poison_type) .none else info.return_type,
+ .val = if (func_ty_info.return_type == .generic_poison_type)
+ .none
+ else
+ func_ty_info.return_type,
} });
const callconv_ty = try sema.getBuiltinType("CallingConvention");
const field_values = .{
// calling_convention: CallingConvention,
- (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(info.cc))).toIntern(),
+ (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
// alignment: comptime_int,
(try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(),
// is_generic: bool,
- Value.makeBool(info.is_generic).toIntern(),
+ Value.makeBool(func_ty_info.is_generic).toIntern(),
// is_var_args: bool,
- Value.makeBool(info.is_var_args).toIntern(),
+ Value.makeBool(func_ty_info.is_var_args).toIntern(),
// return_type: ?type,
ret_ty_opt,
// args: []const Fn.Param,
@@ -18425,9 +18002,12 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
// This is only relevant at runtime.
if (start_block.is_comptime or start_block.is_typeof) return;
- if (!sema.mod.backendSupportsFeature(.error_return_trace)) return;
- if (!sema.owner_func.?.calls_or_awaits_errorable_fn) return;
- if (!sema.mod.comp.bin_file.options.error_return_tracing) return;
+ const mod = sema.mod;
+ const ip = &mod.intern_pool;
+
+ if (!mod.backendSupportsFeature(.error_return_trace)) return;
+ if (!ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
+ if (!mod.comp.bin_file.options.error_return_tracing) return;
const tracy = trace(@src());
defer tracy.end();
@@ -19461,13 +19041,14 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
- if (sema.owner_func != null and
- sema.owner_func.?.calls_or_awaits_errorable_fn and
+ if (sema.owner_func_index != .none and
+ ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
mod.comp.bin_file.options.error_return_tracing and
mod.backendSupportsFeature(.error_return_trace))
{
@@ -19920,7 +19501,7 @@ fn zirReify(
return sema.addType(Type.anyerror);
const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
- var names: Module.Fn.InferredErrorSet.NameMap = .{};
+ var names: Module.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try payload_val.elemValue(mod, i);
@@ -23917,7 +23498,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
} else target_util.defaultAddressSpace(target, .function);
- const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: {
+ const section: InternPool.GetFuncDeclKey.Section = if (extra.data.bits.has_section_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
@@ -23926,20 +23507,20 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const ty = Type.slice_const_u8;
const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
if (val.isGenericPoison()) {
- break :blk FuncLinkSection{ .generic = {} };
+ break :blk .generic;
}
- break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) };
+ break :blk .{ .explicit = try val.toIpString(ty, mod) };
} else if (extra.data.bits.has_section_ref) blk: {
const section_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
extra_index += 1;
const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
error.GenericPoison => {
- break :blk FuncLinkSection{ .generic = {} };
+ break :blk .generic;
},
else => |e| return e,
};
- break :blk FuncLinkSection{ .explicit = section_name };
- } else FuncLinkSection{ .default = {} };
+ break :blk .{ .explicit = section_name };
+ } else .default;
const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
const body_len = sema.code.extra[extra_index];
@@ -24013,7 +23594,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
inst,
@"align",
@"addrspace",
- @"linksection",
+ section,
cc,
ret_ty,
is_var_args,
@@ -24846,9 +24427,9 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
const tv = try mod.declPtr(decl_index).typedValue();
assert(tv.ty.zigTypeTag(mod) == .Fn);
assert(try sema.fnHasRuntimeBits(tv.ty));
- const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap().?;
+ const func_index = tv.val.toIntern();
try mod.ensureFuncBodyAnalysisQueued(func_index);
- mod.panic_func_index = func_index.toOptional();
+ mod.panic_func_index = func_index;
}
if (mod.null_stack_trace == .none) {
@@ -24982,7 +24563,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, msg_inst: Air.Inst.Ref) !void {
try sema.prepareSimplePanic(block);
- const panic_func = mod.funcPtrUnwrap(mod.panic_func_index).?;
+ const panic_func = mod.funcInfo(mod.panic_func_index);
const panic_fn = try sema.analyzeDeclVal(block, .unneeded, panic_func.owner_decl);
const null_stack_trace = try sema.addConstant(mod.null_stack_trace.toValue());
@@ -25688,7 +25269,7 @@ fn fieldCallBind(
if (mod.typeToFunc(decl_type)) |func_type| f: {
if (func_type.param_types.len == 0) break :f;
- const first_param_type = func_type.param_types[0].toType();
+ const first_param_type = func_type.param_types.get(ip)[0].toType();
// zig fmt: off
if (first_param_type.isGenericPoison() or (
first_param_type.zigTypeTag(mod) == .Pointer and
@@ -27526,7 +27107,7 @@ fn coerceExtra(
errdefer msg.destroy(sema.gpa);
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
- const src_decl = mod.declPtr(sema.func.?.owner_decl);
+ const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{});
break :msg msg;
};
@@ -27556,9 +27137,11 @@ fn coerceExtra(
try in_memory_result.report(sema, block, inst_src, msg);
// Add notes about function return type
- if (opts.is_ret and mod.test_functions.get(sema.func.?.owner_decl) == null) {
+ if (opts.is_ret and
+ mod.test_functions.get(mod.funcOwnerDeclIndex(sema.func_index)) == null)
+ {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
- const src_decl = mod.declPtr(sema.func.?.owner_decl);
+ const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{});
} else {
@@ -28185,7 +27768,7 @@ fn coerceInMemoryAllowedErrorSets(
},
}
- if (dst_ies.func == sema.owner_func_index.unwrap()) {
+ if (dst_ies.func == sema.owner_func_index) {
// We are trying to coerce an error set to the current function's
// inferred error set.
try dst_ies.addErrorSet(src_ty, ip, gpa);
@@ -28264,11 +27847,12 @@ fn coerceInMemoryAllowedFns(
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
- {
- const dest_info = mod.typeToFunc(dest_ty).?;
- const src_info = mod.typeToFunc(src_ty).?;
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
+ {
if (dest_info.is_var_args != src_info.is_var_args) {
return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
}
@@ -28302,9 +27886,6 @@ fn coerceInMemoryAllowedFns(
}
const params_len = params_len: {
- const dest_info = mod.typeToFunc(dest_ty).?;
- const src_info = mod.typeToFunc(src_ty).?;
-
if (dest_info.param_types.len != src_info.param_types.len) {
return InMemoryCoercionResult{ .fn_param_count = .{
.actual = src_info.param_types.len,
@@ -28323,13 +27904,10 @@ fn coerceInMemoryAllowedFns(
};
for (0..params_len) |param_i| {
- const dest_info = mod.typeToFunc(dest_ty).?;
- const src_info = mod.typeToFunc(src_ty).?;
-
- const dest_param_ty = dest_info.param_types[param_i].toType();
- const src_param_ty = src_info.param_types[param_i].toType();
+ const dest_param_ty = dest_info.param_types.get(ip)[param_i].toType();
+ const src_param_ty = src_info.param_types.get(ip)[param_i].toType();
- const param_i_small = @as(u5, @intCast(param_i));
+ const param_i_small: u5 = @intCast(param_i);
if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
return InMemoryCoercionResult{ .fn_param_comptime = .{
.index = param_i,
@@ -30471,6 +30049,7 @@ fn addReferencedBy(
fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const decl = mod.declPtr(decl_index);
if (decl.analysis == .in_progress) {
const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{});
@@ -30478,8 +30057,8 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
}
mod.ensureDeclAnalyzed(decl_index) catch |err| {
- if (sema.owner_func) |owner_func| {
- owner_func.state = .dependency_failure;
+ if (sema.owner_func_index != .none) {
+ ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
@@ -30487,10 +30066,12 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
};
}
-fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void {
- sema.mod.ensureFuncBodyAnalyzed(func) catch |err| {
- if (sema.owner_func) |owner_func| {
- owner_func.state = .dependency_failure;
+fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void {
+ const mod = sema.mod;
+ const ip = &mod.intern_pool;
+ mod.ensureFuncBodyAnalyzed(func) catch |err| {
+ if (sema.owner_func_index != .none) {
+ ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
@@ -30566,7 +30147,8 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {
const tv = try decl.typedValue();
if (tv.ty.zigTypeTag(mod) != .Fn) return;
if (!try sema.fnHasRuntimeBits(tv.ty)) return;
- const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn
+ const func_index = tv.val.toIntern();
+ if (!mod.intern_pool.isFuncBody(func_index)) return; // undef or extern function
try mod.ensureFuncBodyAnalysisQueued(func_index);
}
@@ -30582,7 +30164,7 @@ fn analyzeRef(
if (try sema.resolveMaybeUndefVal(operand)) |val| {
switch (mod.intern_pool.indexToKey(val.toIntern())) {
.extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl),
- .func => |func| return sema.analyzeDeclRef(mod.funcPtr(func.index).owner_decl),
+ .func => |func| return sema.analyzeDeclRef(func.owner_decl),
else => {},
}
var anon_decl = try block.startAnonDecl();
@@ -30810,7 +30392,7 @@ fn analyzeIsNonErrComptimeOnly(
if (other_ies.errors.count() != 0) break :blk;
}
- if (ies.func == sema.owner_func_index.unwrap()) {
+ if (ies.func == sema.owner_func_index) {
// We're checking the inferred errorset of the current function and none of
// its child inferred error sets contained any errors meaning that any value
// so far with this type can't contain errors either.
@@ -33275,15 +32857,17 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
const mod = sema.mod;
- try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.return_type.toType());
+ const ip = &mod.intern_pool;
+ const fn_ty_info = mod.typeToFunc(fn_ty).?;
+ try sema.resolveTypeFully(fn_ty_info.return_type.toType());
- if (mod.comp.bin_file.options.error_return_tracing and mod.typeToFunc(fn_ty).?.return_type.toType().isError(mod)) {
+ if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) {
// Ensure the type exists so that backends can assume that.
_ = try sema.getBuiltinType("StackTrace");
}
- for (0..mod.typeToFunc(fn_ty).?.param_types.len) |i| {
- try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.param_types[i].toType());
+ for (0..fn_ty_info.param_types.len) |i| {
+ try sema.resolveTypeFully(fn_ty_info.param_types.get(ip)[i].toType());
}
}
@@ -33448,7 +33032,9 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
// the function is instantiated.
return;
}
- for (info.param_types) |param_ty| {
+ const ip = &mod.intern_pool;
+ for (0..info.param_types.len) |i| {
+ const param_ty = info.param_types.get(ip)[i];
try sema.resolveTypeLayout(param_ty.toType());
}
try sema.resolveTypeLayout(info.return_type.toType());
@@ -33578,10 +33164,8 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
- .func = null,
.func_index = .none,
.fn_ret_ty = Type.void,
- .owner_func = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -33600,10 +33184,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.inlining = null,
.is_comptime = true,
};
- defer {
- assert(block.instructions.items.len == 0);
- block.params.deinit(gpa);
- }
+ defer assert(block.instructions.items.len == 0);
const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
const backing_int_ty = blk: {
@@ -33633,10 +33214,8 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
- .func = null,
.func_index = .none,
.fn_ret_ty = Type.void,
- .owner_func = null,
.owner_func_index = .none,
.comptime_mutable_decls = undefined,
};
@@ -33943,7 +33522,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
// the function is instantiated.
return;
}
- for (info.param_types) |param_ty| {
+ const ip = &mod.intern_pool;
+ for (0..info.param_types.len) |i| {
+ const param_ty = info.param_types.get(ip)[i];
try sema.resolveTypeFully(param_ty.toType());
}
try sema.resolveTypeFully(info.return_type.toType());
@@ -34213,15 +33794,16 @@ fn resolveInferredErrorSet(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- ies_index: Module.Fn.InferredErrorSet.Index,
+ ies_index: Module.InferredErrorSet.Index,
) CompileError!void {
const mod = sema.mod;
+ const ip = &mod.intern_pool;
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_resolved) return;
- const func = mod.funcPtr(ies.func);
- if (func.state == .in_progress) {
+ const func = mod.funcInfo(ies.func);
+ if (func.analysis(ip).state == .in_progress) {
return sema.fail(block, src, "unable to resolve inferred error set", .{});
}
@@ -34229,7 +33811,7 @@ fn resolveInferredErrorSet(
// need to ensure the function body is analyzed of the inferred error set.
// However, in the case of comptime/inline function calls with inferred error sets,
// each call gets a new InferredErrorSet object, which contains the same
- // `Module.Fn.Index`. Not only is the function not relevant to the inferred error set
+ // `InternPool.Index`. Not only is the function not relevant to the inferred error set
// in this case, it may be a generic function which would cause an assertion failure
// if we called `ensureFuncBodyAnalyzed` on it here.
const ies_func_owner_decl = mod.declPtr(func.owner_decl);
@@ -34346,10 +33928,8 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
- .func = null,
.func_index = .none,
.fn_ret_ty = Type.void,
- .owner_func = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -34693,10 +34273,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
- .func = null,
.func_index = .none,
.fn_ret_ty = Type.void,
- .owner_func = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -35148,10 +34726,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
.inlining = null,
.is_comptime = true,
};
- defer {
- block.instructions.deinit(gpa);
- block.params.deinit(gpa);
- }
+ defer block.instructions.deinit(gpa);
const decl_index = try getBuiltinDecl(sema, &block, name);
return sema.analyzeDeclVal(&block, src, decl_index);
@@ -35202,10 +34777,7 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
.inlining = null,
.is_comptime = true,
};
- defer {
- block.instructions.deinit(sema.gpa);
- block.params.deinit(sema.gpa);
- }
+ defer block.instructions.deinit(sema.gpa);
const src = LazySrcLoc.nodeOffset(0);
const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| switch (err) {
@@ -35327,6 +34899,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.type_opaque,
.type_function,
=> null,
+
.simple_type, // handled above
// values, not types
.undef,
@@ -35370,7 +34943,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.float_comptime_float,
.variable,
.extern_func,
- .func,
+ .func_decl,
+ .func_instance,
.only_possible_value,
.union_value,
.bytes,
@@ -35379,6 +34953,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
// memoized value, not types
.memoized_call,
=> unreachable,
+
.type_array_big,
.type_array_small,
.type_vector,
@@ -36772,7 +36347,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
const arena = sema.arena;
const lhs_names = lhs.errorSetNames(mod);
const rhs_names = rhs.errorSetNames(mod);
- var names: Module.Fn.InferredErrorSet.NameMap = .{};
+ var names: Module.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(arena, lhs_names.len);
for (lhs_names) |name| {
src/type.zig
@@ -255,7 +255,7 @@ pub const Type = struct {
const func = ies.func;
try writer.writeAll("@typeInfo(@typeInfo(@TypeOf(");
- const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl);
+ const owner_decl = mod.funcOwnerDeclPtr(func);
try owner_decl.renderFullyQualifiedName(mod, writer);
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
@@ -367,7 +367,8 @@ pub const Type = struct {
try writer.writeAll("noinline ");
}
try writer.writeAll("fn(");
- for (fn_info.param_types, 0..) |param_ty, i| {
+ const param_types = fn_info.param_types.get(&mod.intern_pool);
+ for (param_types, 0..) |param_ty, i| {
if (i != 0) try writer.writeAll(", ");
if (std.math.cast(u5, i)) |index| {
if (fn_info.paramIsComptime(index)) {
@@ -384,7 +385,7 @@ pub const Type = struct {
}
}
if (fn_info.is_var_args) {
- if (fn_info.param_types.len != 0) {
+ if (param_types.len != 0) {
try writer.writeAll(", ");
}
try writer.writeAll("...");
src/TypedValue.zig
@@ -205,7 +205,7 @@ pub fn print(
mod.declPtr(extern_func.decl).name.fmt(ip),
}),
.func => |func| return writer.print("(function '{}')", .{
- mod.declPtr(mod.funcPtr(func.index).owner_decl).name.fmt(ip),
+ mod.declPtr(func.owner_decl).name.fmt(ip),
}),
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
src/value.zig
@@ -473,12 +473,15 @@ pub const Value = struct {
};
}
- pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn {
- return mod.funcPtrUnwrap(val.getFunctionIndex(mod));
+ pub fn isFuncBody(val: Value, mod: *Module) bool {
+ return mod.intern_pool.isFuncBody(val.toIntern());
}
- pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex {
- return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none;
+ pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func {
+ return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ .func => |x| x,
+ else => null,
+ };
}
pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc {
@@ -1462,7 +1465,7 @@ pub const Value = struct {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| variable.decl,
.extern_func => |extern_func| extern_func.decl,
- .func => |func| mod.funcPtr(func.index).owner_decl,
+ .func => |func| func.owner_decl,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| decl,
.mut_decl => |mut_decl| mut_decl.decl,
src/Zir.zig
@@ -90,13 +90,24 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
};
}
-/// Given an index into `string_bytes` returns the null-terminated string found there.
+/// TODO: migrate callers to use this typed index for type safety
+pub const NullTerminatedString = enum(u32) {
+ _,
+};
+
+/// TODO: migrate to nullTerminatedString2 for type safety
pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 {
- var end: usize = index;
+ return nullTerminatedString2(code, @enumFromInt(index));
+}
+
+/// Given an index into `string_bytes` returns the null-terminated string found there.
+pub fn nullTerminatedString2(code: Zir, index: NullTerminatedString) [:0]const u8 {
+ const start = @intFromEnum(index);
+ var end: u32 = start;
while (code.string_bytes[end] != 0) {
end += 1;
}
- return code.string_bytes[index..end :0];
+ return code.string_bytes[start..end :0];
}
pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref {