Commit 17882162b3
Changed files (23)
lib/std/builtin.zig
@@ -143,7 +143,7 @@ pub const Mode = OptimizeMode;
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
-pub const CallingConvention = enum {
+pub const CallingConvention = enum(u8) {
/// This is the default Zig calling convention used when not using `export` on `fn`
/// and no other calling convention is specified.
Unspecified,
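
Giving `CallingConvention` an explicit `u8` tag pins its in-memory representation to exactly one byte, which matters for a type that, per its doc comment, must stay in sync with the compiler's code generation. A minimal standalone sketch of what the fixed tag type guarantees (the enum here is a stand-in, not the real declaration):

    const std = @import("std");

    // Without an explicit tag, Zig picks the smallest integer that fits the
    // fields; `enum(u8)` pins the representation to exactly one byte, so the
    // compiler and std agree on layout.
    const CallingConventionSketch = enum(u8) { Unspecified, C, Naked };

    test "fixed tag type pins size and round-trips" {
        try std.testing.expectEqual(@as(usize, 1), @sizeOf(CallingConventionSketch));
        const cc = CallingConventionSketch.Naked;
        try std.testing.expectEqual(cc, @intToEnum(CallingConventionSketch, @enumToInt(cc)));
    }
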
src/arch/aarch64/CodeGen.zig
@@ -472,7 +472,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
fn gen(self: *Self) !void {
const mod = self.bin_file.options.module.?;
- const cc = self.fn_type.fnCallingConvention();
+ const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// stp fp, lr, [sp, #-16]!
_ = try self.addInst(.{
@@ -1146,7 +1146,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
.stack_offset => blk: {
// self.ret_mcv is an address to where this function
// should store its result into
- const ret_ty = self.fn_type.fnReturnType();
+ const ret_ty = self.fn_type.fnReturnType(mod);
const ptr_ty = try mod.singleMutPtrType(ret_ty);
// addr_reg will contain the address of where to store the
@@ -4271,7 +4271,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.return_value == .stack_offset) {
log.debug("airCall: return by reference", .{});
- const ret_ty = fn_ty.fnReturnType();
+ const ret_ty = fn_ty.fnReturnType(mod);
const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
@@ -4428,10 +4428,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const ret_ty = self.fn_type.fnReturnType();
- const mod = self.bin_file.options.module.?;
+ const ret_ty = self.fn_type.fnReturnType(mod);
switch (self.ret_mcv) {
.none => {},
@@ -4460,10 +4460,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const ret_ty = self.fn_type.fnReturnType();
+ const ret_ty = self.fn_type.fnReturnType(mod);
switch (self.ret_mcv) {
.none => {},
@@ -4483,7 +4484,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = Air.refToIndex(un_op).?;
if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
- const mod = self.bin_file.options.module.?;
const abi_size = @intCast(u32, ret_ty.abiSize(mod));
const abi_align = ret_ty.abiAlignment(mod);
@@ -6226,12 +6226,11 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
- const cc = fn_ty.fnCallingConvention();
- const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
- defer self.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+ const mod = self.bin_file.options.module.?;
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const cc = fn_info.cc;
var result: CallMCValues = .{
- .args = try self.gpa.alloc(MCValue, param_types.len),
+ .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
// These undefined values must be populated before returning from this function.
.return_value = undefined,
.stack_byte_count = undefined,
@@ -6239,8 +6238,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
};
errdefer self.gpa.free(result.args);
- const ret_ty = fn_ty.fnReturnType();
- const mod = self.bin_file.options.module.?;
+ const ret_ty = fn_ty.fnReturnType(mod);
switch (cc) {
.Naked => {
@@ -6271,8 +6269,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.abiSize(mod));
+ for (fn_info.param_types, 0..) |ty, i| {
+ const param_size = @intCast(u32, ty.toType().abiSize(mod));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
continue;
@@ -6280,14 +6278,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
// values to spread across odd-numbered registers.
- if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) {
+ if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}
if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
if (param_size <= 8) {
- result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) };
+ result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) };
ncrn += 1;
} else {
return self.fail("TODO MCValues with multiple registers", .{});
@@ -6298,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
ncrn = 8;
// TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
// that the entire stack space consumed by the arguments is 8-byte aligned.
- if (ty.abiAlignment(mod) == 8) {
+ if (ty.toType().abiAlignment(mod) == 8) {
if (nsaa % 8 != 0) {
nsaa += 8 - (nsaa % 8);
}
@@ -6336,10 +6334,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types, 0..) |ty, i| {
- if (ty.abiSize(mod) > 0) {
- const param_size = @intCast(u32, ty.abiSize(mod));
- const param_alignment = ty.abiAlignment(mod);
+ for (fn_info.param_types, 0..) |ty, i| {
+ if (ty.toType().abiSize(mod) > 0) {
+ const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset };
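
The same mechanical migration repeats across the aarch64, arm, riscv64, and sparc64 backends below: instead of copying parameter types into a heap-allocated `[]Type` via `fnParamTypes`, the code borrows the interned function info from the Module (`mod.typeToFunc(fn_ty).?`) and rewraps each `InternPool.Index` entry with `toType()` at the point of use. A hypothetical miniature of that access pattern (the real `InternPool` and `Type` are far richer; every name here is a stand-in):

    const std = @import("std");

    // Stand-in for `InternPool.Index`: an interned type handle.
    const Index = enum(u32) {
        u8_type,
        u64_type,
        _,

        // Mirrors `InternPool.Index.toType()`: rewrap the handle as a Type.
        fn toType(i: Index) TypeSketch {
            return .{ .ip_index = i };
        }
    };

    // Stand-in for `Type`, now just a wrapper around the interned index.
    const TypeSketch = struct {
        ip_index: Index,

        fn abiSize(ty: TypeSketch) u64 {
            return switch (ty.ip_index) {
                .u8_type => 1,
                .u64_type => 8,
                _ => unreachable,
            };
        }
    };

    // Stand-in for `InternPool.Key.FuncType`: param types are borrowed as
    // interned indexes, so no `gpa.alloc`/`free` scratch buffer is needed.
    const FuncTypeSketch = struct {
        param_types: []const Index,
    };

    test "iterate interned param types" {
        const fn_info: FuncTypeSketch = .{ .param_types = &.{ .u8_type, .u64_type } };
        var total: u64 = 0;
        for (fn_info.param_types) |ty| total += ty.toType().abiSize();
        try std.testing.expectEqual(@as(u64, 9), total);
    }
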
src/arch/arm/CodeGen.zig
@@ -478,7 +478,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
fn gen(self: *Self) !void {
const mod = self.bin_file.options.module.?;
- const cc = self.fn_type.fnCallingConvention();
+ const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// push {fp, lr}
const push_reloc = try self.addNop();
@@ -1123,7 +1123,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
.stack_offset => blk: {
// self.ret_mcv is an address to where this function
// should store its result into
- const ret_ty = self.fn_type.fnReturnType();
+ const ret_ty = self.fn_type.fnReturnType(mod);
const ptr_ty = try mod.singleMutPtrType(ret_ty);
// addr_reg will contain the address of where to store the
@@ -4250,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// untouched by the parameter passing code
const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
- const ret_ty = fn_ty.fnReturnType();
+ const ret_ty = fn_ty.fnReturnType(mod);
const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
@@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (RegisterManager.indexOfRegIntoTracked(reg) == null) {
// Save function return value into a tracked register
log.debug("airCall: copying {} as it is not tracked", .{reg});
- const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value);
+ const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value);
break :result MCValue{ .register = new_reg };
}
},
@@ -4374,10 +4374,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const ret_ty = self.fn_type.fnReturnType();
- const mod = self.bin_file.options.module.?;
+ const ret_ty = self.fn_type.fnReturnType(mod);
switch (self.ret_mcv) {
.none => {},
@@ -4406,10 +4406,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const ret_ty = self.fn_type.fnReturnType();
+ const ret_ty = self.fn_type.fnReturnType(mod);
switch (self.ret_mcv) {
.none => {},
@@ -4429,7 +4430,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = Air.refToIndex(un_op).?;
if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
- const mod = self.bin_file.options.module.?;
const abi_size = @intCast(u32, ret_ty.abiSize(mod));
const abi_align = ret_ty.abiAlignment(mod);
@@ -6171,12 +6171,11 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
- const cc = fn_ty.fnCallingConvention();
- const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
- defer self.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+ const mod = self.bin_file.options.module.?;
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const cc = fn_info.cc;
var result: CallMCValues = .{
- .args = try self.gpa.alloc(MCValue, param_types.len),
+ .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
// These undefined values must be populated before returning from this function.
.return_value = undefined,
.stack_byte_count = undefined,
@@ -6184,8 +6183,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
};
errdefer self.gpa.free(result.args);
- const ret_ty = fn_ty.fnReturnType();
- const mod = self.bin_file.options.module.?;
+ const ret_ty = fn_ty.fnReturnType(mod);
switch (cc) {
.Naked => {
@@ -6219,11 +6217,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types, 0..) |ty, i| {
- if (ty.abiAlignment(mod) == 8)
+ for (fn_info.param_types, 0..) |ty, i| {
+ if (ty.toType().abiAlignment(mod) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);
- const param_size = @intCast(u32, ty.abiSize(mod));
+ const param_size = @intCast(u32, ty.toType().abiSize(mod));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6235,7 +6233,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return self.fail("TODO MCValues split between registers and stack", .{});
} else {
ncrn = 4;
- if (ty.abiAlignment(mod) == 8)
+ if (ty.toType().abiAlignment(mod) == 8)
nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);
result.args[i] = .{ .stack_argument_offset = nsaa };
@@ -6269,10 +6267,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types, 0..) |ty, i| {
- if (ty.abiSize(mod) > 0) {
- const param_size = @intCast(u32, ty.abiSize(mod));
- const param_alignment = ty.abiAlignment(mod);
+ for (fn_info.param_types, 0..) |ty, i| {
+ if (ty.toType().abiSize(mod) > 0) {
+ const param_size = @intCast(u32, ty.toType().abiSize(mod));
+ const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset };
src/arch/riscv64/CodeGen.zig
@@ -347,7 +347,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
}
fn gen(self: *Self) !void {
- const cc = self.fn_type.fnCallingConvention();
+ const mod = self.bin_file.options.module.?;
+ const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// TODO Finish function prologue and epilogue for riscv64.
@@ -1803,7 +1804,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn ret(self: *Self, mcv: MCValue) !void {
- const ret_ty = self.fn_type.fnReturnType();
+ const mod = self.bin_file.options.module.?;
+ const ret_ty = self.fn_type.fnReturnType(mod);
try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
// Just add space for an instruction, patch this later
const index = try self.addInst(.{
@@ -2621,12 +2623,11 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
- const cc = fn_ty.fnCallingConvention();
- const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
- defer self.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+ const mod = self.bin_file.options.module.?;
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const cc = fn_info.cc;
var result: CallMCValues = .{
- .args = try self.gpa.alloc(MCValue, param_types.len),
+ .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
// These undefined values must be populated before returning from this function.
.return_value = undefined,
.stack_byte_count = undefined,
@@ -2634,8 +2635,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
};
errdefer self.gpa.free(result.args);
- const ret_ty = fn_ty.fnReturnType();
- const mod = self.bin_file.options.module.?;
+ const ret_ty = fn_ty.fnReturnType(mod);
switch (cc) {
.Naked => {
@@ -2655,8 +2655,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var next_stack_offset: u32 = 0;
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
- for (param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.abiSize(mod));
+ for (fn_info.param_types, 0..) |ty, i| {
+ const param_size = @intCast(u32, ty.toType().abiSize(mod));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
src/arch/sparc64/CodeGen.zig
@@ -363,7 +363,8 @@ pub fn generate(
}
fn gen(self: *Self) !void {
- const cc = self.fn_type.fnCallingConvention();
+ const mod = self.bin_file.options.module.?;
+ const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// TODO Finish function prologue and epilogue for sparc64.
@@ -4458,12 +4459,11 @@ fn realStackOffset(off: u32) u32 {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
- const cc = fn_ty.fnCallingConvention();
- const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
- defer self.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+ const mod = self.bin_file.options.module.?;
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const cc = fn_info.cc;
var result: CallMCValues = .{
- .args = try self.gpa.alloc(MCValue, param_types.len),
+ .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
// These undefined values must be populated before returning from this function.
.return_value = undefined,
.stack_byte_count = undefined,
@@ -4471,8 +4471,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
errdefer self.gpa.free(result.args);
- const ret_ty = fn_ty.fnReturnType();
- const mod = self.bin_file.options.module.?;
+ const ret_ty = fn_ty.fnReturnType(mod);
switch (cc) {
.Naked => {
@@ -4495,8 +4494,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
.callee => abi.c_abi_int_param_regs_callee_view,
};
- for (param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.abiSize(mod));
+ for (fn_info.param_types, 0..) |ty, i| {
+ const param_size = @intCast(u32, ty.toType().abiSize(mod));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
@@ -4580,7 +4579,8 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
}
fn ret(self: *Self, mcv: MCValue) !void {
- const ret_ty = self.fn_type.fnReturnType();
+ const mod = self.bin_file.options.module.?;
+ const ret_ty = self.fn_type.fnReturnType(mod);
try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
// Just add space for a branch instruction, patch this later
src/arch/wasm/CodeGen.zig
@@ -1145,7 +1145,7 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
fn genFunctype(
gpa: Allocator,
cc: std.builtin.CallingConvention,
- params: []const Type,
+ params: []const InternPool.Index,
return_type: Type,
mod: *Module,
) !wasm.Type {
@@ -1170,7 +1170,8 @@ fn genFunctype(
}
// param types
- for (params) |param_type| {
+ for (params) |param_type_ip| {
+ const param_type = param_type_ip.toType();
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
switch (cc) {
@@ -1234,9 +1235,9 @@ pub fn generate(
}
fn genFunc(func: *CodeGen) InnerError!void {
- const fn_info = func.decl.ty.fnInfo();
const mod = func.bin_file.base.options.module.?;
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1345,10 +1346,8 @@ const CallWValues = struct {
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
const mod = func.bin_file.base.options.module.?;
- const cc = fn_ty.fnCallingConvention();
- const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen());
- defer func.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const cc = fn_info.cc;
var result: CallWValues = .{
.args = &.{},
.return_value = .none,
@@ -1360,8 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
- const fn_info = fn_ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
+ if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1370,8 +1368,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
- for (param_types) |ty| {
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ for (fn_info.param_types) |ty| {
+ if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
continue;
}
@@ -1380,8 +1378,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
}
},
.C => {
- for (param_types) |ty| {
- const ty_classes = abi.classifyType(ty, mod);
+ for (fn_info.param_types) |ty| {
+ const ty_classes = abi.classifyType(ty.toType(), mod);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -2095,11 +2093,11 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
}
fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const fn_info = func.decl.ty.fnInfo();
- const ret_ty = fn_info.return_type;
- const mod = func.bin_file.base.options.module.?;
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ const ret_ty = fn_info.return_type.toType();
// result must be stored in the stack and we return a pointer
// to the stack instead
@@ -2146,8 +2144,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.allocStack(Type.usize); // create pointer to void
}
- const fn_info = func.decl.ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
break :result func.return_value;
}
@@ -2163,12 +2161,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(un_op);
const ret_ty = func.typeOf(un_op).childType(mod);
- const fn_info = func.decl.ty.fnInfo();
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (ret_ty.isError(mod)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
+ } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2191,9 +2189,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
.Pointer => ty.childType(mod),
else => unreachable,
};
- const ret_ty = fn_ty.fnReturnType();
- const fn_info = fn_ty.fnInfo();
- const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod);
+ const ret_ty = fn_ty.fnReturnType(mod);
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod);
const callee: ?Decl.Index = blk: {
const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null;
@@ -2203,8 +2201,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = mod.declPtr(extern_fn.data.owner_decl);
- const ext_info = ext_decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod);
+ const ext_info = mod.typeToFunc(ext_decl.ty).?;
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
const atom = func.bin_file.getAtomPtr(atom_index);
@@ -2235,7 +2233,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const arg_ty = func.typeOf(arg);
if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
+ try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
}
if (callee) |direct| {
@@ -2248,7 +2246,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
@@ -2264,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
- } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
+ } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
const scalar_type = abi.scalarType(ret_ty, mod);
@@ -2528,7 +2526,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const arg_index = func.arg_index;
const arg = func.args[arg_index];
- const cc = func.decl.ty.fnInfo().cc;
+ const cc = mod.typeToFunc(func.decl.ty).?.cc;
const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
const arg_classes = abi.classifyType(arg_ty, mod);
@@ -2647,9 +2645,9 @@ fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) Inner
}
switch (op) {
- .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }),
- .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }),
- .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }),
+ .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+ .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
.xor => {
const result = try func.allocStack(ty);
try func.emitWValue(result);
@@ -2839,7 +2837,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
};
// fma requires three operands
- var param_types_buffer: [3]Type = .{ ty, ty, ty };
+ var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index };
const param_types = param_types_buffer[0..args.len];
return func.callIntrinsic(fn_name, param_types, ty, args);
}
@@ -5298,7 +5296,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
// call __extendhfsf2(f16) f32
const f32_result = try func.callIntrinsic(
"__extendhfsf2",
- &.{Type.f16},
+ &.{.f16_type},
Type.f32,
&.{operand},
);
@@ -5316,7 +5314,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
target_util.compilerRtFloatAbbrev(wanted_bits),
}) catch unreachable;
- return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+ return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
}
fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@@ -5347,7 +5345,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
} else operand;
// call __truncsfhf2(f32) f16
- return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op});
+ return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
}
var fn_name_buf: [12]u8 = undefined;
@@ -5356,7 +5354,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
target_util.compilerRtFloatAbbrev(wanted_bits),
}) catch unreachable;
- return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+ return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
}
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@@ -5842,7 +5840,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = try func.callIntrinsic(
"__multi3",
- &[_]Type{Type.i64} ** 4,
+ &[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ lhs, lhs_shifted, rhs, rhs_shifted },
);
@@ -5866,19 +5864,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mul1 = try func.callIntrinsic(
"__multi3",
- &[_]Type{Type.i64} ** 4,
+ &[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ lhs_lsb, zero, rhs_msb, zero },
);
const mul2 = try func.callIntrinsic(
"__multi3",
- &[_]Type{Type.i64} ** 4,
+ &[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ rhs_lsb, zero, lhs_msb, zero },
);
const mul3 = try func.callIntrinsic(
"__multi3",
- &[_]Type{Type.i64} ** 4,
+ &[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ lhs_msb, zero, rhs_msb, zero },
);
@@ -5977,7 +5975,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// call to compiler-rt `fn fmaf(f32, f32, f32) f32`
var result = try func.callIntrinsic(
"fmaf",
- &.{ Type.f32, Type.f32, Type.f32 },
+ &.{ .f32_type, .f32_type, .f32_type },
Type.f32,
&.{ rhs_ext, lhs_ext, addend_ext },
);
@@ -6707,7 +6705,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn callIntrinsic(
func: *CodeGen,
name: []const u8,
- param_types: []const Type,
+ param_types: []const InternPool.Index,
return_type: Type,
args: []const WValue,
) InnerError!WValue {
@@ -6735,8 +6733,8 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod));
- try func.lowerArg(.C, param_types[arg_i], arg);
+ assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod));
+ try func.lowerArg(.C, param_types[arg_i].toType(), arg);
}
// Actually call our intrinsic
@@ -6938,7 +6936,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.end));
const slice_ty = Type.const_slice_u8_sentinel_0;
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
+ const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod);
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
}
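
In the wasm backend, `callIntrinsic` and `genFunctype` now take `[]const InternPool.Index` instead of `[]const Type`, so well-known types are written as named index tags (`.f32_type`, `.i64_type`) and arbitrary types are lowered with `ty.toIntern()` or `ty.ip_index`. A hypothetical sketch of why the named tags keep call sites cheap (the enum values here are invented; the real pool reserves many such indexes):

    const std = @import("std");

    // Invented miniature: frequently used types get reserved, named tags in
    // the pool, so a parameter list is just an inline array of enum values.
    const Index = enum(u32) { f16_type, f32_type, f64_type, i32_type, i64_type, _ };

    fn callIntrinsicSketch(name: []const u8, param_types: []const Index) usize {
        _ = name;
        return param_types.len; // stand-in for lowering each interned param
    }

    test "named interned indexes as a parameter list" {
        // Mirrors the diff's `&.{ .f32_type, .f32_type, .f32_type }` for fmaf.
        const n = callIntrinsicSketch("fmaf", &.{ .f32_type, .f32_type, .f32_type });
        try std.testing.expectEqual(@as(usize, 3), n);
    }
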
src/arch/x86_64/CodeGen.zig
@@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig");
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const Target = std.Target;
const Type = @import("../../type.zig").Type;
const TypedValue = @import("../../TypedValue.zig");
@@ -697,7 +698,8 @@ pub fn generate(
FrameAlloc.init(.{ .size = 0, .alignment = 1 }),
);
- var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) {
+ const fn_info = mod.typeToFunc(fn_type).?;
+ var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(
@@ -1566,7 +1568,7 @@ fn asmMemoryRegisterImmediate(
fn gen(self: *Self) InnerError!void {
const mod = self.bin_file.options.module.?;
- const cc = self.fn_type.fnCallingConvention();
+ const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
try self.asmRegister(.{ ._, .push }, .rbp);
const backpatch_push_callee_preserved_regs = try self.asmPlaceholder();
@@ -8042,7 +8044,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
else => unreachable,
};
- var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame);
+ const fn_info = mod.typeToFunc(fn_ty).?;
+
+ var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame);
defer info.deinit(self);
// We need a properly aligned and sized call frame to be able to call this function.
@@ -8083,7 +8087,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const ret_lock = switch (info.return_value.long) {
.none, .unreach => null,
.indirect => |reg_off| lock: {
- const ret_ty = fn_ty.fnReturnType();
+ const ret_ty = fn_info.return_type.toType();
const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod));
try self.genSetReg(reg_off.reg, Type.usize, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
@@ -8199,9 +8203,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const ret_ty = self.fn_type.fnReturnType();
+ const ret_ty = self.fn_type.fnReturnType(mod);
switch (self.ret_mcv.short) {
.none => {},
.register => try self.genCopy(ret_ty, self.ret_mcv.short, operand),
@@ -11683,18 +11688,23 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(
self: *Self,
- fn_ty: Type,
+ fn_info: InternPool.Key.FuncType,
var_args: []const Air.Inst.Ref,
stack_frame_base: FrameIndex,
) !CallMCValues {
const mod = self.bin_file.options.module.?;
- const cc = fn_ty.fnCallingConvention();
- const param_len = fn_ty.fnParamLen();
- const param_types = try self.gpa.alloc(Type, param_len + var_args.len);
+ const cc = fn_info.cc;
+ const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
defer self.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+
+ for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| {
+ dest.* = src.toType();
+ }
// TODO: promote var arg types
- for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg);
+ for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| {
+ param_ty.* = self.typeOf(arg);
+ }
+
var result: CallMCValues = .{
.args = try self.gpa.alloc(MCValue, param_types.len),
// These undefined values must be populated before returning from this function.
@@ -11704,7 +11714,7 @@ fn resolveCallingConventionValues(
};
errdefer self.gpa.free(result.args);
- const ret_ty = fn_ty.fnReturnType();
+ const ret_ty = fn_info.return_type.toType();
switch (cc) {
.Naked => {
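
One detail in the rewritten x86_64 `resolveCallingConventionValues` above: the copy into the var-args scratch buffer uses Zig's multi-sequence `for`, iterating the destination slice by pointer and the source by value in lockstep (the operands must have equal length). A minimal self-contained illustration:

    const std = @import("std");

    test "lockstep copy with a multi-sequence for loop" {
        const src = [_]u32{ 1, 2, 3 };
        var dest: [3]u64 = undefined;
        // Destination captured by pointer, source by value; same element count.
        for (&dest, &src) |*d, s| d.* = s;
        try std.testing.expectEqualSlices(u64, &.{ 1, 2, 3 }, &dest);
    }
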
src/codegen/c/type.zig
@@ -1720,7 +1720,7 @@ pub const CType = extern union {
.Opaque => self.init(.void),
.Fn => {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
if (!info.is_generic) {
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
@@ -1728,10 +1728,10 @@ pub const CType = extern union {
.complete, .parameter, .global => .parameter,
.payload => unreachable,
};
- _ = try lookup.typeToIndex(info.return_type, param_kind);
+ _ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
- _ = try lookup.typeToIndex(param_type, param_kind);
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ _ = try lookup.typeToIndex(param_type.toType(), param_kind);
}
}
self.init(if (info.is_var_args) .varargs_function else .function);
@@ -2013,7 +2013,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (kind) {
.forward, .forward_parameter => .forward_parameter,
@@ -2023,21 +2023,21 @@ pub const CType = extern union {
var c_params_len: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
- params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?;
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?;
c_param_i += 1;
}
const fn_pl = try arena.create(Payload.Function);
fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?,
+ .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?,
.param_types = params_pl,
} };
return initPayload(fn_pl);
@@ -2145,7 +2145,7 @@ pub const CType = extern union {
=> {
if (ty.zigTypeTag(mod) != .Fn) return false;
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const data = cty.cast(Payload.Function).?.data;
const param_kind: Kind = switch (self.kind) {
@@ -2154,18 +2154,18 @@ pub const CType = extern union {
.payload => unreachable,
};
- if (!self.eqlRecurse(info.return_type, data.return_type, param_kind))
+ if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind))
return false;
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
const param_cty = data.param_types[c_param_i];
c_param_i += 1;
- if (!self.eqlRecurse(param_type, param_cty, param_kind))
+ if (!self.eqlRecurse(param_type.toType(), param_cty, param_kind))
return false;
}
return c_param_i == data.param_types.len;
@@ -2258,7 +2258,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (self.kind) {
.forward, .forward_parameter => .forward_parameter,
@@ -2266,10 +2266,10 @@ pub const CType = extern union {
.payload => unreachable,
};
- self.updateHasherRecurse(hasher, info.return_type, param_kind);
+ self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind);
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
- self.updateHasherRecurse(hasher, param_type, param_kind);
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ self.updateHasherRecurse(hasher, param_type.toType(), param_kind);
}
},
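
Throughout this file, parameters are filtered with `hasRuntimeBitsIgnoreComptime` before being given a C type, because zero-bit parameters have no C representation (and generic function types, asserted against above, have none at all). A small language-level illustration of the idea, with the C prototype shown only conceptually:

    const std = @import("std");

    // `void` is a zero-bit type: it exists in the Zig signature but would
    // simply be dropped from the lowered C prototype, conceptually:
    //     uint32_t takesZeroBit(uint32_t x);
    fn takesZeroBit(x: u32, _: void) u32 {
        return x;
    }

    test "zero-bit parameters carry no runtime data" {
        try std.testing.expectEqual(@as(u32, 7), takesZeroBit(7, {}));
    }
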
src/codegen/c.zig
@@ -1507,7 +1507,7 @@ pub const DeclGen = struct {
const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind);
- const fn_info = fn_decl.ty.fnInfo();
+ const fn_info = mod.typeToFunc(fn_decl.ty).?;
if (fn_info.cc == .Naked) {
switch (kind) {
.forward => try w.writeAll("zig_naked_decl "),
@@ -1517,7 +1517,7 @@ pub const DeclGen = struct {
}
if (fn_decl.val.castTag(.function)) |func_payload|
if (func_payload.data.is_cold) try w.writeAll("zig_cold ");
- if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn ");
+ if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
const trailing = try renderTypePrefix(
dg.decl_index,
@@ -3455,7 +3455,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
} else {
try reap(f, inst, &.{un_op});
// Not even allowed to return void in a naked function.
- if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true)
+ if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true)
try writer.writeAll("return;\n");
}
return .none;
@@ -4094,7 +4094,7 @@ fn airCall(
) !CValue {
const mod = f.object.dg.module;
// Not even allowed to call panic in a naked function.
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
+ if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;
const gpa = f.object.dg.gpa;
const writer = f.object.writer();
@@ -4143,7 +4143,7 @@ fn airCall(
else => unreachable,
};
- const ret_ty = fn_ty.fnReturnType();
+ const ret_ty = fn_ty.fnReturnType(mod);
const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
const result_local = result: {
@@ -4622,8 +4622,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnreach(f: *Function) !CValue {
+ const mod = f.object.dg.module;
// Not even allowed to call unreachable in a naked function.
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
+ if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;
try f.object.writer().writeAll("zig_unreachable();\n");
return .none;
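
The `.Naked` guards in this file share a rationale: a naked function body has no prologue or epilogue and no normal return path, so the C backend must not emit `return;`, panic calls, or `zig_unreachable();` into it. A sketch of what such a function looks like on the Zig side (not from the commit):

    // A naked function: the compiler emits no prologue/epilogue, so control
    // flow must be handled manually, typically via inline assembly.
    fn nakedStub() callconv(.Naked) noreturn {
        asm volatile ("");
        unreachable;
    }

    comptime {
        _ = &nakedStub; // force analysis so the sketch is checked
    }
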
src/codegen/llvm.zig
@@ -954,17 +954,17 @@ pub const Object = struct {
builder.positionBuilderAtEnd(entry_block);
// This gets the LLVM values from the function and stores them in `dg.args`.
- const fn_info = decl.ty.fnInfo();
+ const fn_info = mod.typeToFunc(decl.ty).?;
const sret = firstParamSRet(fn_info, mod);
const ret_ptr = if (sret) llvm_func.getParam(0) else null;
const gpa = dg.gpa;
- if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) {
+ if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) {
.signed => dg.addAttr(llvm_func, 0, "signext"),
.unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
};
- const err_return_tracing = fn_info.return_type.isError(mod) and
+ const err_return_tracing = fn_info.return_type.toType().isError(mod) and
mod.comp.bin_file.options.error_return_tracing;
const err_ret_trace = if (err_return_tracing)
@@ -986,7 +986,7 @@ pub const Object = struct {
.byval => {
assert(!it.byval_attr);
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
+ const param_ty = fn_info.param_types[param_index].toType();
const param = llvm_func.getParam(llvm_arg_i);
try args.ensureUnusedCapacity(1);
@@ -1005,7 +1005,7 @@ pub const Object = struct {
llvm_arg_i += 1;
},
.byref => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
const alignment = param_ty.abiAlignment(mod);
@@ -1024,7 +1024,7 @@ pub const Object = struct {
}
},
.byref_mut => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
const alignment = param_ty.abiAlignment(mod);
@@ -1044,7 +1044,7 @@ pub const Object = struct {
},
.abi_sized_int => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1071,7 +1071,7 @@ pub const Object = struct {
},
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -1104,7 +1104,7 @@ pub const Object = struct {
.multiple_llvm_types => {
assert(!it.byval_attr);
const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
@@ -1135,7 +1135,7 @@ pub const Object = struct {
args.appendAssumeCapacity(casted);
},
.float_array => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1153,7 +1153,7 @@ pub const Object = struct {
}
},
.i32_array, .i64_array => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1182,7 +1182,7 @@ pub const Object = struct {
const line_number = decl.src_line + 1;
const is_internal_linkage = decl.val.tag() != .extern_fn and
!mod.decl_exports.contains(decl_index);
- const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
+ const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
llvm.DIFlags.NoReturn
else
0;
@@ -2331,26 +2331,26 @@ pub const Object = struct {
return full_di_ty;
},
.Fn => {
- const fn_info = ty.fnInfo();
+ const fn_info = mod.typeToFunc(ty).?;
var param_di_types = std.ArrayList(*llvm.DIType).init(gpa);
defer param_di_types.deinit();
// Return type goes first.
- if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
const sret = firstParamSRet(fn_info, mod);
- const di_ret_ty = if (sret) Type.void else fn_info.return_type;
+ const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType();
try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
if (sret) {
- const ptr_ty = try mod.singleMutPtrType(fn_info.return_type);
+ const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType());
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
} else {
try param_di_types.append(try o.lowerDebugType(Type.void, .full));
}
- if (fn_info.return_type.isError(mod) and
+ if (fn_info.return_type.toType().isError(mod) and
o.module.comp.bin_file.options.error_return_tracing)
{
const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType());
@@ -2358,13 +2358,13 @@ pub const Object = struct {
}
for (fn_info.param_types) |param_ty| {
- if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- if (isByRef(param_ty, mod)) {
- const ptr_ty = try mod.singleMutPtrType(param_ty);
+ if (isByRef(param_ty.toType(), mod)) {
+ const ptr_ty = try mod.singleMutPtrType(param_ty.toType());
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
} else {
- try param_di_types.append(try o.lowerDebugType(param_ty, .full));
+ try param_di_types.append(try o.lowerDebugType(param_ty.toType(), .full));
}
}
@@ -2565,7 +2565,7 @@ pub const DeclGen = struct {
if (gop.found_existing) return gop.value_ptr.*;
assert(decl.has_tv);
- const fn_info = zig_fn_type.fnInfo();
+ const fn_info = mod.typeToFunc(zig_fn_type).?;
const target = mod.getTarget();
const sret = firstParamSRet(fn_info, mod);
@@ -2598,11 +2598,11 @@ pub const DeclGen = struct {
dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
dg.addArgAttr(llvm_fn, 0, "noalias");
- const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type);
+ const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType());
llvm_fn.addSretAttr(raw_llvm_ret_ty);
}
- const err_return_tracing = fn_info.return_type.isError(mod) and
+ const err_return_tracing = fn_info.return_type.toType().isError(mod) and
mod.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
@@ -2626,13 +2626,13 @@ pub const DeclGen = struct {
}
if (fn_info.alignment != 0) {
- llvm_fn.setAlignment(fn_info.alignment);
+ llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment));
}
// Function attributes that are independent of analysis results of the function body.
dg.addCommonFnAttributes(llvm_fn);
- if (fn_info.return_type.isNoReturn()) {
+ if (fn_info.return_type == .noreturn_type) {
dg.addFnAttr(llvm_fn, "noreturn");
}
@@ -2645,15 +2645,15 @@ pub const DeclGen = struct {
while (it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
+ const param_ty = fn_info.param_types[param_index].toType();
if (!isByRef(param_ty, mod)) {
dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_ty = fn_info.param_types[it.zig_index - 1];
- const param_llvm_ty = try dg.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(mod);
+ const param_llvm_ty = try dg.lowerType(param_ty.toType());
+ const alignment = param_ty.toType().abiAlignment(mod);
dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => {
@@ -3142,7 +3142,7 @@ pub const DeclGen = struct {
fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
const mod = dg.module;
- const fn_info = fn_ty.fnInfo();
+ const fn_info = mod.typeToFunc(fn_ty).?;
const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);
var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);
@@ -3152,7 +3152,7 @@ pub const DeclGen = struct {
try llvm_params.append(dg.context.pointerType(0));
}
- if (fn_info.return_type.isError(mod) and
+ if (fn_info.return_type.toType().isError(mod) and
mod.comp.bin_file.options.error_return_tracing)
{
const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType());
@@ -3163,19 +3163,19 @@ pub const DeclGen = struct {
while (it.next()) |lowering| switch (lowering) {
.no_bits => continue,
.byval => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
try llvm_params.append(try dg.lowerType(param_ty));
},
.byref, .byref_mut => {
try llvm_params.append(dg.context.pointerType(0));
},
.abi_sized_int => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
try llvm_params.append(dg.context.intType(abi_size * 8));
},
.slice => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod)
@@ -3195,7 +3195,7 @@ pub const DeclGen = struct {
try llvm_params.append(dg.context.intType(16));
},
.float_array => |count| {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
const field_count = @intCast(c_uint, count);
const arr_ty = float_ty.arrayType(field_count);
@@ -3223,7 +3223,7 @@ pub const DeclGen = struct {
const mod = dg.module;
const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
.Opaque => true,
- .Fn => !elem_ty.fnInfo().is_generic,
+ .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
.Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
};
@@ -4204,7 +4204,7 @@ pub const DeclGen = struct {
const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
- (is_fn_body and decl.ty.fnInfo().is_generic))
+ (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic))
{
return self.lowerPtrToVoid(tv.ty);
}
@@ -4354,7 +4354,7 @@ pub const DeclGen = struct {
llvm_fn: *llvm.Value,
param_ty: Type,
param_index: u32,
- fn_info: Type.Payload.Function.Data,
+ fn_info: InternPool.Key.FuncType,
llvm_arg_i: u32,
) void {
const mod = dg.module;
@@ -4774,8 +4774,8 @@ pub const FuncGen = struct {
.Pointer => callee_ty.childType(mod),
else => unreachable,
};
- const fn_info = zig_fn_ty.fnInfo();
- const return_type = fn_info.return_type;
+ const fn_info = mod.typeToFunc(zig_fn_ty).?;
+ const return_type = fn_info.return_type.toType();
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = mod.getTarget();
const sret = firstParamSRet(fn_info, mod);
@@ -4790,7 +4790,7 @@ pub const FuncGen = struct {
break :blk ret_ptr;
};
- const err_return_tracing = fn_info.return_type.isError(mod) and
+ const err_return_tracing = return_type.isError(mod) and
self.dg.module.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
try llvm_args.append(self.err_ret_trace.?);
@@ -4971,14 +4971,14 @@ pub const FuncGen = struct {
while (it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
+ const param_ty = fn_info.param_types[param_index].toType();
if (!isByRef(param_ty, mod)) {
self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
+ const param_ty = fn_info.param_types[param_index].toType();
const param_llvm_ty = try self.dg.lowerType(param_ty);
const alignment = param_ty.abiAlignment(mod);
self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
@@ -4998,7 +4998,7 @@ pub const FuncGen = struct {
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const ptr_info = param_ty.ptrInfo(mod);
const llvm_arg_i = it.llvm_index - 2;
@@ -5023,7 +5023,7 @@ pub const FuncGen = struct {
};
}
- if (return_type.isNoReturn() and attr != .AlwaysTail) {
+ if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) {
return null;
}
@@ -5088,9 +5088,9 @@ pub const FuncGen = struct {
_ = self.builder.buildRetVoid();
return null;
}
- const fn_info = self.dg.decl.ty.fnInfo();
+ const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- if (fn_info.return_type.isError(mod)) {
+ if (fn_info.return_type.toType().isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5135,9 +5135,9 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.typeOf(un_op);
const ret_ty = ptr_ty.childType(mod);
- const fn_info = self.dg.decl.ty.fnInfo();
+ const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- if (fn_info.return_type.isError(mod)) {
+ if (fn_info.return_type.toType().isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -6148,25 +6148,21 @@ pub const FuncGen = struct {
defer self.gpa.free(fqn);
const is_internal_linkage = !mod.decl_exports.contains(decl_index);
- var fn_ty_pl: Type.Payload.Function = .{
- .base = .{ .tag = .function },
- .data = .{
- .param_types = &.{},
- .comptime_params = undefined,
- .return_type = Type.void,
- .alignment = 0,
- .noalias_bits = 0,
- .cc = .Unspecified,
- .is_var_args = false,
- .is_generic = false,
- .is_noinline = false,
- .align_is_generic = false,
- .cc_is_generic = false,
- .section_is_generic = false,
- .addrspace_is_generic = false,
- },
- };
- const fn_ty = Type.initPayload(&fn_ty_pl.base);
+ const fn_ty = try mod.funcType(.{
+ .param_types = &.{},
+ .return_type = .void_type,
+ .alignment = 0,
+ .noalias_bits = 0,
+ .comptime_bits = 0,
+ .cc = .Unspecified,
+ .is_var_args = false,
+ .is_generic = false,
+ .is_noinline = false,
+ .align_is_generic = false,
+ .cc_is_generic = false,
+ .section_is_generic = false,
+ .addrspace_is_generic = false,
+ });
const subprogram = dib.createFunction(
di_file.toScope(),
decl.name,
@@ -10546,31 +10542,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
}
}
-fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool {
- if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
+ if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;
const target = mod.getTarget();
switch (fn_info.cc) {
- .Unspecified, .Inline => return isByRef(fn_info.return_type, mod),
+ .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
- .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
- else => return firstParamSRetSystemV(fn_info.return_type, mod),
+ .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+ else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
},
- .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect,
- .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory,
- .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
+ .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
+ .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
+ .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
},
- .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory,
+ .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
- .SysV => return firstParamSRetSystemV(fn_info.return_type, mod),
- .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
- .Stdcall => return !isScalar(mod, fn_info.return_type),
+ .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
+ .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+ .Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
else => return false,
}
}
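For readers new to the sret convention this predicate drives: when a return value cannot travel in registers, the caller allocates memory for it and passes the address as a hidden first parameter. A rough sketch of the lowering, not taken from this commit:

    // fn f() S, where S classifies as .memory, lowers roughly to:
    //   declare void @f(ptr sret(%S) %ret)
    // The caller allocas an S and passes its address; the callee stores
    // the result through %ret instead of returning it.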
@@ -10585,13 +10581,14 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
/// In order to support the C calling convention, some return types need to be lowered
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
const mod = dg.module;
- if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ const return_type = fn_info.return_type.toType();
+ if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- if (fn_info.return_type.isError(mod)) {
+ if (return_type.isError(mod)) {
return dg.lowerType(Type.anyerror);
} else {
return dg.context.voidType();
@@ -10600,61 +10597,61 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => {
- if (isByRef(fn_info.return_type, mod)) {
+ if (isByRef(return_type, mod)) {
return dg.context.voidType();
} else {
- return dg.lowerType(fn_info.return_type);
+ return dg.lowerType(return_type);
}
},
.C => {
switch (target.cpu.arch) {
- .mips, .mipsel => return dg.lowerType(fn_info.return_type),
+ .mips, .mipsel => return dg.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(dg, fn_info),
else => return lowerSystemVFnRetTy(dg, fn_info),
},
.wasm32 => {
- if (isScalar(mod, fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
}
- const classes = wasm_c_abi.classifyType(fn_info.return_type, mod);
+ const classes = wasm_c_abi.classifyType(return_type, mod);
if (classes[0] == .indirect or classes[0] == .none) {
return dg.context.voidType();
}
assert(classes[0] == .direct and classes[1] == .none);
- const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod);
+ const scalar_type = wasm_c_abi.scalarType(return_type, mod);
const abi_size = scalar_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
},
.aarch64, .aarch64_be => {
- switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) {
+ switch (aarch64_c_abi.classifyType(return_type, mod)) {
.memory => return dg.context.voidType(),
- .float_array => return dg.lowerType(fn_info.return_type),
- .byval => return dg.lowerType(fn_info.return_type),
+ .float_array => return dg.lowerType(return_type),
+ .byval => return dg.lowerType(return_type),
.integer => {
- const bit_size = fn_info.return_type.bitSize(mod);
+ const bit_size = return_type.bitSize(mod);
return dg.context.intType(@intCast(c_uint, bit_size));
},
.double_integer => return dg.context.intType(64).arrayType(2),
}
},
.arm, .armeb => {
- switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
+ switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return dg.context.voidType(),
.i32_array => |len| if (len == 1) {
return dg.context.intType(32);
} else {
return dg.context.voidType();
},
- .byval => return dg.lowerType(fn_info.return_type),
+ .byval => return dg.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
- switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) {
+ switch (riscv_c_abi.classifyType(return_type, mod)) {
.memory => return dg.context.voidType(),
.integer => {
- const bit_size = fn_info.return_type.bitSize(mod);
+ const bit_size = return_type.bitSize(mod);
return dg.context.intType(@intCast(c_uint, bit_size));
},
.double_integer => {
@@ -10664,50 +10661,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
};
return dg.context.structType(&llvm_types_buffer, 2, .False);
},
- .byval => return dg.lowerType(fn_info.return_type),
+ .byval => return dg.lowerType(return_type),
}
},
// TODO investigate C ABI for other architectures
- else => return dg.lowerType(fn_info.return_type),
+ else => return dg.lowerType(return_type),
}
},
.Win64 => return lowerWin64FnRetTy(dg, fn_info),
.SysV => return lowerSystemVFnRetTy(dg, fn_info),
.Stdcall => {
- if (isScalar(mod, fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
} else {
return dg.context.voidType();
}
},
- else => return dg.lowerType(fn_info.return_type),
+ else => return dg.lowerType(return_type),
}
}
-fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
const mod = dg.module;
- switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) {
+ const return_type = fn_info.return_type.toType();
+ switch (x86_64_abi.classifyWindows(return_type, mod)) {
.integer => {
- if (isScalar(mod, fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
} else {
- const abi_size = fn_info.return_type.abiSize(mod);
+ const abi_size = return_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
},
.win_i128 => return dg.context.intType(64).vectorType(2),
.memory => return dg.context.voidType(),
- .sse => return dg.lowerType(fn_info.return_type),
+ .sse => return dg.lowerType(return_type),
else => unreachable,
}
}
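A concrete (assumed) illustration of the Win64 branches above, where non-scalar aggregates of register size are widened to an integer of abi_size * 8 bits:

    // extern struct { a: u32, b: u16, c: u16 }   (abi_size == 8)
    //   -> .integer, returned as the LLVM type i64 (8 * 8 bits)
    // an i128 return value                       (win_i128)
    //   -> returned as <2 x i64>
    // any aggregate larger than 8 bytes
    //   -> .memory, the prototype returns void and the sret pointer is used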
-fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
const mod = dg.module;
- if (isScalar(mod, fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ const return_type = fn_info.return_type.toType();
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
}
- const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret);
+ const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
if (classes[0] == .memory) {
return dg.context.voidType();
}
@@ -10748,7 +10747,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
}
}
if (classes[0] == .integer and classes[1] == .none) {
- const abi_size = fn_info.return_type.abiSize(mod);
+ const abi_size = return_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
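A worked (assumed) example of the non-scalar SysV path, where each eightbyte is classified independently and the result is bitcast back to the Zig type at the call site:

    // extern struct { a: u64, b: f64 }
    //   classes = { .integer, .sse }  -> returns { i64, double } (rax + xmm0)
    // extern struct { a: u32, b: u32 }   (one eightbyte)
    //   classes = { .integer, .none } -> returns i64 (abi_size 8 * 8 bits)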
@@ -10756,7 +10755,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
const ParamTypeIterator = struct {
dg: *DeclGen,
- fn_info: Type.Payload.Function.Data,
+ fn_info: InternPool.Key.FuncType,
zig_index: u32,
llvm_index: u32,
llvm_types_len: u32,
@@ -10781,7 +10780,7 @@ const ParamTypeIterator = struct {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const ty = it.fn_info.param_types[it.zig_index];
it.byval_attr = false;
- return nextInner(it, ty);
+ return nextInner(it, ty.toType());
}
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
@@ -10793,7 +10792,7 @@ const ParamTypeIterator = struct {
return nextInner(it, fg.typeOf(args[it.zig_index]));
}
} else {
- return nextInner(it, it.fn_info.param_types[it.zig_index]);
+ return nextInner(it, it.fn_info.param_types[it.zig_index].toType());
}
}
@@ -11009,7 +11008,7 @@ const ParamTypeIterator = struct {
}
};
-fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {
+fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator {
return .{
.dg = dg,
.fn_info = fn_info,
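A note on the iterator's two counters, inferred from the fields above: `zig_index` walks `fn_info.param_types` (source-level parameters), while `llvm_index` walks the lowered prototype, which can run ahead when one Zig parameter splits into several LLVM arguments, or behind when a parameter lowers to nothing. A sketch under those assumptions:

    // zig_index:   0          1           2
    // params:      struct S   (zero-bit)  u8
    // llvm_index:  0, 1       (skipped)   2   <- S split in two; zero-bit dropped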
src/codegen/spirv.zig
@@ -1227,8 +1227,9 @@ pub const DeclGen = struct {
},
.Fn => switch (repr) {
.direct => {
+ const fn_info = mod.typeToFunc(ty).?;
// TODO: Put this somewhere in Sema.zig
- if (ty.fnIsVarArgs())
+ if (fn_info.is_var_args)
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen());
@@ -1546,18 +1547,17 @@ pub const DeclGen = struct {
assert(decl.ty.zigTypeTag(mod) == .Fn);
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
+ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)),
.id_result = decl_id,
.function_control = .{}, // TODO: We can set inline here if the type requires it.
.function_type = prototype_id,
});
- const params = decl.ty.fnParamLen();
- var i: usize = 0;
+ const fn_info = mod.typeToFunc(decl.ty).?;
- try self.args.ensureUnusedCapacity(self.gpa, params);
- while (i < params) : (i += 1) {
- const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
+ try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
+ for (fn_info.param_types) |param_type| {
+ const param_type_id = try self.resolveTypeId(param_type.toType());
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
@@ -3338,10 +3338,10 @@ pub const DeclGen = struct {
.Pointer => return self.fail("cannot call function pointers", .{}),
else => unreachable,
};
- const fn_info = zig_fn_ty.fnInfo();
+ const fn_info = mod.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
- const result_type_id = try self.resolveTypeId(return_type);
+ const result_type_id = try self.resolveTypeId(return_type.toType());
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
@@ -3368,11 +3368,11 @@ pub const DeclGen = struct {
.id_ref_3 = params[0..n_params],
});
- if (return_type.isNoReturn()) {
+ if (return_type == .noreturn_type) {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
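A pattern worth calling out, since it recurs throughout this commit: with types interned, well-known types are named `Index` tags, so checks that used to be `Type` method calls become plain integer comparisons:

    // return_type == .noreturn_type       // was: return_type.isNoReturn()
    // param_ty == .generic_poison_type    // was: param_ty.isGenericPoison()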
src/link/Coff.zig
@@ -1430,7 +1430,7 @@ pub fn updateDeclExports(
.x86 => std.builtin.CallingConvention.Stdcall,
else => std.builtin.CallingConvention.C,
};
- const decl_cc = exported_decl.ty.fnCallingConvention();
+ const decl_cc = exported_decl.ty.fnCallingConvention(mod);
if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and
self.base.options.link_libc)
{
src/link/Dwarf.zig
@@ -1022,7 +1022,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
const decl_name_with_null = decl_name[0 .. decl_name.len + 1];
try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
- const fn_ret_type = decl.ty.fnReturnType();
+ const fn_ret_type = decl.ty.fnReturnType(mod);
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram));
src/link/SpirV.zig
@@ -131,12 +131,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index)
pub fn updateDeclExports(
self: *SpirV,
- module: *Module,
+ mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
- const decl = module.declPtr(decl_index);
- if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) {
+ const decl = mod.declPtr(decl_index);
+ if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) {
// TODO: Unify with resolveDecl in spirv.zig.
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
src/Air.zig
@@ -845,7 +845,6 @@ pub const Inst = struct {
pub const Ref = enum(u32) {
u1_type = @enumToInt(InternPool.Index.u1_type),
- u5_type = @enumToInt(InternPool.Index.u5_type),
u8_type = @enumToInt(InternPool.Index.u8_type),
i8_type = @enumToInt(InternPool.Index.i8_type),
u16_type = @enumToInt(InternPool.Index.u16_type),
@@ -914,8 +913,8 @@ pub const Inst = struct {
zero_u8 = @enumToInt(InternPool.Index.zero_u8),
one = @enumToInt(InternPool.Index.one),
one_usize = @enumToInt(InternPool.Index.one_usize),
- one_u5 = @enumToInt(InternPool.Index.one_u5),
- four_u5 = @enumToInt(InternPool.Index.four_u5),
+ one_u8 = @enumToInt(InternPool.Index.one_u8),
+ four_u8 = @enumToInt(InternPool.Index.four_u8),
negative_one = @enumToInt(InternPool.Index.negative_one),
calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
@@ -1383,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
- return callee_ty.fnReturnType();
+ return callee_ty.fnReturnTypeIp(ip);
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
src/codegen.zig
@@ -1081,7 +1081,7 @@ fn genDeclRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (tv.ty.castPtrToFn(mod)) |fn_ty| {
- if (fn_ty.fnInfo().is_generic) {
+ if (mod.typeToFunc(fn_ty).?.is_generic) {
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
}
} else if (tv.ty.zigTypeTag(mod) == .Pointer) {
src/InternPool.zig
@@ -148,6 +148,7 @@ pub const Key = union(enum) {
union_type: UnionType,
opaque_type: OpaqueType,
enum_type: EnumType,
+ func_type: FuncType,
/// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
/// via `simple_value` and has a named `Index` tag for it.
@@ -185,6 +186,13 @@ pub const Key = union(enum) {
/// If zero use pointee_type.abiAlignment()
/// When creating pointer types, if alignment is equal to pointee type
/// abi alignment, this value should be set to 0 instead.
+ ///
+ /// Please don't change this to u32 or u29. If you want to save bits,
+ /// migrate the rest of the codebase to use the `Alignment` type rather
+ /// than using byte units. The LLVM backend can only handle `c_uint`
+ /// byte units; we can emit a semantic analysis error if an alignment
+ /// that overflows that amount is requested, but this shouldn't affect
+ /// the other backends.
alignment: u64 = 0,
/// If this is non-zero it means the pointer points to a sub-byte
/// range of data, which is backed by a "host integer" with this
@@ -358,6 +366,44 @@ pub const Key = union(enum) {
}
};
+ pub const FuncType = struct {
+ param_types: []Index,
+ return_type: Index,
+ /// Tells whether a parameter is comptime. See `paramIsComptime` helper
+ /// method for accessing this.
+ comptime_bits: u32,
+ /// Tells whether a parameter is noalias. See `paramIsNoalias` helper
+ /// method for accessing this.
+ noalias_bits: u32,
+ /// If zero use default target function code alignment.
+ ///
+ /// Please don't change this to u32 or u29. If you want to save bits,
+ /// migrate the rest of the codebase to use the `Alignment` type rather
+ /// than using byte units. The LLVM backend can only handle `c_uint`
+ /// byte units; we can emit a semantic analysis error if an alignment
+ /// that overflows that amount is requested, but this shouldn't affect
+ /// the other backends.
+ alignment: u64,
+ cc: std.builtin.CallingConvention,
+ is_var_args: bool,
+ is_generic: bool,
+ is_noinline: bool,
+ align_is_generic: bool,
+ cc_is_generic: bool,
+ section_is_generic: bool,
+ addrspace_is_generic: bool,
+
+ pub fn paramIsComptime(self: @This(), i: u5) bool {
+ assert(i < self.param_types.len);
+ return @truncate(u1, self.comptime_bits >> i) != 0;
+ }
+
+ pub fn paramIsNoalias(self: @This(), i: u5) bool {
+ assert(i < self.param_types.len);
+ return @truncate(u1, self.noalias_bits >> i) != 0;
+ }
+ };
+
pub const Int = struct {
ty: Index,
storage: Storage,
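The `FuncType` struct above replaces the old `comptime_params: [*]bool` side-array with a packed `u32` bitset. A minimal runnable sketch of the convention that `paramIsComptime` wraps:

    const std = @import("std");

    test "one comptime flag per parameter, packed in a u32" {
        const comptime_bits: u32 = 0b101; // parameters 0 and 2 are comptime
        try std.testing.expect(@truncate(u1, comptime_bits >> 0) != 0);
        try std.testing.expect(@truncate(u1, comptime_bits >> 1) == 0);
        try std.testing.expect(@truncate(u1, comptime_bits >> 2) != 0);
    }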
@@ -512,6 +558,18 @@ pub const Key = union(enum) {
for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem);
for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem);
},
+
+ .func_type => |func_type| {
+ for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type);
+ std.hash.autoHash(hasher, func_type.return_type);
+ std.hash.autoHash(hasher, func_type.comptime_bits);
+ std.hash.autoHash(hasher, func_type.noalias_bits);
+ std.hash.autoHash(hasher, func_type.alignment);
+ std.hash.autoHash(hasher, func_type.cc);
+ std.hash.autoHash(hasher, func_type.is_var_args);
+ std.hash.autoHash(hasher, func_type.is_generic);
+ std.hash.autoHash(hasher, func_type.is_noinline);
+ },
}
}
@@ -670,6 +728,20 @@ pub const Key = union(enum) {
std.mem.eql(Index, a_info.values, b_info.values) and
std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
},
+
+ .func_type => |a_info| {
+ const b_info = b.func_type;
+
+ return std.mem.eql(Index, a_info.param_types, b_info.param_types) and
+ a_info.return_type == b_info.return_type and
+ a_info.comptime_bits == b_info.comptime_bits and
+ a_info.noalias_bits == b_info.noalias_bits and
+ a_info.alignment == b_info.alignment and
+ a_info.cc == b_info.cc and
+ a_info.is_var_args == b_info.is_var_args and
+ a_info.is_generic == b_info.is_generic and
+ a_info.is_noinline == b_info.is_noinline;
+ },
}
}
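Because both the hash and the equality check above are structural over every field, constructing the same function type twice yields a single interned item. A hypothetical consequence, using the `funcType` helper added in src/Module.zig below:

    // `info` is any fully-populated InternPool.Key.FuncType:
    const a = try mod.funcType(info);
    const b = try mod.funcType(info);
    assert(a.ip_index == b.ip_index); // deduplicated; type equality is a u32 compare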
@@ -687,6 +759,7 @@ pub const Key = union(enum) {
.opaque_type,
.enum_type,
.anon_struct_type,
+ .func_type,
=> .type_type,
inline .ptr,
@@ -734,7 +807,6 @@ pub const Index = enum(u32) {
pub const last_value: Index = .empty_struct;
u1_type,
- u5_type,
u8_type,
i8_type,
u16_type,
@@ -811,10 +883,10 @@ pub const Index = enum(u32) {
one,
/// `1` (usize)
one_usize,
- /// `1` (u5)
- one_u5,
- /// `4` (u5)
- four_u5,
+ /// `1` (u8)
+ one_u8,
+ /// `4` (u8)
+ four_u8,
/// `-1` (comptime_int)
negative_one,
/// `std.builtin.CallingConvention.C`
@@ -880,12 +952,6 @@ pub const static_keys = [_]Key{
.bits = 1,
} },
- // u5_type
- .{ .int_type = .{
- .signedness = .unsigned,
- .bits = 5,
- } },
-
.{ .int_type = .{
.signedness = .unsigned,
.bits = 8,
@@ -1074,14 +1140,14 @@ pub const static_keys = [_]Key{
.storage = .{ .u64 = 1 },
} },
- // one_u5
+ // one_u8
.{ .int = .{
- .ty = .u5_type,
+ .ty = .u8_type,
.storage = .{ .u64 = 1 },
} },
- // four_u5
+ // four_u8
.{ .int = .{
- .ty = .u5_type,
+ .ty = .u8_type,
.storage = .{ .u64 = 4 },
} },
// negative_one
@@ -1092,12 +1158,12 @@ pub const static_keys = [_]Key{
// calling_convention_c
.{ .enum_tag = .{
.ty = .calling_convention_type,
- .int = .one_u5,
+ .int = .one_u8,
} },
// calling_convention_inline
.{ .enum_tag = .{
.ty = .calling_convention_type,
- .int = .four_u5,
+ .int = .four_u8,
} },
.{ .simple_value = .void },
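For context on why `u8` replaces `u5` here: these two constants exist only to back the calling-convention enum tags, which are now interned as `u8` integers. The mapping, with tag values matching the `.u64` storage above:

    // @enumToInt(std.builtin.CallingConvention.C)      == 1  -> one_u8
    // @enumToInt(std.builtin.CallingConvention.Inline) == 4  -> four_u8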
@@ -1181,6 +1247,9 @@ pub const Tag = enum(u8) {
/// An untagged union type which has a safety tag.
/// `data` is `Module.Union.Index`.
type_union_safety,
+ /// A function body type.
+ /// `data` is extra index to `TypeFunction`.
+ type_function,
/// Typed `undefined`.
/// `data` is `Index` of the type.
@@ -1283,6 +1352,29 @@ pub const Tag = enum(u8) {
aggregate,
};
+/// Trailing:
+/// 0. param_type: Index for each params_len
+pub const TypeFunction = struct {
+ params_len: u32,
+ return_type: Index,
+ comptime_bits: u32,
+ noalias_bits: u32,
+ flags: Flags,
+
+ pub const Flags = packed struct(u32) {
+ alignment: Alignment,
+ cc: std.builtin.CallingConvention,
+ is_var_args: bool,
+ is_generic: bool,
+ is_noinline: bool,
+ align_is_generic: bool,
+ cc_is_generic: bool,
+ section_is_generic: bool,
+ addrspace_is_generic: bool,
+ _: u11 = 0,
+ };
+};
+
/// Trailing:
/// 0. element: Index for each len
/// len is determined by the aggregate type.
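A quick bit-budget check of `TypeFunction.Flags`, assuming `std.builtin.CallingConvention` occupies 8 bits as an enum(u8):

    //  alignment      u6   (log2 encoding, see `Alignment` below)
    //  cc             u8
    //  7 bool flags   u7
    //  explicit pad   u11
    //  total          6 + 8 + 7 + 11 = 32, matching packed struct(u32)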
@@ -1371,24 +1463,6 @@ pub const Pointer = struct {
flags: Flags,
packed_offset: PackedOffset,
- /// Stored as a power-of-two, with one special value to indicate none.
- pub const Alignment = enum(u6) {
- none = std.math.maxInt(u6),
- _,
-
- pub fn toByteUnits(a: Alignment, default: u64) u64 {
- return switch (a) {
- .none => default,
- _ => @as(u64, 1) << @enumToInt(a),
- };
- }
-
- pub fn fromByteUnits(n: u64) Alignment {
- if (n == 0) return .none;
- return @intToEnum(Alignment, @ctz(n));
- }
- };
-
pub const Flags = packed struct(u32) {
size: Size,
alignment: Alignment,
@@ -1409,6 +1483,24 @@ pub const Pointer = struct {
pub const VectorIndex = Key.PtrType.VectorIndex;
};
+/// Stored as a power-of-two, with one special value to indicate none.
+pub const Alignment = enum(u6) {
+ none = std.math.maxInt(u6),
+ _,
+
+ pub fn toByteUnits(a: Alignment, default: u64) u64 {
+ return switch (a) {
+ .none => default,
+ _ => @as(u64, 1) << @enumToInt(a),
+ };
+ }
+
+ pub fn fromByteUnits(n: u64) Alignment {
+ if (n == 0) return .none;
+ return @intToEnum(Alignment, @ctz(n));
+ }
+};
+
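The hoisted enum is unchanged apart from its new home. A self-contained mirror of it, so the log2 round-trip can be sanity-checked in isolation:

    const std = @import("std");

    // Standalone copy of the enum above, for testing only.
    const Alignment = enum(u6) {
        none = std.math.maxInt(u6),
        _,

        fn toByteUnits(a: Alignment, default: u64) u64 {
            return switch (a) {
                .none => default,
                _ => @as(u64, 1) << @enumToInt(a),
            };
        }

        fn fromByteUnits(n: u64) Alignment {
            if (n == 0) return .none;
            return @intToEnum(Alignment, @ctz(n));
        }
    };

    test "alignment round-trips through the log2 encoding" {
        // 16 bytes is stored as the exponent 4 and decodes back to 16.
        try std.testing.expectEqual(@as(u64, 16), Alignment.fromByteUnits(16).toByteUnits(0));
        // 0 maps to .none, which decodes to the supplied default.
        try std.testing.expectEqual(@as(u64, 8), Alignment.fromByteUnits(0).toByteUnits(8));
    }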
/// Used for non-sentineled arrays that have length fitting in u32, as well as
/// vectors.
pub const Vector = struct {
@@ -1765,6 +1857,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
},
.type_enum_explicit => indexToKeyEnum(ip, data, .explicit),
.type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive),
+ .type_function => .{ .func_type = indexToKeyFuncType(ip, data) },
.undef => .{ .undef = @intToEnum(Index, data) },
.opt_null => .{ .opt = .{
@@ -1896,6 +1989,29 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
};
}
+fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType {
+ const type_function = ip.extraDataTrail(TypeFunction, data);
+ const param_types = @ptrCast(
+ []Index,
+ ip.extra.items[type_function.end..][0..type_function.data.params_len],
+ );
+ return .{
+ .param_types = param_types,
+ .return_type = type_function.data.return_type,
+ .comptime_bits = type_function.data.comptime_bits,
+ .noalias_bits = type_function.data.noalias_bits,
+ .alignment = type_function.data.flags.alignment.toByteUnits(0),
+ .cc = type_function.data.flags.cc,
+ .is_var_args = type_function.data.flags.is_var_args,
+ .is_generic = type_function.data.flags.is_generic,
+ .is_noinline = type_function.data.flags.is_noinline,
+ .align_is_generic = type_function.data.flags.align_is_generic,
+ .cc_is_generic = type_function.data.flags.cc_is_generic,
+ .section_is_generic = type_function.data.flags.section_is_generic,
+ .addrspace_is_generic = type_function.data.flags.addrspace_is_generic,
+ };
+}
+
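The trailing storage decoded here can be pictured as follows (layout inferred from `TypeFunction` and the slice cast above):

    // ip.extra: ... | params_len | return_type | comptime_bits | noalias_bits | flags | param[0] | ... | param[params_len-1] | ...
    //                \______ the five u32 fields of TypeFunction _____________________/ \______ trailing Index values _______/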
/// Asserts the integer tag type is already present in the InternPool.
fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index {
return ip.getAssumeExists(.{ .int_type = .{
@@ -1977,7 +2093,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.child = ptr_type.elem_type,
.sentinel = ptr_type.sentinel,
.flags = .{
- .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment),
+ .alignment = Alignment.fromByteUnits(ptr_type.alignment),
.is_const = ptr_type.is_const,
.is_volatile = ptr_type.is_volatile,
.is_allowzero = ptr_type.is_allowzero,
@@ -2163,6 +2279,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
},
+ .func_type => |func_type| {
+ assert(func_type.return_type != .none);
+ for (func_type.param_types) |param_type| assert(param_type != .none);
+
+ const params_len = @intCast(u32, func_type.param_types.len);
+
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len +
+ params_len);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .type_function,
+ .data = ip.addExtraAssumeCapacity(TypeFunction{
+ .params_len = params_len,
+ .return_type = func_type.return_type,
+ .comptime_bits = func_type.comptime_bits,
+ .noalias_bits = func_type.noalias_bits,
+ .flags = .{
+ .alignment = Alignment.fromByteUnits(func_type.alignment),
+ .cc = func_type.cc,
+ .is_var_args = func_type.is_var_args,
+ .is_generic = func_type.is_generic,
+ .is_noinline = func_type.is_noinline,
+ .align_is_generic = func_type.align_is_generic,
+ .cc_is_generic = func_type.cc_is_generic,
+ .section_is_generic = func_type.section_is_generic,
+ .addrspace_is_generic = func_type.addrspace_is_generic,
+ },
+ }),
+ });
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types));
+ },
+
.extern_func => @panic("TODO"),
.ptr => |ptr| switch (ptr.addr) {
@@ -2736,6 +2883,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
OptionalMapIndex => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
Pointer.Flags => @bitCast(u32, @field(extra, field.name)),
+ TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
Pointer.VectorIndex => @enumToInt(@field(extra, field.name)),
else => @compileError("bad field type: " ++ @typeName(field.type)),
@@ -2797,6 +2945,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
OptionalMapIndex => @intToEnum(OptionalMapIndex, int32),
i32 => @bitCast(i32, int32),
Pointer.Flags => @bitCast(Pointer.Flags, int32),
+ TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32),
else => @compileError("bad field type: " ++ @typeName(field.type)),
@@ -2988,17 +3137,17 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
}
}
-pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex {
+pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex {
+ assert(val != .none);
const tags = ip.items.items(.tag);
- if (val == .none) return .none;
if (tags[@enumToInt(val)] != .type_struct) return .none;
const datas = ip.items.items(.data);
return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional();
}
-pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex {
+pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex {
+ assert(val != .none);
const tags = ip.items.items(.tag);
- if (val == .none) return .none;
switch (tags[@enumToInt(val)]) {
.type_union_tagged, .type_union_untagged, .type_union_safety => {},
else => return .none,
@@ -3007,6 +3156,16 @@ pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex {
return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional();
}
+pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType {
+ assert(val != .none);
+ const tags = ip.items.items(.tag);
+ const datas = ip.items.items(.data);
+ switch (tags[@enumToInt(val)]) {
+ .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]),
+ else => return null,
+ }
+}
+
pub fn isOptionalType(ip: InternPool, ty: Index) bool {
const tags = ip.items.items(.tag);
if (ty == .none) return false;
@@ -3092,6 +3251,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.type_union_safety,
=> @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
+ .type_function => b: {
+ const info = ip.extraData(TypeFunction, data);
+ break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len);
+ },
+
.undef => 0,
.simple_type => 0,
.simple_value => 0,
src/Module.zig
@@ -846,7 +846,7 @@ pub const Decl = struct {
pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
if (!decl.owns_tv) return .none;
const ty = (decl.val.castTag(.ty) orelse return .none).data;
- return mod.intern_pool.indexToStruct(ty.ip_index);
+ return mod.intern_pool.indexToStructType(ty.ip_index);
}
/// If the Decl has a value and it is a union, return it,
@@ -4764,7 +4764,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl.analysis = .complete;
decl.generation = mod.generation;
- const is_inline = decl.ty.fnCallingConvention() == .Inline;
+ const is_inline = decl.ty.fnCallingConvention(mod) == .Inline;
if (decl.is_exported) {
const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) };
if (is_inline) {
@@ -5617,6 +5617,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
+ const fn_ty = decl.ty;
+ const fn_ty_info = mod.typeToFunc(fn_ty).?;
+
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
@@ -5626,7 +5629,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = func,
- .fn_ret_ty = decl.ty.fnReturnType(),
+ .fn_ret_ty = fn_ty_info.return_type.toType(),
.owner_func = func,
.branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
};
@@ -5664,8 +5667,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// This could be a generic function instantiation, however, in which case we need to
// map the comptime parameters to constant values and only emit arg AIR instructions
// for the runtime ones.
- const fn_ty = decl.ty;
- const fn_ty_info = fn_ty.fnInfo();
const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len);
try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`
@@ -5692,7 +5693,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
total_param_index += 1;
continue;
- } else fn_ty_info.param_types[runtime_param_index];
+ } else fn_ty_info.param_types[runtime_param_index].toType();
const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
@@ -6864,6 +6865,10 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true });
}
+pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
+ return (try intern(mod, .{ .func_type = info })).toType();
+}
+
/// Supports optionals in addition to pointers.
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
if (ty.isPtrLikeOptional(mod)) {
@@ -6996,6 +7001,16 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
return i.toValue();
}
+pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
+ const ip = &mod.intern_pool;
+ assert(ip.isOptionalType(opt_ty.ip_index));
+ const result = try ip.get(mod.gpa, .{ .opt = .{
+ .ty = opt_ty.ip_index,
+ .val = .none,
+ } });
+ return result.toValue();
+}
+
pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
}
@@ -7201,15 +7216,22 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
- const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null;
+ if (ty.ip_index == .none) return null;
+ const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null;
return mod.structPtr(struct_index);
}
pub fn typeToUnion(mod: *Module, ty: Type) ?*Union {
- const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null;
+ if (ty.ip_index == .none) return null;
+ const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null;
return mod.unionPtr(union_index);
}
+pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
+ if (ty.ip_index == .none) return null;
+ return mod.intern_pool.indexToFuncType(ty.ip_index);
+}
+
pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
@setCold(true);
const owner_decl = mod.declPtr(owner_decl_index);
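Together with `typeToFunc`, the new `funcType` helper gives a round trip through the intern pool. A minimal sketch reusing the field values from the Dwarf-adjacent hunk earlier in this diff:

    const fn_ty = try mod.funcType(.{
        .param_types = &.{},
        .return_type = .void_type,
        .alignment = 0,
        .noalias_bits = 0,
        .comptime_bits = 0,
        .cc = .Unspecified,
        .is_var_args = false,
        .is_generic = false,
        .is_noinline = false,
        .align_is_generic = false,
        .cc_is_generic = false,
        .section_is_generic = false,
        .addrspace_is_generic = false,
    });
    const info = mod.typeToFunc(fn_ty).?; // non-null: fn_ty is a .type_function item
    assert(info.return_type == .void_type and info.cc == .Unspecified);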
src/Sema.zig
@@ -5850,6 +5850,7 @@ pub fn analyzeExport(
}
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const src = LazySrcLoc.nodeOffset(extra.node);
@@ -5862,8 +5863,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
const func = sema.func orelse
return sema.fail(block, src, "@setAlignStack outside function body", .{});
- const fn_owner_decl = sema.mod.declPtr(func.owner_decl);
- switch (fn_owner_decl.ty.fnCallingConvention()) {
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
+ switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
.Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
.Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
else => if (block.inlining != null) {
@@ -5871,7 +5872,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
},
}
- const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func);
+ const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func);
if (gop.found_existing) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
@@ -6378,7 +6379,7 @@ fn zirCall(
var input_is_error = false;
const block_index = @intCast(Air.Inst.Index, block.instructions.items.len);
- const func_ty_info = func_ty.fnInfo();
+ const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
const parent_comptime = block.is_comptime;
// `extra_index` and `arg_index` are separate since the bound function is passed as the first argument.
@@ -6393,7 +6394,7 @@ fn zirCall(
// Generate args to comptime params in comptime block.
defer block.is_comptime = parent_comptime;
- if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) {
+ if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) {
block.is_comptime = true;
// TODO set comptime_reason
}
@@ -6402,10 +6403,10 @@ fn zirCall(
if (arg_index >= fn_params_len)
break :inst Air.Inst.Ref.var_args_param_type;
- if (func_ty_info.param_types[arg_index].isGenericPoison())
+ if (func_ty_info.param_types[arg_index] == .generic_poison_type)
break :inst Air.Inst.Ref.generic_poison_type;
- break :inst try sema.addType(func_ty_info.param_types[arg_index]);
+ break :inst try sema.addType(func_ty_info.param_types[arg_index].toType());
});
const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
@@ -6506,7 +6507,7 @@ fn checkCallArgumentCount(
return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)});
};
- const func_ty_info = func_ty.fnInfo();
+ const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
const args_len = total_args - @boolToInt(member_fn);
if (func_ty_info.is_var_args) {
@@ -6562,7 +6563,7 @@ fn callBuiltin(
std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(sema.mod)});
};
- const func_ty_info = func_ty.fnInfo();
+ const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) {
std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len });
@@ -6573,7 +6574,7 @@ fn callBuiltin(
const GenericCallAdapter = struct {
generic_fn: *Module.Fn,
precomputed_hash: u64,
- func_ty_info: Type.Payload.Function.Data,
+ func_ty_info: InternPool.Key.FuncType,
args: []const Arg,
module: *Module,
@@ -6656,7 +6657,7 @@ fn analyzeCall(
const mod = sema.mod;
const callee_ty = sema.typeOf(func);
- const func_ty_info = func_ty.fnInfo();
+ const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
const cc = func_ty_info.cc;
if (cc == .Naked) {
@@ -6704,7 +6705,7 @@ fn analyzeCall(
var comptime_reason_buf: Block.ComptimeReason = undefined;
var comptime_reason: ?*const Block.ComptimeReason = null;
if (!is_comptime_call) {
- if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| {
+ if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| {
is_comptime_call = ct;
if (ct) {
// stage1 can't handle doing this directly
@@ -6712,7 +6713,7 @@ fn analyzeCall(
.block = block,
.func = func,
.func_src = func_src,
- .return_ty = func_ty_info.return_type,
+ .return_ty = func_ty_info.return_type.toType(),
} };
comptime_reason = &comptime_reason_buf;
}
@@ -6750,7 +6751,7 @@ fn analyzeCall(
.block = block,
.func = func,
.func_src = func_src,
- .return_ty = func_ty_info.return_type,
+ .return_ty = func_ty_info.return_type.toType(),
} };
comptime_reason = &comptime_reason_buf;
},
@@ -6875,9 +6876,9 @@ fn analyzeCall(
// comptime state.
var should_memoize = true;
- var new_fn_info = fn_owner_decl.ty.fnInfo();
- new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len);
- new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr;
+ var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?;
+ new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len);
+ new_fn_info.comptime_bits = 0;
// This will have return instructions analyzed as break instructions to
// the block_inst above. Here we are performing "comptime/inline semantic analysis"
@@ -6970,7 +6971,7 @@ fn analyzeCall(
}
break :blk bare_return_type;
};
- new_fn_info.return_type = fn_ret_ty;
+ new_fn_info.return_type = fn_ret_ty.ip_index;
const parent_fn_ret_ty = sema.fn_ret_ty;
sema.fn_ret_ty = fn_ret_ty;
defer sema.fn_ret_ty = parent_fn_ret_ty;
@@ -6993,7 +6994,7 @@ fn analyzeCall(
}
}
- const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info);
+ const new_func_resolved_ty = try mod.funcType(new_fn_info);
if (!is_comptime_call and !block.is_typeof) {
try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin);
@@ -7081,13 +7082,14 @@ fn analyzeCall(
assert(!func_ty_info.is_generic);
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
+ const fn_info = mod.typeToFunc(func_ty).?;
for (uncasted_args, 0..) |uncasted_arg, i| {
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
.param_i = @intCast(u32, i),
} };
- const param_ty = func_ty.fnParamType(i);
+ const param_ty = fn_info.param_types[i].toType();
args[i] = sema.analyzeCallArg(
block,
.unneeded,
@@ -7126,8 +7128,8 @@ fn analyzeCall(
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
- try sema.queueFullTypeResolution(func_ty_info.return_type);
- if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) {
+ try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
+ if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) {
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
@@ -7155,7 +7157,7 @@ fn analyzeCall(
try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src);
}
return sema.handleTailCall(block, call_src, func_ty, func_inst);
- } else if (block.wantSafety() and func_ty_info.return_type.isNoReturn()) {
+ } else if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) {
// Function pointers and extern functions aren't guaranteed to
// actually be noreturn so we add a safety check for them.
check: {
@@ -7171,7 +7173,7 @@ fn analyzeCall(
try sema.safetyPanic(block, .noreturn_returned);
return Air.Inst.Ref.unreachable_value;
- } else if (func_ty_info.return_type.isNoReturn()) {
+ } else if (func_ty_info.return_type == .noreturn_type) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
@@ -7208,13 +7210,13 @@ fn analyzeInlineCallArg(
param_block: *Block,
arg_src: LazySrcLoc,
inst: Zir.Inst.Index,
- new_fn_info: Type.Payload.Function.Data,
+ new_fn_info: InternPool.Key.FuncType,
arg_i: *usize,
uncasted_args: []const Air.Inst.Ref,
is_comptime_call: bool,
should_memoize: *bool,
memoized_call_key: Module.MemoizedCall.Key,
- raw_param_types: []const Type,
+ raw_param_types: []const InternPool.Index,
func_inst: Air.Inst.Ref,
has_comptime_args: *bool,
) !void {
@@ -7233,13 +7235,14 @@ fn analyzeInlineCallArg(
const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const param_ty = param_ty: {
const raw_param_ty = raw_param_types[arg_i.*];
- if (!raw_param_ty.isGenericPoison()) break :param_ty raw_param_ty;
+ if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
const param_ty_inst = try sema.resolveBody(param_block, param_body, inst);
- break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst);
+ const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst);
+ break :param_ty param_ty.toIntern();
};
new_fn_info.param_types[arg_i.*] = param_ty;
const uncasted_arg = uncasted_args[arg_i.*];
- if (try sema.typeRequiresComptime(param_ty)) {
+ if (try sema.typeRequiresComptime(param_ty.toType())) {
_ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| {
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
@@ -7247,7 +7250,7 @@ fn analyzeInlineCallArg(
} else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
}
- const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{
+ const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{
.func_inst = func_inst,
.param_i = @intCast(u32, arg_i.*),
} }) catch |err| switch (err) {
@@ -7276,7 +7279,7 @@ fn analyzeInlineCallArg(
}
should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState();
memoized_call_key.args[arg_i.*] = .{
- .ty = param_ty,
+ .ty = param_ty.toType(),
.val = arg_val,
};
} else {
@@ -7292,7 +7295,7 @@ fn analyzeInlineCallArg(
.param_anytype, .param_anytype_comptime => {
// No coercion needed.
const uncasted_arg = uncasted_args[arg_i.*];
- new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg);
+ new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern();
if (is_comptime_call) {
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
@@ -7357,7 +7360,7 @@ fn analyzeGenericCallArg(
uncasted_arg: Air.Inst.Ref,
comptime_arg: TypedValue,
runtime_args: []Air.Inst.Ref,
- new_fn_info: Type.Payload.Function.Data,
+ new_fn_info: InternPool.Key.FuncType,
runtime_i: *u32,
) !void {
const mod = sema.mod;
@@ -7365,7 +7368,7 @@ fn analyzeGenericCallArg(
comptime_arg.ty.hasRuntimeBits(mod) and
!(try sema.typeRequiresComptime(comptime_arg.ty));
if (is_runtime) {
- const param_ty = new_fn_info.param_types[runtime_i.*];
+ const param_ty = new_fn_info.param_types[runtime_i.*].toType();
const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
try sema.queueFullTypeResolution(param_ty);
runtime_args[runtime_i.*] = casted_arg;
@@ -7387,7 +7390,7 @@ fn instantiateGenericCall(
func: Air.Inst.Ref,
func_src: LazySrcLoc,
call_src: LazySrcLoc,
- func_ty_info: Type.Payload.Function.Data,
+ func_ty_info: InternPool.Key.FuncType,
ensure_result_used: bool,
uncasted_args: []const Air.Inst.Ref,
call_tag: Air.Inst.Tag,
@@ -7431,14 +7434,14 @@ fn instantiateGenericCall(
var is_anytype = false;
switch (zir_tags[inst]) {
.param => {
- is_comptime = func_ty_info.paramIsComptime(i);
+ is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i));
},
.param_comptime => {
is_comptime = true;
},
.param_anytype => {
is_anytype = true;
- is_comptime = func_ty_info.paramIsComptime(i);
+ is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i));
},
.param_anytype_comptime => {
is_anytype = true;
@@ -7609,7 +7612,7 @@ fn instantiateGenericCall(
// Make a runtime call to the new function, making sure to omit the comptime args.
const comptime_args = callee.comptime_args.?;
const func_ty = mod.declPtr(callee.owner_decl).ty;
- const new_fn_info = func_ty.fnInfo();
+ const new_fn_info = mod.typeToFunc(func_ty).?;
const runtime_args_len = @intCast(u32, new_fn_info.param_types.len);
const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len);
{
@@ -7647,12 +7650,12 @@ fn instantiateGenericCall(
total_i += 1;
}
- try sema.queueFullTypeResolution(new_fn_info.return_type);
+ try sema.queueFullTypeResolution(new_fn_info.return_type.toType());
}
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
- if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) {
+ if (sema.owner_func != null and new_fn_info.return_type.toType().isError(mod)) {
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
@@ -7677,7 +7680,7 @@ fn instantiateGenericCall(
if (call_tag == .call_always_tail) {
return sema.handleTailCall(block, call_src, func_ty, result);
}
- if (new_fn_info.return_type.isNoReturn()) {
+ if (new_fn_info.return_type == .noreturn_type) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
@@ -7695,7 +7698,7 @@ fn resolveGenericInstantiationType(
module_fn: *Module.Fn,
new_module_func: *Module.Fn,
namespace: Namespace.Index,
- func_ty_info: Type.Payload.Function.Data,
+ func_ty_info: InternPool.Key.FuncType,
call_src: LazySrcLoc,
bound_arg_src: ?LazySrcLoc,
) !*Module.Fn {
@@ -7755,14 +7758,14 @@ fn resolveGenericInstantiationType(
var is_anytype = false;
switch (zir_tags[inst]) {
.param => {
- is_comptime = func_ty_info.paramIsComptime(arg_i);
+ is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_comptime => {
is_comptime = true;
},
.param_anytype => {
is_anytype = true;
- is_comptime = func_ty_info.paramIsComptime(arg_i);
+ is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_anytype_comptime => {
is_anytype = true;
@@ -7822,13 +7825,13 @@ fn resolveGenericInstantiationType(
var is_comptime = false;
switch (zir_tags[inst]) {
.param => {
- is_comptime = func_ty_info.paramIsComptime(arg_i);
+ is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_comptime => {
is_comptime = true;
},
.param_anytype => {
- is_comptime = func_ty_info.paramIsComptime(arg_i);
+ is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_anytype_comptime => {
is_comptime = true;
@@ -7868,8 +7871,8 @@ fn resolveGenericInstantiationType(
new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator);
// If the call evaluated to a return type that requires comptime, never mind
// our generic instantiation. Instead we need to perform a comptime call.
- const new_fn_info = new_decl.ty.fnInfo();
- if (try sema.typeRequiresComptime(new_fn_info.return_type)) {
+ const new_fn_info = mod.typeToFunc(new_decl.ty).?;
+ if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) {
return error.ComptimeReturn;
}
// Similarly, if the call evaluated to a generic type we need to instead
@@ -8969,19 +8972,19 @@ fn funcCommon(
// the instantiation, which can depend on comptime parameters.
// Related proposal: https://github.com/ziglang/zig/issues/11834
const cc_resolved = cc orelse .Unspecified;
- const param_types = try sema.arena.alloc(Type, block.params.items.len);
- const comptime_params = try sema.arena.alloc(bool, block.params.items.len);
- for (block.params.items, 0..) |param, i| {
+ const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len);
+ var comptime_bits: u32 = 0;
+ for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| {
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @truncate(u1, noalias_bits >> index) != 0;
};
- param_types[i] = param.ty;
+ dest_param_ty.* = param.ty.toIntern();
sema.analyzeParameter(
block,
.unneeded,
param,
- comptime_params,
+ &comptime_bits,
i,
&is_generic,
cc_resolved,
@@ -8994,7 +8997,7 @@ fn funcCommon(
block,
Module.paramSrc(src_node_offset, mod, decl, i),
param,
- comptime_params,
+ &comptime_bits,
i,
&is_generic,
cc_resolved,
@@ -9019,7 +9022,7 @@ fn funcCommon(
else => |e| return e,
};
- const return_type = if (!inferred_error_set or ret_poison)
+ const return_type: Type = if (!inferred_error_set or ret_poison)
bare_return_type
else blk: {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
@@ -9047,7 +9050,9 @@ fn funcCommon(
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) {
+ if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
+ !try sema.validateExternType(return_type, .ret_ty))
+ {
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
return_type.fmt(sema.mod), @tagName(cc_resolved),
@@ -9141,8 +9146,7 @@ fn funcCommon(
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
}
if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
- for (comptime_params) |ct| is_generic = is_generic or ct;
- is_generic = is_generic or ret_ty_requires_comptime;
+ is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
if (!is_generic and sema.wantErrorReturnTracing(return_type)) {
// Make sure that StackTrace's fields are resolved so that the backend can
@@ -9151,10 +9155,11 @@ fn funcCommon(
_ = try sema.resolveTypeFields(unresolved_stack_trace_ty);
}
- break :fn_ty try Type.Tag.function.create(sema.arena, .{
+ break :fn_ty try mod.funcType(.{
.param_types = param_types,
- .comptime_params = comptime_params.ptr,
- .return_type = return_type,
+ .noalias_bits = noalias_bits,
+ .comptime_bits = comptime_bits,
+ .return_type = return_type.toIntern(),
.cc = cc_resolved,
.cc_is_generic = cc == null,
.alignment = alignment orelse 0,
@@ -9164,7 +9169,6 @@ fn funcCommon(
.is_var_args = var_args,
.is_generic = is_generic,
.is_noinline = is_noinline,
- .noalias_bits = noalias_bits,
});
};
@@ -9203,7 +9207,7 @@ fn funcCommon(
return sema.addType(fn_ty);
}
- const is_inline = fn_ty.fnCallingConvention() == .Inline;
+ const is_inline = fn_ty.fnCallingConvention(mod) == .Inline;
const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none;
const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: {
@@ -9243,7 +9247,7 @@ fn analyzeParameter(
block: *Block,
param_src: LazySrcLoc,
param: Block.Param,
- comptime_params: []bool,
+ comptime_bits: *u32,
i: usize,
is_generic: *bool,
cc: std.builtin.CallingConvention,
@@ -9252,14 +9256,16 @@ fn analyzeParameter(
) !void {
const mod = sema.mod;
const requires_comptime = try sema.typeRequiresComptime(param.ty);
- comptime_params[i] = param.is_comptime or requires_comptime;
+ if (param.is_comptime or requires_comptime) {
+ comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error
+ }
const this_generic = param.ty.isGenericPoison();
is_generic.* = is_generic.* or this_generic;
const target = mod.getTarget();
- if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
+ if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
- if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
+ if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (!param.ty.isValidParamType(mod)) {
@@ -9275,7 +9281,7 @@ fn analyzeParameter(
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
+ if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
param.ty.fmt(mod), @tagName(cc),
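Note the asymmetry in how the two bitsets guard their 32-entry capacity: the noalias path bails out gracefully via `std.math.cast(u5, i) orelse`, while the comptime path above still carries a TODO for its `@intCast(u5, i)` overflow. The shared ceiling itself:

    const std = @import("std");

    test "bitset flags stop at parameter index 31" {
        try std.testing.expect(std.math.cast(u5, 31) != null); // representable
        try std.testing.expect(std.math.cast(u5, 32) == null); // past the u32 bitset
    }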
@@ -15986,22 +15992,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
),
.Fn => {
// TODO: look into memoizing this result.
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
var params_anon_decl = try block.startAnonDecl();
defer params_anon_decl.deinit();
const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
- for (param_vals, 0..) |*param_val, i| {
- const param_ty = info.param_types[i];
- const is_generic = param_ty.isGenericPoison();
- const param_ty_val = if (is_generic)
- Value.null
- else
- try Value.Tag.opt_payload.create(
- params_anon_decl.arena(),
- try Value.Tag.ty.create(params_anon_decl.arena(), try param_ty.copy(params_anon_decl.arena())),
- );
+ for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| {
+ const is_generic = param_ty == .generic_poison_type;
+ const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{
+ .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }),
+ .val = if (is_generic) .none else param_ty,
+ } });
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
@@ -16015,7 +16017,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_noalias: bool,
Value.makeBool(is_noalias),
// type: ?type,
- param_ty_val,
+ param_ty_val.toValue(),
};
param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields);
}
@@ -16059,13 +16061,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
});
};
- const ret_ty_opt = if (!info.return_type.isGenericPoison())
- try Value.Tag.opt_payload.create(
- sema.arena,
- try Value.Tag.ty.create(sema.arena, info.return_type),
- )
- else
- Value.null;
+ const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{
+ .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }),
+ .val = if (info.return_type == .generic_poison_type) .none else info.return_type,
+ } });
const callconv_ty = try sema.getBuiltinType("CallingConvention");
@@ -16080,7 +16079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_var_args: bool,
Value.makeBool(info.is_var_args),
// return_type: ?type,
- ret_ty_opt,
+ ret_ty_opt.toValue(),
// args: []const Fn.Param,
args_val,
};
@@ -17788,7 +17787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
if (inst_data.size != .One) {
return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
}
- const fn_align = elem_ty.fnInfo().alignment;
+ const fn_align = mod.typeToFunc(elem_ty).?.alignment;
if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and
abi_align != fn_align)
{
@@ -18939,7 +18938,7 @@ fn zirReify(
if (ptr_size != .One) {
return sema.fail(block, src, "function pointers must be single pointers", .{});
}
- const fn_align = elem_ty.fnInfo().alignment;
+ const fn_align = mod.typeToFunc(elem_ty).?.alignment;
if (abi_align != 0 and fn_align != 0 and
abi_align != fn_align)
{
@@ -19483,12 +19482,10 @@ fn zirReify(
const args_slice_val = args_val.castTag(.slice).?.data;
const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod));
- const param_types = try sema.arena.alloc(Type, args_len);
- const comptime_params = try sema.arena.alloc(bool, args_len);
+ const param_types = try sema.arena.alloc(InternPool.Index, args_len);
var noalias_bits: u32 = 0;
- var i: usize = 0;
- while (i < args_len) : (i += 1) {
+ for (param_types, 0..) |*param_type, i| {
const arg = try args_slice_val.ptr.elemValue(mod, i);
const arg_val = arg.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
@@ -19505,25 +19502,22 @@ fn zirReify(
const param_type_val = param_type_opt_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
- const param_type = try param_type_val.toType().copy(sema.arena);
+ param_type.* = param_type_val.ip_index;
if (arg_is_noalias) {
- if (!param_type.isPtrAtRuntime(mod)) {
+ if (!param_type.toType().isPtrAtRuntime(mod)) {
return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
}
noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{}));
}
-
- param_types[i] = param_type;
- comptime_params[i] = false;
}
- var fn_info = Type.Payload.Function.Data{
+ const ty = try mod.funcType(.{
.param_types = param_types,
- .comptime_params = comptime_params.ptr,
+ .comptime_bits = 0,
.noalias_bits = noalias_bits,
- .return_type = try return_type.toType().copy(sema.arena),
+ .return_type = return_type.toIntern(),
.alignment = alignment,
.cc = cc,
.is_var_args = is_var_args,
@@ -19533,9 +19527,7 @@ fn zirReify(
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
- };
-
- const ty = try Type.Tag.function.create(sema.arena, fn_info);
+ });
return sema.addType(ty);
},
.Frame => return sema.failWithUseOfAsync(block, src),
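For comparison, constructing a concrete `fn (i32) void` through the new interned API would look roughly like this; the generic flags elided from the hunk are assumed to keep the same names as in the removed `Type.Payload.Function.Data`:

    const params = try sema.arena.alloc(InternPool.Index, 1);
    params[0] = .i32_type;
    const fn_ty = try mod.funcType(.{
        .param_types = params,
        .comptime_bits = 0,
        .noalias_bits = 0,
        .return_type = .void_type,
        .alignment = 0,
        .cc = .Unspecified,
        .is_var_args = false,
        .is_generic = false,
        .is_noinline = false,
        .align_is_generic = false,
        .cc_is_generic = false,
        .section_is_generic = false,
        .addrspace_is_generic = false,
    });

Because the signature is interned, two calls with identical fields yield a `Type` backed by the same underlying index, which is what makes the by-index comparisons used elsewhere in this commit sound.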
@@ -23435,7 +23427,7 @@ fn explainWhyTypeIsComptimeInner(
.Pointer => {
const elem_ty = ty.elemType2(mod);
if (elem_ty.zigTypeTag(mod) == .Fn) {
- const fn_info = elem_ty.fnInfo();
+ const fn_info = mod.typeToFunc(elem_ty).?;
if (fn_info.is_generic) {
try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{});
}
@@ -23443,7 +23435,7 @@ fn explainWhyTypeIsComptimeInner(
.Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
- if (fn_info.return_type.comptimeOnly(mod)) {
+ if (fn_info.return_type.toType().comptimeOnly(mod)) {
try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
}
return;
@@ -23543,10 +23535,10 @@ fn validateExternType(
const target = sema.mod.getTarget();
// For now we want to allow PTX kernels to use Zig objects, even if we end up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
- if (ty.fnCallingConvention() == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
+ if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
return true;
}
- return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention());
+ return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod));
},
.Enum => {
return sema.validateExternType(try ty.intTagType(mod), position);
@@ -23619,7 +23611,7 @@ fn explainWhyTypeIsNotExtern(
try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
return;
}
- switch (ty.fnCallingConvention()) {
+ switch (ty.fnCallingConvention(mod)) {
.Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}),
.Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}),
.Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}),
@@ -24548,10 +24540,10 @@ fn fieldCallBind(
try sema.addReferencedBy(block, src, decl_idx);
const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
const decl_type = sema.typeOf(decl_val);
- if (decl_type.zigTypeTag(mod) == .Fn and
- decl_type.fnParamLen() >= 1)
- {
- const first_param_type = decl_type.fnParamType(0);
+ if (mod.typeToFunc(decl_type)) |func_type| f: {
+ if (func_type.param_types.len == 0) break :f;
+
+ const first_param_type = func_type.param_types[0].toType();
// zig fmt: off
if (first_param_type.isGenericPoison() or (
first_param_type.zigTypeTag(mod) == .Pointer and
@@ -27090,8 +27082,9 @@ fn coerceInMemoryAllowedFns(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
- const dest_info = dest_ty.fnInfo();
- const src_info = src_ty.fnInfo();
+ const mod = sema.mod;
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
if (dest_info.is_var_args != src_info.is_var_args) {
return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
@@ -27108,13 +27101,13 @@ fn coerceInMemoryAllowedFns(
} };
}
- if (!src_info.return_type.isNoReturn()) {
- const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src);
+ if (src_info.return_type != .noreturn_type) {
+ const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src);
if (rt != .ok) {
return InMemoryCoercionResult{ .fn_return_type = .{
.child = try rt.dupe(sema.arena),
- .actual = src_info.return_type,
- .wanted = dest_info.return_type,
+ .actual = src_info.return_type.toType(),
+ .wanted = dest_info.return_type.toType(),
} };
}
}
@@ -27134,22 +27127,23 @@ fn coerceInMemoryAllowedFns(
}
for (dest_info.param_types, 0..) |dest_param_ty, i| {
- const src_param_ty = src_info.param_types[i];
+ const src_param_ty = src_info.param_types[i].toType();
- if (dest_info.comptime_params[i] != src_info.comptime_params[i]) {
+ const i_small = @intCast(u5, i);
+ if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) {
return InMemoryCoercionResult{ .fn_param_comptime = .{
.index = i,
- .wanted = dest_info.comptime_params[i],
+ .wanted = dest_info.paramIsComptime(i_small),
} };
}
// Note: Cast direction is reversed here.
- const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
+ const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src);
if (param != .ok) {
return InMemoryCoercionResult{ .fn_param = .{
.child = try param.dupe(sema.arena),
.actual = src_param_ty,
- .wanted = dest_param_ty,
+ .wanted = dest_param_ty.toType(),
.index = i,
} };
}
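`paramIsComptime` replaces indexing into the old `comptime_params` bool array with a query against the packed `comptime_bits: u32`; one plausible shape for it, consistent with the `u5` index used here and the `@truncate` idiom this codebase already uses for `noalias_bits`:

    pub fn paramIsComptime(self: FuncType, i: u5) bool {
        assert(i < self.param_types.len);
        return @truncate(u1, self.comptime_bits >> i) != 0;
    }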
@@ -31205,17 +31199,17 @@ fn resolvePeerTypes(
return chosen_ty;
}
-pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void {
+pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void {
const mod = sema.mod;
- try sema.resolveTypeFully(fn_info.return_type);
+ try sema.resolveTypeFully(fn_info.return_type.toType());
- if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) {
+ if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) {
// Ensure the type exists so that backends can assume that.
_ = try sema.getBuiltinType("StackTrace");
}
for (fn_info.param_types) |param_ty| {
- try sema.resolveTypeFully(param_ty);
+ try sema.resolveTypeFully(param_ty.toType());
}
}
@@ -31286,16 +31280,16 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
return sema.resolveTypeLayout(payload_ty);
},
.Fn => {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
if (info.is_generic) {
// Resolving of generic function types is deferred to when
// the function is instantiated.
return;
}
for (info.param_types) |param_ty| {
- try sema.resolveTypeLayout(param_ty);
+ try sema.resolveTypeLayout(param_ty.toType());
}
- try sema.resolveTypeLayout(info.return_type);
+ try sema.resolveTypeLayout(info.return_type.toType());
},
else => {},
}
@@ -31615,15 +31609,13 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.error_set_merged,
=> false,
- .function => true,
-
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.pointer => {
const child_ty = ty.childType(mod);
if (child_ty.zigTypeTag(mod) == .Fn) {
- return child_ty.fnInfo().is_generic;
+ return mod.typeToFunc(child_ty).?.is_generic;
} else {
return sema.resolveTypeRequiresComptime(child_ty);
}
@@ -31644,7 +31636,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
- return child_ty.fnInfo().is_generic;
+ return mod.typeToFunc(child_ty).?.is_generic;
} else {
return sema.resolveTypeRequiresComptime(child_ty);
}
@@ -31653,6 +31645,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()),
.opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()),
.error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()),
+ .func_type => true,
+
.simple_type => |t| switch (t) {
.f16,
.f32,
@@ -31799,16 +31793,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
},
.ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()),
.Fn => {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
if (info.is_generic) {
// Resolving of generic function types is deferred to when
// the function is instantiated.
return;
}
for (info.param_types) |param_ty| {
- try sema.resolveTypeFully(param_ty);
+ try sema.resolveTypeFully(param_ty.toType());
}
- try sema.resolveTypeFully(info.return_type);
+ try sema.resolveTypeFully(info.return_type.toType());
},
else => {},
}
@@ -31881,7 +31875,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.none => return ty,
.u1_type,
- .u5_type,
.u8_type,
.i8_type,
.u16_type,
@@ -31941,8 +31934,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
- .one_u5 => unreachable,
- .four_u5 => unreachable,
+ .one_u8 => unreachable,
+ .four_u8 => unreachable,
.negative_one => unreachable,
.calling_convention_c => unreachable,
.calling_convention_inline => unreachable,
@@ -32083,14 +32076,14 @@ fn resolveInferredErrorSet(
// `*Module.Fn`. Not only is the function not relevant to the inferred error set
// in this case, but it may also be a generic function, which would cause an
// assertion failure if we called `ensureFuncBodyAnalyzed` on it here.
- const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl);
- const ies_func_info = ies_func_owner_decl.ty.fnInfo();
+ const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl);
+ const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
// If the ies is declared by an inline function with a generic return type, the return_type
// will be generic_poison, because an inline function does not create a new declaration and
// the ies has already been filled in by analyzeCall, so we can simply skip this case here.
- if (ies_func_info.return_type.isGenericPoison()) {
+ if (ies_func_info.return_type == .generic_poison_type) {
assert(ies_func_info.cc == .Inline);
- } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) {
+ } else if (ies_func_info.return_type.toType().errorUnionSet().castTag(.error_set_inferred).?.data == ies) {
if (ies_func_info.is_generic) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
@@ -32285,7 +32278,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
const prev_field_index = struct_obj.fields.getIndex(field_name).?;
const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index });
- try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
+ try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
break :msg msg;
};
@@ -32387,7 +32380,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.index = field_i,
.range = .type,
});
- const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field);
@@ -32402,7 +32395,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.index = field_i,
.range = .type,
});
- const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty);
@@ -32580,7 +32573,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
// The provided type is an integer type and we must construct the enum tag type here.
int_tag_ty = provided_ty;
if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) {
- return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)});
+ return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)});
}
if (fields_len > 0) {
@@ -32590,7 +32583,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{
- int_tag_ty.fmt(sema.mod),
+ int_tag_ty.fmt(mod),
fields_len - 1,
});
break :msg msg;
@@ -32605,7 +32598,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
union_obj.tag_ty = provided_ty;
const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) {
.enum_type => |x| x,
- else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}),
+ else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}),
};
// The fields of the union must match the enum exactly.
// A flag per field is used to check for missing and extraneous fields.
@@ -32705,7 +32698,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy;
const msg = msg: {
- const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)});
+ const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)});
errdefer msg.destroy(gpa);
try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -32751,7 +32744,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const prev_field_index = union_obj.fields.getIndex(field_name).?;
const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy;
- try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{});
+ try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "union declared here", .{});
break :msg msg;
};
@@ -32766,7 +32759,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.range = .type,
}).lazy;
const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{
- field_name, union_obj.tag_ty.fmt(sema.mod),
+ field_name, union_obj.tag_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
@@ -32800,7 +32793,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.index = field_i,
.range = .type,
});
- const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field);
@@ -32815,7 +32808,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.index = field_i,
.range = .type,
});
- const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);
@@ -33060,7 +33053,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.error_set,
.error_set_merged,
.error_union,
- .function,
.error_set_inferred,
.anyframe_T,
.pointer,
@@ -33087,7 +33079,12 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return null;
}
},
- .ptr_type => null,
+
+ .ptr_type,
+ .error_union_type,
+ .func_type,
+ => null,
+
.array_type => |array_type| {
if (array_type.len == 0)
return Value.initTag(.empty_array);
@@ -33102,13 +33099,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return null;
},
.opt_type => |child| {
- if (child.toType().isNoReturn()) {
- return Value.null;
+ if (child == .noreturn_type) {
+ return try mod.nullValue(ty);
} else {
return null;
}
},
- .error_union_type => null,
+
.simple_type => |t| switch (t) {
.f16,
.f32,
@@ -33674,15 +33671,13 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.error_set_merged,
=> false,
- .function => true,
-
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.pointer => {
const child_ty = ty.childType(mod);
if (child_ty.zigTypeTag(mod) == .Fn) {
- return child_ty.fnInfo().is_generic;
+ return mod.typeToFunc(child_ty).?.is_generic;
} else {
return sema.typeRequiresComptime(child_ty);
}
@@ -33703,7 +33698,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
- return child_ty.fnInfo().is_generic;
+ return mod.typeToFunc(child_ty).?.is_generic;
} else {
return sema.typeRequiresComptime(child_ty);
}
@@ -33714,6 +33709,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.error_union_type => |error_union_type| {
return sema.typeRequiresComptime(error_union_type.payload_type.toType());
},
+ .func_type => true,
+
.simple_type => |t| return switch (t) {
.f16,
.f32,
@@ -33870,7 +33867,8 @@ fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
/// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
- const fn_info = ty.fnInfo();
+ const mod = sema.mod;
+ const fn_info = mod.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
switch (fn_info.cc) {
@@ -33878,7 +33876,7 @@ pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
.Inline => return false,
else => {},
}
- if (try sema.typeRequiresComptime(fn_info.return_type)) {
+ if (try sema.typeRequiresComptime(fn_info.return_type.toType())) {
return false;
}
return true;
src/target.zig
@@ -649,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
else => "o", // Non-standard
};
}
+
+pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool {
+ return switch (cc) {
+ .Unspecified, .Async, .Inline => true,
+ // For now we want to allow PTX kernels to use Zig objects, even if
+ // we end up exposing the ABI. The goal is to experiment with more
+ // integrated CPU/GPU code.
+ .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
+ else => false,
+ };
+}
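A hypothetical test (not part of this commit) pinning down the relocated helper's behavior on the host target:

    const std = @import("std");
    const builtin = @import("builtin");
    const target_util = @import("target.zig");

    test "only Zig-internal calling conventions allow Zig types" {
        const t = builtin.target;
        try std.testing.expect(target_util.fnCallConvAllowsZigTypes(t, .Unspecified));
        try std.testing.expect(target_util.fnCallConvAllowsZigTypes(t, .Inline));
        try std.testing.expect(!target_util.fnCallConvAllowsZigTypes(t, .C));
    }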
src/type.zig
@@ -42,8 +42,6 @@ pub const Type = struct {
.error_set_merged,
=> return .ErrorSet,
- .function => return .Fn,
-
.pointer,
.inferred_alloc_const,
.inferred_alloc_mut,
@@ -66,6 +64,7 @@ pub const Type = struct {
.union_type => return .Union,
.opaque_type => return .Opaque,
.enum_type => return .Enum,
+ .func_type => return .Fn,
.simple_type => |s| switch (s) {
.f16,
.f32,
@@ -344,53 +343,6 @@ pub const Type = struct {
return true;
},
- .function => {
- if (b.zigTypeTag(mod) != .Fn) return false;
-
- const a_info = a.fnInfo();
- const b_info = b.fnInfo();
-
- if (!a_info.return_type.isGenericPoison() and
- !b_info.return_type.isGenericPoison() and
- !eql(a_info.return_type, b_info.return_type, mod))
- return false;
-
- if (a_info.is_var_args != b_info.is_var_args)
- return false;
-
- if (a_info.is_generic != b_info.is_generic)
- return false;
-
- if (a_info.is_noinline != b_info.is_noinline)
- return false;
-
- if (a_info.noalias_bits != b_info.noalias_bits)
- return false;
-
- if (!a_info.cc_is_generic and a_info.cc != b_info.cc)
- return false;
-
- if (!a_info.align_is_generic and a_info.alignment != b_info.alignment)
- return false;
-
- if (a_info.param_types.len != b_info.param_types.len)
- return false;
-
- for (a_info.param_types, 0..) |a_param_ty, i| {
- const b_param_ty = b_info.param_types[i];
- if (a_info.comptime_params[i] != b_info.comptime_params[i])
- return false;
-
- if (a_param_ty.isGenericPoison()) continue;
- if (b_param_ty.isGenericPoison()) continue;
-
- if (!eql(a_param_ty, b_param_ty, mod))
- return false;
- }
-
- return true;
- },
-
.pointer,
.inferred_alloc_const,
.inferred_alloc_mut,
@@ -501,32 +453,6 @@ pub const Type = struct {
std.hash.autoHash(hasher, ies);
},
- .function => {
- std.hash.autoHash(hasher, std.builtin.TypeId.Fn);
-
- const fn_info = ty.fnInfo();
- if (!fn_info.return_type.isGenericPoison()) {
- hashWithHasher(fn_info.return_type, hasher, mod);
- }
- if (!fn_info.align_is_generic) {
- std.hash.autoHash(hasher, fn_info.alignment);
- }
- if (!fn_info.cc_is_generic) {
- std.hash.autoHash(hasher, fn_info.cc);
- }
- std.hash.autoHash(hasher, fn_info.is_var_args);
- std.hash.autoHash(hasher, fn_info.is_generic);
- std.hash.autoHash(hasher, fn_info.is_noinline);
- std.hash.autoHash(hasher, fn_info.noalias_bits);
-
- std.hash.autoHash(hasher, fn_info.param_types.len);
- for (fn_info.param_types, 0..) |param_ty, i| {
- std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
- if (param_ty.isGenericPoison()) continue;
- hashWithHasher(param_ty, hasher, mod);
- }
- },
-
.pointer,
.inferred_alloc_const,
.inferred_alloc_mut,
@@ -631,30 +557,6 @@ pub const Type = struct {
};
},
- .function => {
- const payload = self.castTag(.function).?.data;
- const param_types = try allocator.alloc(Type, payload.param_types.len);
- for (payload.param_types, 0..) |param_ty, i| {
- param_types[i] = try param_ty.copy(allocator);
- }
- const other_comptime_params = payload.comptime_params[0..payload.param_types.len];
- const comptime_params = try allocator.dupe(bool, other_comptime_params);
- return Tag.function.create(allocator, .{
- .return_type = try payload.return_type.copy(allocator),
- .param_types = param_types,
- .cc = payload.cc,
- .alignment = payload.alignment,
- .is_var_args = payload.is_var_args,
- .is_generic = payload.is_generic,
- .is_noinline = payload.is_noinline,
- .comptime_params = comptime_params.ptr,
- .align_is_generic = payload.align_is_generic,
- .cc_is_generic = payload.cc_is_generic,
- .section_is_generic = payload.section_is_generic,
- .addrspace_is_generic = payload.addrspace_is_generic,
- .noalias_bits = payload.noalias_bits,
- });
- },
.pointer => {
const payload = self.castTag(.pointer).?.data;
const sent: ?Value = if (payload.sentinel) |some|
@@ -766,32 +668,6 @@ pub const Type = struct {
while (true) {
const t = ty.tag();
switch (t) {
- .function => {
- const payload = ty.castTag(.function).?.data;
- try writer.writeAll("fn(");
- for (payload.param_types, 0..) |param_type, i| {
- if (i != 0) try writer.writeAll(", ");
- try param_type.dump("", .{}, writer);
- }
- if (payload.is_var_args) {
- if (payload.param_types.len != 0) {
- try writer.writeAll(", ");
- }
- try writer.writeAll("...");
- }
- try writer.writeAll(") ");
- if (payload.alignment != 0) {
- try writer.print("align({d}) ", .{payload.alignment});
- }
- if (payload.cc != .Unspecified) {
- try writer.writeAll("callconv(.");
- try writer.writeAll(@tagName(payload.cc));
- try writer.writeAll(") ");
- }
- ty = payload.return_type;
- continue;
- },
-
.anyframe_T => {
const return_type = ty.castTag(.anyframe_T).?.data;
try writer.print("anyframe->", .{});
@@ -909,48 +785,6 @@ pub const Type = struct {
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
- .function => {
- const fn_info = ty.fnInfo();
- if (fn_info.is_noinline) {
- try writer.writeAll("noinline ");
- }
- try writer.writeAll("fn(");
- for (fn_info.param_types, 0..) |param_ty, i| {
- if (i != 0) try writer.writeAll(", ");
- if (fn_info.paramIsComptime(i)) {
- try writer.writeAll("comptime ");
- }
- if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) {
- try writer.writeAll("noalias ");
- };
- if (param_ty.isGenericPoison()) {
- try writer.writeAll("anytype");
- } else {
- try print(param_ty, writer, mod);
- }
- }
- if (fn_info.is_var_args) {
- if (fn_info.param_types.len != 0) {
- try writer.writeAll(", ");
- }
- try writer.writeAll("...");
- }
- try writer.writeAll(") ");
- if (fn_info.alignment != 0) {
- try writer.print("align({d}) ", .{fn_info.alignment});
- }
- if (fn_info.cc != .Unspecified) {
- try writer.writeAll("callconv(.");
- try writer.writeAll(@tagName(fn_info.cc));
- try writer.writeAll(") ");
- }
- if (fn_info.return_type.isGenericPoison()) {
- try writer.writeAll("anytype");
- } else {
- try print(fn_info.return_type, writer, mod);
- }
- },
-
.error_union => {
const error_union = ty.castTag(.error_union).?.data;
try print(error_union.error_set, writer, mod);
@@ -1158,6 +992,48 @@ pub const Type = struct {
const decl = mod.declPtr(enum_type.decl);
try decl.renderFullyQualifiedName(mod, writer);
},
+ .func_type => |fn_info| {
+ if (fn_info.is_noinline) {
+ try writer.writeAll("noinline ");
+ }
+ try writer.writeAll("fn(");
+ for (fn_info.param_types, 0..) |param_ty, i| {
+ if (i != 0) try writer.writeAll(", ");
+ if (std.math.cast(u5, i)) |index| {
+ if (fn_info.paramIsComptime(index)) {
+ try writer.writeAll("comptime ");
+ }
+ if (fn_info.paramIsNoalias(index)) {
+ try writer.writeAll("noalias ");
+ }
+ }
+ if (param_ty == .generic_poison_type) {
+ try writer.writeAll("anytype");
+ } else {
+ try print(param_ty.toType(), writer, mod);
+ }
+ }
+ if (fn_info.is_var_args) {
+ if (fn_info.param_types.len != 0) {
+ try writer.writeAll(", ");
+ }
+ try writer.writeAll("...");
+ }
+ try writer.writeAll(") ");
+ if (fn_info.alignment != 0) {
+ try writer.print("align({d}) ", .{fn_info.alignment});
+ }
+ if (fn_info.cc != .Unspecified) {
+ try writer.writeAll("callconv(.");
+ try writer.writeAll(@tagName(fn_info.cc));
+ try writer.writeAll(") ");
+ }
+ if (fn_info.return_type == .generic_poison_type) {
+ try writer.writeAll("anytype");
+ } else {
+ try print(fn_info.return_type.toType(), writer, mod);
+ }
+ },
// values, not types
.undef => unreachable,
@@ -1174,6 +1050,11 @@ pub const Type = struct {
}
}
+ pub fn toIntern(ty: Type) InternPool.Index {
+ assert(ty.ip_index != .none);
+ return ty.ip_index;
+ }
+
pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
if (self.ip_index != .none) return self.ip_index.toValue();
switch (self.tag()) {
@@ -1223,7 +1104,7 @@ pub const Type = struct {
if (ignore_comptime_only) {
return true;
} else if (ty.childType(mod).zigTypeTag(mod) == .Fn) {
- return !ty.childType(mod).fnInfo().is_generic;
+ return !mod.typeToFunc(ty.childType(mod)).?.is_generic;
} else if (strat == .sema) {
return !(try strat.sema.typeRequiresComptime(ty));
} else {
@@ -1231,12 +1112,6 @@ pub const Type = struct {
}
},
- // These are false because they are comptime-only types.
- // These are function *bodies*, not pointers.
- // Special exceptions have to be made when emitting functions due to
- // this returning false.
- .function => return false,
-
.optional => {
const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn()) {
@@ -1262,7 +1137,7 @@ pub const Type = struct {
// to comptime-only types do not, with the exception of function pointers.
if (ignore_comptime_only) return true;
const child_ty = ptr_type.elem_type.toType();
- if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic;
+ if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic;
if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty));
return !comptimeOnly(ty, mod);
},
@@ -1293,6 +1168,13 @@ pub const Type = struct {
}
},
.error_union_type => @panic("TODO"),
+
+ // These are function *bodies*, not pointers.
+ // They return false here because they are comptime-only types.
+ // Special exceptions have to be made when emitting functions due to
+ // this returning false.
+ .func_type => false,
+
.simple_type => |t| switch (t) {
.f16,
.f32,
@@ -1436,8 +1318,6 @@ pub const Type = struct {
.error_set_single,
.error_set_inferred,
.error_set_merged,
- // These are function bodies, not function pointers.
- .function,
.error_union,
.anyframe_T,
=> false,
@@ -1448,12 +1328,21 @@ pub const Type = struct {
.optional => ty.isPtrLikeOptional(mod),
},
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .int_type => true,
- .ptr_type => true,
+ .int_type,
+ .ptr_type,
+ .vector_type,
+ => true,
+
+ .error_union_type,
+ .anon_struct_type,
+ .opaque_type,
+ // These are function bodies, not function pointers.
+ .func_type,
+ => false,
+
.array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod),
- .vector_type => true,
.opt_type => |child| child.toType().isPtrLikeOptional(mod),
- .error_union_type => false,
+
.simple_type => |t| switch (t) {
.f16,
.f32,
@@ -1509,12 +1398,10 @@ pub const Type = struct {
};
return struct_obj.layout != .Auto;
},
- .anon_struct_type => false,
.union_type => |union_type| switch (union_type.runtime_tag) {
.none, .safety => mod.unionPtr(union_type.index).layout != .Auto,
.tagged => false,
},
- .opaque_type => false,
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.auto => false,
.explicit, .nonexhaustive => true,
@@ -1546,7 +1433,7 @@ pub const Type = struct {
pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
switch (ty.zigTypeTag(mod)) {
.Fn => {
- const fn_info = ty.fnInfo();
+ const fn_info = mod.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
switch (fn_info.cc) {
@@ -1555,7 +1442,7 @@ pub const Type = struct {
.Inline => return false,
else => {},
}
- if (fn_info.return_type.comptimeOnly(mod)) return false;
+ if (fn_info.return_type.toType().comptimeOnly(mod)) return false;
return true;
},
else => return ty.hasRuntimeBits(mod),
@@ -1707,13 +1594,6 @@ pub const Type = struct {
switch (ty.ip_index) {
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
.none => switch (ty.tag()) {
- // represents machine code; not a pointer
- .function => {
- const alignment = ty.castTag(.function).?.data.alignment;
- if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment };
- return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
- },
-
.pointer,
.anyframe_T,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
@@ -1753,6 +1633,13 @@ pub const Type = struct {
.opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
.error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat),
+ // represents machine code; not a pointer
+ .func_type => |func_type| {
+ const alignment = @intCast(u32, func_type.alignment);
+ if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment };
+ return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
+ },
+
.simple_type => |t| switch (t) {
.bool,
.atomic_order,
@@ -2086,7 +1973,6 @@ pub const Type = struct {
.empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 },
.none => switch (ty.tag()) {
- .function => unreachable, // represents machine code; not a pointer
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
@@ -2187,6 +2073,7 @@ pub const Type = struct {
.opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
.error_union_type => @panic("TODO"),
+ .func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) {
.bool,
.atomic_order,
@@ -2408,7 +2295,6 @@ pub const Type = struct {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
- .function => unreachable, // represents machine code; not a pointer
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
@@ -2453,6 +2339,7 @@ pub const Type = struct {
},
.opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
+ .func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) {
.f16 => return 16,
.f32 => return 32,
@@ -3271,6 +3158,7 @@ pub const Type = struct {
.opt_type => unreachable,
.error_union_type => unreachable,
+ .func_type => unreachable,
.simple_type => unreachable, // handled via Index enum tag above
.union_type => unreachable,
@@ -3356,54 +3244,22 @@ pub const Type = struct {
};
}
- /// Asserts the type is a function.
- pub fn fnParamLen(self: Type) usize {
- return self.castTag(.function).?.data.param_types.len;
- }
-
- /// Asserts the type is a function. The length of the slice must be at least the length
- /// given by `fnParamLen`.
- pub fn fnParamTypes(self: Type, types: []Type) void {
- const payload = self.castTag(.function).?.data;
- @memcpy(types[0..payload.param_types.len], payload.param_types);
- }
-
- /// Asserts the type is a function.
- pub fn fnParamType(self: Type, index: usize) Type {
- switch (self.tag()) {
- .function => {
- const payload = self.castTag(.function).?.data;
- return payload.param_types[index];
- },
-
- else => unreachable,
- }
- }
-
/// Asserts the type is a function or a function pointer.
- pub fn fnReturnType(ty: Type) Type {
- const fn_ty = switch (ty.tag()) {
- .pointer => ty.castTag(.pointer).?.data.pointee_type,
- .function => ty,
- else => unreachable,
- };
- return fn_ty.castTag(.function).?.data.return_type;
+ pub fn fnReturnType(ty: Type, mod: *Module) Type {
+ return fnReturnTypeIp(ty, mod.intern_pool);
}
- /// Asserts the type is a function.
- pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention {
- return self.castTag(.function).?.data.cc;
+ pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type {
+ return switch (ip.indexToKey(ty.ip_index)) {
+ .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
+ .func_type => |func_type| func_type.return_type,
+ else => unreachable,
+ }.toType();
}
/// Asserts the type is a function.
- pub fn fnCallingConventionAllowsZigTypes(target: Target, cc: std.builtin.CallingConvention) bool {
- return switch (cc) {
- .Unspecified, .Async, .Inline => true,
- // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
- // The goal is to experiment with more integrated CPU/GPU code.
- .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
- else => false,
- };
+ pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention {
+ return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc;
}
pub fn isValidParamType(self: Type, mod: *const Module) bool {
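A function body type and a single pointer to one now resolve to the same return type through the intern pool; a usage sketch with hypothetical variable names:

    const ret_a = fn_body_ty.fnReturnType(mod); // ty is `fn (u8) bool`
    const ret_b = fn_ptr_ty.fnReturnType(mod);  // ty is `*const fn (u8) bool`
    assert(ret_a.eql(ret_b, mod));              // both return types are `bool`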
@@ -3421,12 +3277,8 @@ pub const Type = struct {
}
/// Asserts the type is a function.
- pub fn fnIsVarArgs(self: Type) bool {
- return self.castTag(.function).?.data.is_var_args;
- }
-
- pub fn fnInfo(ty: Type) Payload.Function.Data {
- return ty.castTag(.function).?.data;
+ pub fn fnIsVarArgs(ty: Type, mod: *Module) bool {
+ return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args;
}
pub fn isNumeric(ty: Type, mod: *const Module) bool {
@@ -3474,7 +3326,6 @@ pub const Type = struct {
.error_set_single,
.error_set,
.error_set_merged,
- .function,
.error_set_inferred,
.anyframe_T,
.pointer,
@@ -3500,7 +3351,12 @@ pub const Type = struct {
return null;
}
},
- .ptr_type => return null,
+
+ .ptr_type,
+ .error_union_type,
+ .func_type,
+ => return null,
+
.array_type => |array_type| {
if (array_type.len == 0)
return Value.initTag(.empty_array);
@@ -3514,13 +3370,13 @@ pub const Type = struct {
return null;
},
.opt_type => |child| {
- if (child.toType().isNoReturn()) {
- return Value.null;
+ if (child == .noreturn_type) {
+ return try mod.nullValue(ty);
} else {
return null;
}
},
- .error_union_type => return null,
+
.simple_type => |t| switch (t) {
.f16,
.f32,
@@ -3682,9 +3538,6 @@ pub const Type = struct {
.error_set_merged,
=> false,
- // These are function bodies, not function pointers.
- .function => true,
-
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
@@ -3721,6 +3574,9 @@ pub const Type = struct {
.vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod),
.opt_type => |child| child.toType().comptimeOnly(mod),
.error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod),
+ // These are function bodies, not function pointers.
+ .func_type => true,
+
.simple_type => |t| switch (t) {
.f16,
.f32,
@@ -4367,6 +4223,10 @@ pub const Type = struct {
return ty.ip_index == .generic_poison_type;
}
+ pub fn isBoundFn(ty: Type) bool {
+ return ty.ip_index == .none and ty.tag() == .bound_fn;
+ }
+
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@@ -4383,7 +4243,6 @@ pub const Type = struct {
// After this, the tag requires a payload.
pointer,
- function,
optional,
error_union,
anyframe_T,
@@ -4411,7 +4270,6 @@ pub const Type = struct {
.error_set_merged => Payload.ErrorSetMerged,
.pointer => Payload.Pointer,
- .function => Payload.Function,
.error_union => Payload.ErrorUnion,
.error_set_single => Payload.Name,
};
@@ -4508,36 +4366,6 @@ pub const Type = struct {
data: u16,
};
- pub const Function = struct {
- pub const base_tag = Tag.function;
-
- base: Payload = Payload{ .tag = base_tag },
- data: Data,
-
- // TODO look into optimizing this memory to take fewer bytes
- pub const Data = struct {
- param_types: []Type,
- comptime_params: [*]bool,
- return_type: Type,
- /// If zero use default target function code alignment.
- alignment: u32,
- noalias_bits: u32,
- cc: std.builtin.CallingConvention,
- is_var_args: bool,
- is_generic: bool,
- is_noinline: bool,
- align_is_generic: bool,
- cc_is_generic: bool,
- section_is_generic: bool,
- addrspace_is_generic: bool,
-
- pub fn paramIsComptime(self: @This(), i: usize) bool {
- assert(i < self.param_types.len);
- return self.comptime_params[i];
- }
- };
- };
-
pub const ErrorSet = struct {
pub const base_tag = Tag.error_set;
src/value.zig
@@ -602,6 +602,11 @@ pub const Value = struct {
return result;
}
+ pub fn toIntern(val: Value) InternPool.Index {
+ assert(val.ip_index != .none);
+ return val.ip_index;
+ }
+
/// Asserts that the value is representable as a type.
pub fn toType(self: Value) Type {
if (self.ip_index != .none) return self.ip_index.toType();
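With `toIntern` now mirrored on both `Type` and `Value`, an interned index round-trips through either wrapper; a quick sketch:

    const idx: InternPool.Index = .u8_type;
    assert(idx.toType().toIntern() == idx);  // Type  -> InternPool.Index
    assert(idx.toValue().toIntern() == idx); // Value -> InternPool.Index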
src/Zir.zig
@@ -2052,7 +2052,6 @@ pub const Inst = struct {
/// and `[]Ref`.
pub const Ref = enum(u32) {
u1_type = @enumToInt(InternPool.Index.u1_type),
- u5_type = @enumToInt(InternPool.Index.u5_type),
u8_type = @enumToInt(InternPool.Index.u8_type),
i8_type = @enumToInt(InternPool.Index.i8_type),
u16_type = @enumToInt(InternPool.Index.u16_type),
@@ -2121,8 +2120,8 @@ pub const Inst = struct {
zero_u8 = @enumToInt(InternPool.Index.zero_u8),
one = @enumToInt(InternPool.Index.one),
one_usize = @enumToInt(InternPool.Index.one_usize),
- one_u5 = @enumToInt(InternPool.Index.one_u5),
- four_u5 = @enumToInt(InternPool.Index.four_u5),
+ one_u8 = @enumToInt(InternPool.Index.one_u8),
+ four_u8 = @enumToInt(InternPool.Index.four_u8),
negative_one = @enumToInt(InternPool.Index.negative_one),
calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),