Commit 525f341f33
Changed files (56)
lib/std/Thread/Pool.zig
@@ -9,17 +9,19 @@ run_queue: RunQueue = .{},
is_running: bool = true,
allocator: std.mem.Allocator,
threads: []std.Thread,
+ids: std.AutoArrayHashMapUnmanaged(std.Thread.Id, void),
const RunQueue = std.SinglyLinkedList(Runnable);
const Runnable = struct {
runFn: RunProto,
};
-const RunProto = *const fn (*Runnable) void;
+const RunProto = *const fn (*Runnable, id: ?usize) void;
pub const Options = struct {
allocator: std.mem.Allocator,
n_jobs: ?u32 = null,
+ track_ids: bool = false,
};
pub fn init(pool: *Pool, options: Options) !void {
@@ -28,6 +30,7 @@ pub fn init(pool: *Pool, options: Options) !void {
pool.* = .{
.allocator = allocator,
.threads = &[_]std.Thread{},
+ .ids = .{},
};
if (builtin.single_threaded) {
@@ -35,6 +38,10 @@ pub fn init(pool: *Pool, options: Options) !void {
}
const thread_count = options.n_jobs orelse @max(1, std.Thread.getCpuCount() catch 1);
+ if (options.track_ids) {
+ try pool.ids.ensureTotalCapacity(allocator, 1 + thread_count);
+ pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {});
+ }
// kill and join any threads we spawned and free memory on error.
pool.threads = try allocator.alloc(std.Thread, thread_count);
@@ -49,6 +56,7 @@ pub fn init(pool: *Pool, options: Options) !void {
pub fn deinit(pool: *Pool) void {
pool.join(pool.threads.len); // kill and join all threads.
+ pool.ids.deinit(pool.allocator);
pool.* = undefined;
}
@@ -96,7 +104,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
wait_group: *WaitGroup,
- fn runFn(runnable: *Runnable) void {
+ fn runFn(runnable: *Runnable, _: ?usize) void {
const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
@call(.auto, func, closure.arguments);
@@ -134,6 +142,70 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
pool.cond.signal();
}
+/// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and
+/// `WaitGroup.finish` after it returns.
+///
+/// The first argument passed to `func` is a dense `usize` thread id, the rest
+/// of the arguments are passed from `args`. Requires the pool to have been
+/// initialized with `.track_ids = true`.
+///
+/// In the case that queuing the function call fails to allocate memory, or the
+/// target is single-threaded, the function is called directly.
+pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args: anytype) void {
+ wait_group.start();
+
+ if (builtin.single_threaded) {
+ @call(.auto, func, .{0} ++ args);
+ wait_group.finish();
+ return;
+ }
+
+ const Args = @TypeOf(args);
+ const Closure = struct {
+ arguments: Args,
+ pool: *Pool,
+ run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
+ wait_group: *WaitGroup,
+
+ fn runFn(runnable: *Runnable, id: ?usize) void {
+ const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
+ const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
+ @call(.auto, func, .{id.?} ++ closure.arguments);
+ closure.wait_group.finish();
+
+ // The thread pool's allocator is protected by the mutex.
+ const mutex = &closure.pool.mutex;
+ mutex.lock();
+ defer mutex.unlock();
+
+ closure.pool.allocator.destroy(closure);
+ }
+ };
+
+ {
+ pool.mutex.lock();
+
+ const closure = pool.allocator.create(Closure) catch {
+ const id = pool.ids.getIndex(std.Thread.getCurrentId());
+ pool.mutex.unlock();
+ @call(.auto, func, .{id.?} ++ args);
+ wait_group.finish();
+ return;
+ };
+ closure.* = .{
+ .arguments = args,
+ .pool = pool,
+ .wait_group = wait_group,
+ };
+
+ pool.run_queue.prepend(&closure.run_node);
+ pool.mutex.unlock();
+ }
+
+ // Notify waiting threads outside the lock to try and keep the critical section small.
+ pool.cond.signal();
+}
+
pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
if (builtin.single_threaded) {
@call(.auto, func, args);
@@ -181,14 +253,16 @@ fn worker(pool: *Pool) void {
pool.mutex.lock();
defer pool.mutex.unlock();
+ const id = if (pool.ids.count() > 0) pool.ids.count() else null;
+ if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {});
+
while (true) {
while (pool.run_queue.popFirst()) |run_node| {
// Temporarily unlock the mutex in order to execute the run_node
pool.mutex.unlock();
defer pool.mutex.lock();
- const runFn = run_node.data.runFn;
- runFn(&run_node.data);
+ run_node.data.runFn(&run_node.data, id);
}
// Stop executing instead of waiting if the thread pool is no longer running.
@@ -201,16 +275,18 @@ fn worker(pool: *Pool) void {
}
pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
+ var id: ?usize = null;
+
while (!wait_group.isDone()) {
- if (blk: {
- pool.mutex.lock();
- defer pool.mutex.unlock();
- break :blk pool.run_queue.popFirst();
- }) |run_node| {
- run_node.data.runFn(&run_node.data);
+ pool.mutex.lock();
+ if (pool.run_queue.popFirst()) |run_node| {
+ id = id orelse pool.ids.getIndex(std.Thread.getCurrentId());
+ pool.mutex.unlock();
+ run_node.data.runFn(&run_node.data, id);
continue;
}
+ pool.mutex.unlock();
wait_group.wait();
return;
}
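A minimal usage sketch of the new API (not part of this commit): `track_ids = true` plus `spawnWgId` give each queued task a dense thread id, which a caller can use to index per-thread storage without locking. The `sumChunk` worker and the partial-sum buffer below are hypothetical; only the `init`, `spawnWgId`, and `waitAndWork` calls follow the signatures added above.

const std = @import("std");

// The first parameter is the dense id passed in by spawnWgId; the remaining
// parameters come from the `args` tuple.
fn sumChunk(id: usize, chunk: []const u64, partials: []u64) void {
    var total: u64 = 0;
    for (chunk) |x| total += x;
    // Each dense id maps to exactly one thread, so this slot is only ever
    // written by one thread at a time and needs no extra synchronization.
    partials[id] += total;
}

pub fn parallelSum(gpa: std.mem.Allocator, data: []const u64) !u64 {
    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa, .track_ids = true });
    defer pool.deinit();

    // Ids are dense: 0 is the thread that called init, workers follow,
    // so one slot per worker plus one for the main thread suffices.
    const partials = try gpa.alloc(u64, pool.threads.len + 1);
    defer gpa.free(partials);
    @memset(partials, 0);

    var wg: std.Thread.WaitGroup = .{};
    var i: usize = 0;
    while (i < data.len) : (i += 1024) {
        const end = @min(i + 1024, data.len);
        pool.spawnWgId(&wg, sumChunk, .{ data[i..end], partials });
    }
    pool.waitAndWork(&wg);

    var total: u64 = 0;
    for (partials) |p| total += p;
    return total;
}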
src/arch/aarch64/abi.zig
@@ -5,8 +5,6 @@ const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
pub const Class = union(enum) {
memory,
@@ -17,44 +15,44 @@ pub const Class = union(enum) {
};
/// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, mod: *Module) Class {
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));
var maybe_float_bits: ?u16 = null;
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
.Struct => {
- if (ty.containerLayout(mod) == .@"packed") return .byval;
- const float_count = countFloats(ty, mod, &maybe_float_bits);
+ if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
+ const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer;
return .integer;
},
.Union => {
- if (ty.containerLayout(mod) == .@"packed") return .byval;
- const float_count = countFloats(ty, mod, &maybe_float_bits);
+ if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
+ const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer;
return .integer;
},
.Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
.Vector => {
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
// TODO is this controlled by a cpu feature?
if (bit_size > 128) return .memory;
return .byval;
},
.Optional => {
- std.debug.assert(ty.isPtrLikeOptional(mod));
+ std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
return .byval;
},
.Pointer => {
- std.debug.assert(!ty.isSlice(mod));
+ std.debug.assert(!ty.isSlice(pt.zcu));
return .byval;
},
.ErrorUnion,
@@ -76,16 +74,16 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
}
const sret_float_count = 4;
-fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
- const ip = &mod.intern_pool;
- const target = mod.getTarget();
+fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
const invalid = std.math.maxInt(u8);
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Union => {
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = zcu.typeToUnion(ty).?;
var max_count: u8 = 0;
for (union_obj.field_types.get(ip)) |field_ty| {
- const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits);
+ const field_count = countFloats(Type.fromInterned(field_ty), zcu, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > sret_float_count) return invalid;
@@ -93,12 +91,12 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
return max_count;
},
.Struct => {
- const fields_len = ty.structFieldCount(mod);
+ const fields_len = ty.structFieldCount(zcu);
var count: u8 = 0;
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
- const field_ty = ty.structFieldType(i, mod);
- const field_count = countFloats(field_ty, mod, maybe_float_bits);
+ const field_ty = ty.structFieldType(i, zcu);
+ const field_count = countFloats(field_ty, zcu, maybe_float_bits);
if (field_count == invalid) return invalid;
count += field_count;
if (count > sret_float_count) return invalid;
@@ -118,22 +116,22 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
}
}
-pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
- const ip = &mod.intern_pool;
- switch (ty.zigTypeTag(mod)) {
+pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
+ const ip = &zcu.intern_pool;
+ switch (ty.zigTypeTag(zcu)) {
.Union => {
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = zcu.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
- if (getFloatArrayType(Type.fromInterned(field_ty), mod)) |some| return some;
+ if (getFloatArrayType(Type.fromInterned(field_ty), zcu)) |some| return some;
}
return null;
},
.Struct => {
- const fields_len = ty.structFieldCount(mod);
+ const fields_len = ty.structFieldCount(zcu);
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
- const field_ty = ty.structFieldType(i, mod);
- if (getFloatArrayType(field_ty, mod)) |some| return some;
+ const field_ty = ty.structFieldType(i, zcu);
+ if (getFloatArrayType(field_ty, zcu)) |some| return some;
}
return null;
},
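The `Zcu.PerThread` migration seen above repeats mechanically across the backend files that follow: layout queries (`abiSize`, `abiAlignment`, `bitSize`, `structFieldOffset`, `hasRuntimeBits`) now take the per-thread handle `pt`, while purely type-shape queries (`zigTypeTag`, `containerLayout`, `optionalChild`, `typeToUnion`, ...) keep taking the compilation unit via `pt.zcu`. A hypothetical before/after helper illustrating that split (not from this commit):

const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");

// Before: every query went through the `*Zcu` (formerly aliased as `Module`).
fn returnsByRefOld(ty: Type, zcu: *Zcu) bool {
    if (ty.zigTypeTag(zcu) == .Vector) return false;
    return ty.abiSize(zcu) > 16;
}

// After: layout questions go through `pt`, type-shape questions through `pt.zcu`.
fn returnsByRefNew(ty: Type, pt: Zcu.PerThread) bool {
    if (ty.zigTypeTag(pt.zcu) == .Vector) return false;
    return ty.abiSize(pt) > 16;
}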
src/arch/aarch64/CodeGen.zig
@@ -12,11 +12,9 @@ const Type = @import("../../Type.zig");
const Value = @import("../../Value.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../../InternPool.zig");
const Compilation = @import("../../Compilation.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("../../tracy.zig").trace;
@@ -47,6 +45,7 @@ const gp = abi.RegisterClass.gp;
const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
+pt: Zcu.PerThread,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -59,7 +58,7 @@ args: []MCValue,
ret_mcv: MCValue,
fn_type: Type,
arg_index: u32,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
stack_align: u32,
/// MIR Instructions
@@ -331,15 +330,16 @@ const Self = @This();
pub fn generate(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const gpa = lf.comp.gpa;
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
@@ -355,8 +355,9 @@ pub fn generate(
}
try branch_stack.append(.{});
- var function = Self{
+ var function: Self = .{
.gpa = gpa,
+ .pt = pt,
.air = air,
.liveness = liveness,
.debug_output = debug_output,
@@ -476,7 +477,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
}
fn gen(self: *Self) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// stp fp, lr, [sp, #-16]!
@@ -526,8 +528,8 @@ fn gen(self: *Self) !void {
const ty = self.typeOfIndex(inst);
- const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
- const abi_align = ty.abiAlignment(mod);
+ const abi_size = @as(u32, @intCast(ty.abiSize(pt)));
+ const abi_align = ty.abiAlignment(pt);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -656,7 +658,8 @@ fn gen(self: *Self) !void {
}
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
@@ -1022,31 +1025,32 @@ fn allocMem(
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = self.typeOfIndex(inst).childType(mod);
- if (!elem_ty.hasRuntimeBits(mod)) {
+ if (!elem_ty.hasRuntimeBits(pt)) {
// return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized
// allocations will always have an offset > 0.
return @as(u32, 0);
}
- const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(mod);
+ const abi_align = elem_ty.abiAlignment(pt);
return self.allocMem(abi_size, abi_align, inst);
}
fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
- const mod = self.bin_file.comp.module.?;
- const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+ const pt = self.pt;
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
- const abi_align = elem_ty.abiAlignment(mod);
+ const abi_align = elem_ty.abiAlignment(pt);
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
@@ -1133,14 +1137,15 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = switch (self.ret_mcv) {
.none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
.stack_offset => blk: {
// self.ret_mcv is an address to where this function
// should store its result into
const ret_ty = self.fn_type.fnReturnType(mod);
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
// addr_reg will contain the address of where to store the
// result into
@@ -1170,7 +1175,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const operand = ty_op.operand;
const operand_mcv = try self.resolveInst(operand);
const operand_ty = self.typeOf(operand);
@@ -1251,7 +1257,8 @@ fn trunc(
operand_ty: Type,
dest_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const info_a = operand_ty.intInfo(mod);
const info_b = dest_ty.intInfo(mod);
@@ -1314,7 +1321,8 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -1409,7 +1417,8 @@ fn minMax(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM min/max on floats", .{}),
.Vector => return self.fail("TODO ARM min/max on vectors", .{}),
@@ -1899,7 +1908,8 @@ fn addSub(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
@@ -1960,7 +1970,8 @@ fn mul(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
@@ -1992,7 +2003,8 @@ fn divFloat(
_ = rhs_ty;
_ = maybe_inst;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div_float", .{}),
.Vector => return self.fail("TODO div_float on vectors", .{}),
@@ -2008,7 +2020,8 @@ fn divTrunc(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div on floats", .{}),
.Vector => return self.fail("TODO div on vectors", .{}),
@@ -2042,7 +2055,8 @@ fn divFloor(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div on floats", .{}),
.Vector => return self.fail("TODO div on vectors", .{}),
@@ -2075,7 +2089,8 @@ fn divExact(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div on floats", .{}),
.Vector => return self.fail("TODO div on vectors", .{}),
@@ -2111,7 +2126,8 @@ fn rem(
) InnerError!MCValue {
_ = maybe_inst;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO rem/mod on floats", .{}),
.Vector => return self.fail("TODO rem/mod on vectors", .{}),
@@ -2182,7 +2198,8 @@ fn modulo(
_ = rhs_ty;
_ = maybe_inst;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO mod on floats", .{}),
.Vector => return self.fail("TODO mod on vectors", .{}),
@@ -2200,7 +2217,8 @@ fn wrappingArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
@@ -2235,7 +2253,8 @@ fn bitwise(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
@@ -2270,7 +2289,8 @@ fn shiftExact(
) InnerError!MCValue {
_ = rhs_ty;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
@@ -2320,7 +2340,8 @@ fn shiftNormal(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
@@ -2360,7 +2381,8 @@ fn booleanOp(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Bool => {
assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2387,7 +2409,8 @@ fn ptrArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
assert(rhs_ty.eql(Type.usize, mod));
@@ -2397,7 +2420,7 @@ fn ptrArithmetic(
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
else => ptr_ty.childType(mod),
};
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const base_tag: Air.Inst.Tag = switch (tag) {
.ptr_add => .add,
@@ -2510,7 +2533,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2518,9 +2542,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
- const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt)));
+ const tuple_align = tuple_ty.abiAlignment(pt);
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -2638,7 +2662,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2646,9 +2671,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
- const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt)));
+ const tuple_align = tuple_ty.abiAlignment(pt);
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2862,7 +2887,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2870,9 +2896,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
- const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
+ const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt)));
+ const tuple_align = tuple_ty.abiAlignment(pt);
+ const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt)));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
@@ -3010,9 +3036,10 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
}
fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const payload_ty = optional_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
+ if (!payload_ty.hasRuntimeBits(pt)) return MCValue.none;
if (optional_ty.isPtrLikeOptional(mod)) {
// TODO should we reuse the operand here?
const raw_reg = try self.register_manager.allocReg(inst, gp);
@@ -3054,17 +3081,18 @@ fn errUnionErr(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
if (err_ty.errorSetIsEmpty(mod)) {
return MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return try error_union_bind.resolveToMcv(self);
}
- const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)));
+ const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, pt));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -3086,7 +3114,7 @@ fn errUnionErr(
);
const err_bit_offset = err_offset * 8;
- const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8;
+ const err_bit_size = @as(u32, @intCast(err_ty.abiSize(pt))) * 8;
_ = try self.addInst(.{
.tag = .ubfx, // errors are unsigned integers
@@ -3134,17 +3162,18 @@ fn errUnionPayload(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
if (err_ty.errorSetIsEmpty(mod)) {
return try error_union_bind.resolveToMcv(self);
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return MCValue.none;
}
- const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
+ const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -3166,7 +3195,7 @@ fn errUnionPayload(
);
const payload_bit_offset = payload_offset * 8;
- const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8;
+ const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(pt))) * 8;
_ = try self.addInst(.{
.tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
@@ -3246,7 +3275,8 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
if (self.liveness.isUnused(inst)) {
@@ -3255,7 +3285,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
const payload_ty = self.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBits(mod)) {
+ if (!payload_ty.hasRuntimeBits(pt)) {
break :result MCValue{ .immediate = 1 };
}
@@ -3275,9 +3305,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = reg };
}
- const optional_abi_size: u32 = @intCast(optional_ty.abiSize(mod));
- const optional_abi_align = optional_ty.abiAlignment(mod);
- const offset: u32 = @intCast(payload_ty.abiSize(mod));
+ const optional_abi_size: u32 = @intCast(optional_ty.abiSize(pt));
+ const optional_abi_align = optional_ty.abiAlignment(pt);
+ const offset: u32 = @intCast(payload_ty.abiSize(pt));
const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3291,20 +3321,21 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = ty_op.ty.toType();
const error_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
- const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
- const abi_align = error_union_ty.abiAlignment(mod);
+ const abi_size = @as(u32, @intCast(error_union_ty.abiSize(pt)));
+ const abi_align = error_union_ty.abiAlignment(pt);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
- const payload_off = errUnionPayloadOffset(payload_ty, mod);
- const err_off = errUnionErrorOffset(payload_ty, mod);
+ const payload_off = errUnionPayloadOffset(payload_ty, pt);
+ const err_off = errUnionErrorOffset(payload_ty, pt);
try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
@@ -3317,18 +3348,19 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const error_union_ty = ty_op.ty.toType();
const error_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
- const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
- const abi_align = error_union_ty.abiAlignment(mod);
+ const abi_size = @as(u32, @intCast(error_union_ty.abiSize(pt)));
+ const abi_align = error_union_ty.abiAlignment(pt);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
- const payload_off = errUnionPayloadOffset(payload_ty, mod);
- const err_off = errUnionErrorOffset(payload_ty, mod);
+ const payload_off = errUnionPayloadOffset(payload_ty, pt);
+ const err_off = errUnionErrorOffset(payload_ty, pt);
try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
@@ -3420,7 +3452,8 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
@@ -3444,9 +3477,10 @@ fn ptrElemVal(
ptr_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = ptr_ty.childType(mod);
- const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
// TODO optimize for elem_sizes of 1, 2, 4, 8
switch (elem_size) {
@@ -3486,7 +3520,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
@@ -3609,9 +3644,10 @@ fn reuseOperand(
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = ptr_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
switch (ptr) {
.none => unreachable,
@@ -3857,12 +3893,13 @@ fn genInlineMemsetCode(
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits(mod))
+ if (!elem_ty.hasRuntimeBits(pt))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -3888,8 +3925,9 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size = ty.abiSize(pt);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
@@ -3911,8 +3949,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
}
fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const abi_size = ty.abiSize(pt);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb_immediate,
@@ -3933,9 +3971,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
log.debug("store: storing {} to {}", .{ value, ptr });
- const abi_size = value_ty.abiSize(mod);
+ const abi_size = value_ty.abiSize(pt);
switch (ptr) {
.none => unreachable,
@@ -4087,11 +4125,12 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4112,11 +4151,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const operand = extra.struct_operand;
const index = extra.field_index;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
const struct_field_ty = struct_ty.structFieldType(index, mod);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -4162,13 +4202,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr);
const struct_ty = ty_pl.ty.toType().childType(mod);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod)));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, pt)));
switch (field_ptr) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4190,7 +4231,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty = self.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index;
@@ -4245,7 +4287,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
const ty = self.typeOf(callee);
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
@@ -4269,13 +4312,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.return_value == .stack_offset) {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType(mod);
- const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
- const ret_abi_align = ret_ty.abiAlignment(mod);
+ const ret_abi_size: u32 = @intCast(ret_ty.abiSize(pt));
+ const ret_abi_align = ret_ty.abiAlignment(pt);
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
try self.register_manager.getReg(ret_ptr_reg, null);
try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset });
@@ -4308,7 +4351,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (try self.air.value(callee, mod)) |func_value| {
+ if (try self.air.value(callee, pt)) |func_value| {
if (func_value.getFunction(mod)) |func| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl);
@@ -4421,7 +4464,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ret_ty = self.fn_type.fnReturnType(mod);
@@ -4440,7 +4484,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
//
// self.ret_mcv is an address to where this function
// should store its result into
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
},
else => unreachable,
@@ -4453,7 +4497,8 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
@@ -4477,8 +4522,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = un_op.toIndex().?;
if (self.air.instructions.items(.tag)[@intFromEnum(op_inst)] != .ret_ptr) {
- const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
- const abi_align = ret_ty.abiAlignment(mod);
+ const abi_size = @as(u32, @intCast(ret_ty.abiSize(pt)));
+ const abi_align = ret_ty.abiAlignment(pt);
const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4513,11 +4558,12 @@ fn cmp(
lhs_ty: Type,
op: math.CompareOperator,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Optional => blk: {
const payload_ty = lhs_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
@@ -4620,7 +4666,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const func = mod.funcInfo(extra.data.func);
@@ -4825,13 +4872,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
const payload_ty = operand_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt))
break :blk .{ .ty = operand_ty, .bind = operand_bind };
- const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
+ const offset = @as(u32, @intCast(payload_ty.abiSize(pt)));
const operand_mcv = try operand_bind.resolveToMcv(self);
const new_mcv: MCValue = switch (operand_mcv) {
.register => |source_reg| new: {
@@ -4881,7 +4929,8 @@ fn isErr(
error_union_bind: ReadArg.Bind,
error_union_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const error_type = error_union_ty.errorUnionSet(mod);
if (error_type.errorSetIsEmpty(mod)) {
@@ -4923,7 +4972,8 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -4950,7 +5000,8 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -4977,7 +5028,8 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -5004,7 +5056,8 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -5225,10 +5278,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const block_data = self.blocks.getPtr(block).?;
- if (self.typeOf(operand).hasRuntimeBits(mod)) {
+ if (self.typeOf(operand).hasRuntimeBits(pt)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -5402,8 +5455,9 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size = @as(u32, @intCast(ty.abiSize(pt)));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5462,7 +5516,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1, mod);
- const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
+ const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt)));
const raw_cond_reg = try self.register_manager.allocReg(null, gp);
const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
@@ -5495,7 +5549,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- const ptr_ty = try mod.singleMutPtrType(ty);
+ const ptr_ty = try pt.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5573,7 +5627,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5685,7 +5740,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.genLdrRegister(reg, reg.toX(), ty);
},
.stack_offset => |off| {
- const abi_size = ty.abiSize(mod);
+ const abi_size = ty.abiSize(pt);
switch (abi_size) {
1, 2, 4, 8 => {
@@ -5709,7 +5764,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.stack_argument_offset => |off| {
- const abi_size = ty.abiSize(mod);
+ const abi_size = ty.abiSize(pt);
switch (abi_size) {
1, 2, 4, 8 => {
@@ -5736,8 +5791,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
+ const pt = self.pt;
+ const abi_size = @as(u32, @intCast(ty.abiSize(pt)));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -5745,7 +5800,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(mod)) {
+ switch (ty.abiSize(pt)) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -5815,7 +5870,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
} else {
- const ptr_ty = try mod.singleMutPtrType(ty);
+ const ptr_ty = try pt.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5936,7 +5991,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.typeOf(ty_op.operand);
@@ -6056,7 +6112,8 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -6100,15 +6157,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.typeOf(pl_op.operand);
- const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
- const error_union_align = error_union_ty.abiAlignment(mod);
+ const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(pt)));
+ const error_union_align = error_union_ty.abiAlignment(pt);
// The error union will die in the body. However, we need the
// error union after the body in order to extract the payload
@@ -6137,14 +6194,15 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
// If the type has no codegen bits, no need to store it.
const inst_ty = self.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !inst_ty.isError(mod))
return MCValue{ .none = {} };
- const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?);
+ const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, pt)).?);
return self.getResolvedInstValue(inst_index);
}
@@ -6164,6 +6222,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
+ self.pt,
self.src_loc,
val,
self.owner_decl,
@@ -6199,7 +6258,8 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
@@ -6229,10 +6289,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+ const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
if (ret_ty_size == 0) {
assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
@@ -6244,7 +6304,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
+ const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt)));
if (param_size == 0) {
result_arg.* = .{ .none = {} };
continue;
@@ -6252,7 +6312,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
// values to spread across odd-numbered registers.
- if (Type.fromInterned(ty).abiAlignment(mod) == .@"16" and !self.target.isDarwin()) {
+ if (Type.fromInterned(ty).abiAlignment(pt) == .@"16" and !self.target.isDarwin()) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}
@@ -6270,7 +6330,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
ncrn = 8;
// TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
// that the entire stack space consumed by the arguments is 8-byte aligned.
- if (Type.fromInterned(ty).abiAlignment(mod) == .@"8") {
+ if (Type.fromInterned(ty).abiAlignment(pt) == .@"8") {
if (nsaa % 8 != 0) {
nsaa += 8 - (nsaa % 8);
}
@@ -6287,10 +6347,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
.Unspecified => {
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+ const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(pt)));
if (ret_ty_size == 0) {
assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
@@ -6309,9 +6369,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- if (Type.fromInterned(ty).abiSize(mod) > 0) {
- const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod));
- const param_alignment = Type.fromInterned(ty).abiAlignment(mod);
+ if (Type.fromInterned(ty).abiSize(pt) > 0) {
+ const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt));
+ const param_alignment = Type.fromInterned(ty).abiAlignment(pt);
stack_offset = @intCast(param_alignment.forward(stack_offset));
result_arg.* = .{ .stack_argument_offset = stack_offset };
@@ -6330,7 +6390,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return result;
}
-/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
+/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`.
fn wantSafety(self: *Self) bool {
return switch (self.bin_file.comp.root_mod.optimize_mode) {
.Debug => true,
@@ -6362,8 +6422,7 @@ fn parseRegName(name: []const u8) ?Register {
}
fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const abi_size = ty.abiSize(self.pt);
switch (reg.class()) {
.general_purpose => {
@@ -6391,11 +6450,9 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
}
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
- const mod = self.bin_file.comp.module.?;
- return self.air.typeOf(inst, &mod.intern_pool);
+ return self.air.typeOf(inst, &self.pt.zcu.intern_pool);
}
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
- const mod = self.bin_file.comp.module.?;
- return self.air.typeOfIndex(inst, &mod.intern_pool);
+ return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool);
}
src/arch/aarch64/Emit.zig
@@ -8,9 +8,7 @@ const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
@@ -22,7 +20,7 @@ bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),
prev_di_line: u32,
src/arch/arm/abi.zig
@@ -5,8 +5,6 @@ const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
pub const Class = union(enum) {
memory,
@@ -26,29 +24,29 @@ pub const Class = union(enum) {
pub const Context = enum { ret, arg };
-pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
- assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
+ assert(ty.hasRuntimeBitsIgnoreComptime(pt));
var maybe_float_bits: ?u16 = null;
const max_byval_size = 512;
- const ip = &mod.intern_pool;
- switch (ty.zigTypeTag(mod)) {
+ const ip = &pt.zcu.intern_pool;
+ switch (ty.zigTypeTag(pt.zcu)) {
.Struct => {
- const bit_size = ty.bitSize(mod);
- if (ty.containerLayout(mod) == .@"packed") {
+ const bit_size = ty.bitSize(pt);
+ if (ty.containerLayout(pt.zcu) == .@"packed") {
if (bit_size > 64) return .memory;
return .byval;
}
if (bit_size > max_byval_size) return .memory;
- const float_count = countFloats(ty, mod, &maybe_float_bits);
+ const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval;
- const fields = ty.structFieldCount(mod);
+ const fields = ty.structFieldCount(pt.zcu);
var i: u32 = 0;
while (i < fields) : (i += 1) {
- const field_ty = ty.structFieldType(i, mod);
- const field_alignment = ty.structFieldAlign(i, mod);
- const field_size = field_ty.bitSize(mod);
+ const field_ty = ty.structFieldType(i, pt.zcu);
+ const field_alignment = ty.structFieldAlign(i, pt);
+ const field_size = field_ty.bitSize(pt);
if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
return Class.arrSize(bit_size, 64);
}
@@ -56,19 +54,19 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
return Class.arrSize(bit_size, 32);
},
.Union => {
- const bit_size = ty.bitSize(mod);
- const union_obj = mod.typeToUnion(ty).?;
+ const bit_size = ty.bitSize(pt);
+ const union_obj = pt.zcu.typeToUnion(ty).?;
if (union_obj.getLayout(ip) == .@"packed") {
if (bit_size > 64) return .memory;
return .byval;
}
if (bit_size > max_byval_size) return .memory;
- const float_count = countFloats(ty, mod, &maybe_float_bits);
+ const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval;
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
- if (Type.fromInterned(field_ty).bitSize(mod) > 32 or
- mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
+ if (Type.fromInterned(field_ty).bitSize(pt) > 32 or
+ pt.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
{
return Class.arrSize(bit_size, 64);
}
@@ -79,28 +77,28 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
.Int => {
// TODO this is incorrect for _BitInt(128) but implementing
// this correctly makes implementing compiler-rt impossible.
- // const bit_size = ty.bitSize(mod);
+ // const bit_size = ty.bitSize(pt);
// if (bit_size > 64) return .memory;
return .byval;
},
.Enum, .ErrorSet => {
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
if (bit_size > 64) return .memory;
return .byval;
},
.Vector => {
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
// TODO is this controlled by a cpu feature?
if (ctx == .ret and bit_size > 128) return .memory;
if (bit_size > 512) return .memory;
return .byval;
},
.Optional => {
- assert(ty.isPtrLikeOptional(mod));
+ assert(ty.isPtrLikeOptional(pt.zcu));
return .byval;
},
.Pointer => {
- assert(!ty.isSlice(mod));
+ assert(!ty.isSlice(pt.zcu));
return .byval;
},
.ErrorUnion,
@@ -122,16 +120,16 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
}
const byval_float_count = 4;
-fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
- const ip = &mod.intern_pool;
- const target = mod.getTarget();
+fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
const invalid = std.math.maxInt(u32);
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Union => {
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = zcu.typeToUnion(ty).?;
var max_count: u32 = 0;
for (union_obj.field_types.get(ip)) |field_ty| {
- const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits);
+ const field_count = countFloats(Type.fromInterned(field_ty), zcu, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > byval_float_count) return invalid;
@@ -139,12 +137,12 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
return max_count;
},
.Struct => {
- const fields_len = ty.structFieldCount(mod);
+ const fields_len = ty.structFieldCount(zcu);
var count: u32 = 0;
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
- const field_ty = ty.structFieldType(i, mod);
- const field_count = countFloats(field_ty, mod, maybe_float_bits);
+ const field_ty = ty.structFieldType(i, zcu);
+ const field_count = countFloats(field_ty, zcu, maybe_float_bits);
if (field_count == invalid) return invalid;
count += field_count;
if (count > byval_float_count) return invalid;
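
Every hunk above in src/arch/arm/abi.zig makes the same mechanical change: classifyType and countFloats stop taking a bare *Zcu (via the old Module alias) and instead take either a Zcu.PerThread handle or a *Zcu named zcu, with layout queries (bitSize, abiSize, structFieldAlign, hasRuntimeBitsIgnoreComptime) routed through pt and purely semantic queries (zigTypeTag, containerLayout, typeToUnion, isSlice) routed through pt.zcu. The following is a minimal, self-contained sketch of that split, not the real compiler API; Zcu, PerThread, Type, Class, and the sizes used here are simplified stand-ins invented for illustration.

const std = @import("std");

// Hypothetical stand-ins; the real Zcu and Type carry far more state.
const Zcu = struct {
    pointer_size: u64,

    pub const PerThread = struct {
        zcu: *Zcu,
    };
};

const Class = enum { memory, byval };

const Type = enum {
    i32_type,
    slice_of_u8,

    // Semantic query: only needs the shared *Zcu, like zigTypeTag/isSlice above.
    fn isSlice(ty: Type, zcu: *Zcu) bool {
        _ = zcu;
        return ty == .slice_of_u8;
    }

    // Layout query: routed through the per-thread handle, like abiSize/bitSize above.
    fn abiSize(ty: Type, pt: Zcu.PerThread) u64 {
        return switch (ty) {
            .i32_type => 4,
            .slice_of_u8 => 2 * pt.zcu.pointer_size,
        };
    }
};

// Old shape: fn classifyType(ty: Type, mod: *Module, ctx: Context) Class
// New shape: the classifier takes `pt` and reaches shared state via `pt.zcu`.
fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
    if (ty.isSlice(pt.zcu)) return .memory;
    return if (ty.abiSize(pt) <= 16) .byval else .memory;
}

pub fn main() void {
    var zcu: Zcu = .{ .pointer_size = 8 };
    const pt: Zcu.PerThread = .{ .zcu = &zcu };
    std.debug.print("{s}\n", .{@tagName(classifyType(.slice_of_u8, pt))});
}

Under this reading, the shared *Zcu stays reachable from every per-thread handle, so callers that only need semantic information are unaffected while layout computations gain an explicit per-thread context.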
src/arch/arm/CodeGen.zig
@@ -12,11 +12,9 @@ const Type = @import("../../Type.zig");
const Value = @import("../../Value.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../../InternPool.zig");
const Compilation = @import("../../Compilation.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("../../tracy.zig").trace;
@@ -48,6 +46,7 @@ const gp = abi.RegisterClass.gp;
const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
+pt: Zcu.PerThread,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -59,7 +58,7 @@ args: []MCValue,
ret_mcv: MCValue,
fn_type: Type,
arg_index: u32,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
stack_align: u32,
/// MIR Instructions
@@ -261,7 +260,6 @@ const DbgInfoReloc = struct {
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
- const mod = function.bin_file.comp.module.?;
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) {
@@ -282,7 +280,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), loc);
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcOwnerDeclIndex(function.func_index), loc);
},
.plan9 => {},
.none => {},
@@ -290,7 +288,6 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
- const mod = function.bin_file.comp.module.?;
const is_ptr = switch (reloc.tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
@@ -326,7 +323,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), is_ptr, loc);
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcOwnerDeclIndex(function.func_index), is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -338,15 +335,16 @@ const Self = @This();
pub fn generate(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const gpa = lf.comp.gpa;
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
@@ -364,6 +362,7 @@ pub fn generate(
var function: Self = .{
.gpa = gpa,
+ .pt = pt,
.air = air,
.liveness = liveness,
.target = target,
@@ -482,7 +481,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
}
fn gen(self: *Self) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// push {fp, lr}
@@ -526,8 +526,8 @@ fn gen(self: *Self) !void {
const ty = self.typeOfIndex(inst);
- const abi_size: u32 = @intCast(ty.abiSize(mod));
- const abi_align = ty.abiAlignment(mod);
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const abi_align = ty.abiAlignment(pt);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -642,7 +642,8 @@ fn gen(self: *Self) !void {
}
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
@@ -1004,10 +1005,11 @@ fn allocMem(
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = self.typeOfIndex(inst).childType(mod);
- if (!elem_ty.hasRuntimeBits(mod)) {
+ if (!elem_ty.hasRuntimeBits(pt)) {
// As this stack item will never be dereferenced at runtime,
// return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized
@@ -1015,21 +1017,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return 0;
}
- const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(mod);
+ const abi_align = elem_ty.abiAlignment(pt);
return self.allocMem(abi_size, abi_align, inst);
}
fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
- const mod = self.bin_file.comp.module.?;
- const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+ const pt = self.pt;
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
- const abi_align = elem_ty.abiAlignment(mod);
+ const abi_align = elem_ty.abiAlignment(pt);
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
@@ -1112,14 +1114,15 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = switch (self.ret_mcv) {
.none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
.stack_offset => blk: {
// self.ret_mcv is an address to where this function
// should store its result into
const ret_ty = self.fn_type.fnReturnType(mod);
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
// addr_reg will contain the address of where to store the
// result into
@@ -1145,7 +1148,8 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
@@ -1154,8 +1158,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const operand_ty = self.typeOf(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
- const operand_abi_size = operand_ty.abiSize(mod);
- const dest_abi_size = dest_ty.abiSize(mod);
+ const operand_abi_size = operand_ty.abiSize(pt);
+ const dest_abi_size = dest_ty.abiSize(pt);
const info_a = operand_ty.intInfo(mod);
const info_b = dest_ty.intInfo(mod);
@@ -1211,7 +1215,8 @@ fn trunc(
operand_ty: Type,
dest_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const info_a = operand_ty.intInfo(mod);
const info_b = dest_ty.intInfo(mod);
@@ -1275,7 +1280,8 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const operand_ty = self.typeOf(ty_op.operand);
@@ -1371,7 +1377,8 @@ fn minMax(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM min/max on floats", .{}),
.Vector => return self.fail("TODO ARM min/max on vectors", .{}),
@@ -1580,7 +1587,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1588,9 +1596,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size: u32 = @intCast(tuple_ty.abiSize(mod));
- const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, mod));
+ const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt));
+ const tuple_align = tuple_ty.abiAlignment(pt);
+ const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -1693,7 +1701,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1701,9 +1710,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size: u32 = @intCast(tuple_ty.abiSize(mod));
- const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, mod));
+ const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt));
+ const tuple_align = tuple_ty.abiAlignment(pt);
+ const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -1857,15 +1866,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = result: {
const lhs_ty = self.typeOf(extra.lhs);
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size: u32 = @intCast(tuple_ty.abiSize(mod));
- const tuple_align = tuple_ty.abiAlignment(mod);
- const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, mod));
+ const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt));
+ const tuple_align = tuple_ty.abiAlignment(pt);
+ const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt));
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
@@ -2013,11 +2023,11 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.typeOfIndex(inst);
- const abi_size: u32 = @intCast(optional_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(optional_ty.abiSize(pt));
// Optional with a zero-bit payload type is just a boolean true
if (abi_size == 1) {
@@ -2036,17 +2046,18 @@ fn errUnionErr(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
if (err_ty.errorSetIsEmpty(mod)) {
return MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return try error_union_bind.resolveToMcv(self);
}
- const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, mod));
+ const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, pt));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -2068,7 +2079,7 @@ fn errUnionErr(
);
const err_bit_offset = err_offset * 8;
- const err_bit_size: u32 = @intCast(err_ty.abiSize(mod) * 8);
+ const err_bit_size: u32 = @intCast(err_ty.abiSize(pt) * 8);
_ = try self.addInst(.{
.tag = .ubfx, // errors are unsigned integers
@@ -2113,17 +2124,18 @@ fn errUnionPayload(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
if (err_ty.errorSetIsEmpty(mod)) {
return try error_union_bind.resolveToMcv(self);
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return MCValue.none;
}
- const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, mod));
+ const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, pt));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -2145,7 +2157,7 @@ fn errUnionPayload(
);
const payload_bit_offset = payload_offset * 8;
- const payload_bit_size: u32 = @intCast(payload_ty.abiSize(mod) * 8);
+ const payload_bit_size: u32 = @intCast(payload_ty.abiSize(pt) * 8);
_ = try self.addInst(.{
.tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
@@ -2223,20 +2235,21 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = ty_op.ty.toType();
const error_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
- const abi_size: u32 = @intCast(error_union_ty.abiSize(mod));
- const abi_align = error_union_ty.abiAlignment(mod);
+ const abi_size: u32 = @intCast(error_union_ty.abiSize(pt));
+ const abi_align = error_union_ty.abiAlignment(pt);
const stack_offset: u32 = @intCast(try self.allocMem(abi_size, abi_align, inst));
- const payload_off = errUnionPayloadOffset(payload_ty, mod);
- const err_off = errUnionErrorOffset(payload_ty, mod);
+ const payload_off = errUnionPayloadOffset(payload_ty, pt);
+ const err_off = errUnionErrorOffset(payload_ty, pt);
try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
@@ -2247,20 +2260,21 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = ty_op.ty.toType();
const error_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
- const abi_size: u32 = @intCast(error_union_ty.abiSize(mod));
- const abi_align = error_union_ty.abiAlignment(mod);
+ const abi_size: u32 = @intCast(error_union_ty.abiSize(pt));
+ const abi_align = error_union_ty.abiAlignment(pt);
const stack_offset: u32 = @intCast(try self.allocMem(abi_size, abi_align, inst));
- const payload_off = errUnionPayloadOffset(payload_ty, mod);
- const err_off = errUnionErrorOffset(payload_ty, mod);
+ const payload_off = errUnionPayloadOffset(payload_ty, pt);
+ const err_off = errUnionErrorOffset(payload_ty, pt);
try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
@@ -2364,9 +2378,10 @@ fn ptrElemVal(
ptr_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = ptr_ty.childType(mod);
- const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
switch (elem_size) {
1, 4 => {
@@ -2423,7 +2438,8 @@ fn ptrElemVal(
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
@@ -2466,7 +2482,8 @@ fn arrayElemVal(
array_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = array_ty.childType(mod);
const mcv = try array_bind.resolveToMcv(self);
@@ -2501,7 +2518,7 @@ fn arrayElemVal(
const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv };
- const ptr_ty = try mod.singleMutPtrType(elem_ty);
+ const ptr_ty = try pt.singleMutPtrType(elem_ty);
return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst);
},
@@ -2522,7 +2539,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
@@ -2656,9 +2674,10 @@ fn reuseOperand(
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = ptr_ty.childType(mod);
- const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
switch (ptr) {
.none => unreachable,
@@ -2733,11 +2752,12 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits(mod))
+ if (!elem_ty.hasRuntimeBits(pt))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -2746,7 +2766,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue.dead;
const dest_mcv: MCValue = blk: {
- const ptr_fits_dest = elem_ty.abiSize(mod) <= 4;
+ const ptr_fits_dest = elem_ty.abiSize(pt) <= 4;
if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
break :blk ptr;
@@ -2762,8 +2782,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const elem_size: u32 = @intCast(value_ty.abiSize(mod));
+ const pt = self.pt;
+ const elem_size: u32 = @intCast(value_ty.abiSize(pt));
switch (ptr) {
.none => unreachable,
@@ -2882,11 +2902,12 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
- const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, pt));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -2906,11 +2927,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const operand = extra.struct_operand;
const index = extra.field_index;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
- const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, mod));
+ const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, pt));
const struct_field_ty = struct_ty.structFieldType(index, mod);
switch (mcv) {
@@ -2974,7 +2996,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
);
const field_bit_offset = struct_field_offset * 8;
- const field_bit_size: u32 = @intCast(struct_field_ty.abiSize(mod) * 8);
+ const field_bit_size: u32 = @intCast(struct_field_ty.abiSize(pt) * 8);
_ = try self.addInst(.{
.tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
@@ -2996,7 +3018,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@@ -3007,7 +3030,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
}
- const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(extra.field_index, mod));
+ const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(extra.field_index, pt));
switch (field_ptr) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -3390,7 +3413,8 @@ fn addSub(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3446,7 +3470,8 @@ fn mul(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3479,7 +3504,8 @@ fn divFloat(
_ = rhs_ty;
_ = maybe_inst;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3495,7 +3521,8 @@ fn divTrunc(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3538,7 +3565,8 @@ fn divFloor(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3586,7 +3614,8 @@ fn divExact(
_ = rhs_ty;
_ = maybe_inst;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3603,7 +3632,8 @@ fn rem(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3672,7 +3702,8 @@ fn modulo(
_ = rhs_ty;
_ = maybe_inst;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
@@ -3690,7 +3721,8 @@ fn wrappingArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
@@ -3728,7 +3760,8 @@ fn bitwise(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
@@ -3773,7 +3806,8 @@ fn shiftExact(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
@@ -3812,7 +3846,8 @@ fn shiftNormal(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
@@ -3855,7 +3890,8 @@ fn booleanOp(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Bool => {
const lhs_immediate = try lhs_bind.resolveToImmediate(self);
@@ -3889,7 +3925,8 @@ fn ptrArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
assert(rhs_ty.eql(Type.usize, mod));
@@ -3899,7 +3936,7 @@ fn ptrArithmetic(
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
else => ptr_ty.childType(mod),
};
- const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
const base_tag: Air.Inst.Tag = switch (tag) {
.ptr_add => .add,
@@ -3926,8 +3963,9 @@ fn ptrArithmetic(
}
fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size = ty.abiSize(pt);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
@@ -3961,8 +3999,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
}
fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const abi_size = ty.abiSize(pt);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -4168,7 +4206,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty = self.typeOfIndex(inst);
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const src_index = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.src_index;
@@ -4223,7 +4262,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.typeOf(callee);
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
@@ -4253,11 +4293,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType(mod);
- const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
- const ret_abi_align = ret_ty.abiAlignment(mod);
+ const ret_abi_size: u32 = @intCast(ret_ty.abiSize(pt));
+ const ret_abi_align = ret_ty.abiAlignment(pt);
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
try self.register_manager.getReg(.r0, null);
try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
@@ -4293,7 +4333,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (try self.air.value(callee, mod)) |func_value| {
+ if (try self.air.value(callee, pt)) |func_value| {
if (func_value.getFunction(mod)) |func| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl);
@@ -4374,7 +4414,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ret_ty = self.fn_type.fnReturnType(mod);
@@ -4393,7 +4434,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
//
// self.ret_mcv is an address to where this function
// should store its result into
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
},
else => unreachable, // invalid return result
@@ -4406,7 +4447,8 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
@@ -4430,8 +4472,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = un_op.toIndex().?;
if (self.air.instructions.items(.tag)[@intFromEnum(op_inst)] != .ret_ptr) {
- const abi_size: u32 = @intCast(ret_ty.abiSize(mod));
- const abi_align = ret_ty.abiAlignment(mod);
+ const abi_size: u32 = @intCast(ret_ty.abiSize(pt));
+ const abi_align = ret_ty.abiAlignment(pt);
const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4467,11 +4509,12 @@ fn cmp(
lhs_ty: Type,
op: math.CompareOperator,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Optional => blk: {
const payload_ty = lhs_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
@@ -4573,7 +4616,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const func = mod.funcInfo(extra.data.func);
@@ -4785,9 +4829,10 @@ fn isNull(
operand_bind: ReadArg.Bind,
operand_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
if (operand_ty.isPtrLikeOptional(mod)) {
- assert(operand_ty.abiSize(mod) == 4);
+ assert(operand_ty.abiSize(pt) == 4);
const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
return self.cmp(operand_bind, imm_bind, Type.usize, .eq);
@@ -4819,7 +4864,8 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -4846,7 +4892,8 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -4866,7 +4913,8 @@ fn isErr(
error_union_bind: ReadArg.Bind,
error_union_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const error_type = error_union_ty.errorUnionSet(mod);
if (error_type.errorSetIsEmpty(mod)) {
@@ -4908,7 +4956,8 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -4935,7 +4984,8 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
@@ -5154,10 +5204,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const block_data = self.blocks.getPtr(block).?;
- if (self.typeOf(operand).hasRuntimeBits(mod)) {
+ if (self.typeOf(operand).hasRuntimeBits(pt)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -5325,8 +5375,9 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5407,7 +5458,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
const overflow_bit_ty = ty.structFieldType(1, mod);
- const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, mod));
+ const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, pt));
const cond_reg = try self.register_manager.allocReg(null, gp);
// C flag: movcs reg, #1
@@ -5445,7 +5496,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- const ptr_ty = try mod.singleMutPtrType(ty);
+ const ptr_ty = try pt.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5487,7 +5538,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5662,7 +5714,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
},
.stack_offset => |off| {
// TODO: maybe addressing from sp instead of fp
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
@@ -5713,7 +5765,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.stack_argument_offset => |off| {
- const abi_size = ty.abiSize(mod);
+ const abi_size = ty.abiSize(pt);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
@@ -5734,8 +5786,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = self.pt;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -5802,7 +5854,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
} else {
- const ptr_ty = try mod.singleMutPtrType(ty);
+ const ptr_ty = try pt.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5890,7 +5942,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.typeOf(ty_op.operand);
@@ -6009,7 +6062,8 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -6054,15 +6108,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+ const pt = self.pt;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.typeOf(pl_op.operand);
- const mod = self.bin_file.comp.module.?;
- const error_union_size: u32 = @intCast(error_union_ty.abiSize(mod));
- const error_union_align = error_union_ty.abiAlignment(mod);
+ const error_union_size: u32 = @intCast(error_union_ty.abiSize(pt));
+ const error_union_align = error_union_ty.abiAlignment(pt);
// The error union will die in the body. However, we need the
// error union after the body in order to extract the payload
@@ -6091,14 +6145,15 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
// If the type has no codegen bits, no need to store it.
const inst_ty = self.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !inst_ty.isError(mod))
return MCValue{ .none = {} };
- const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, mod)).?);
+ const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, pt)).?);
return self.getResolvedInstValue(inst_index);
}
@@ -6116,12 +6171,13 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
+ pt,
self.src_loc,
val,
- mod.funcOwnerDeclIndex(self.func_index),
+ pt.zcu.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -6152,7 +6208,8 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
@@ -6182,10 +6239,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size: u32 = @intCast(ret_ty.abiSize(mod));
+ const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
// TODO handle cases where multiple registers are used
if (ret_ty_size <= 4) {
result.return_value = .{ .register = c_abi_int_return_regs[0] };
@@ -6200,10 +6257,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- if (Type.fromInterned(ty).abiAlignment(mod) == .@"8")
+ if (Type.fromInterned(ty).abiAlignment(pt) == .@"8")
ncrn = std.mem.alignForward(usize, ncrn, 2);
- const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod));
+ const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
result_arg.* = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6215,7 +6272,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return self.fail("TODO MCValues split between registers and stack", .{});
} else {
ncrn = 4;
- if (Type.fromInterned(ty).abiAlignment(mod) == .@"8")
+ if (Type.fromInterned(ty).abiAlignment(pt) == .@"8")
nsaa = std.mem.alignForward(u32, nsaa, 8);
result_arg.* = .{ .stack_argument_offset = nsaa };
@@ -6229,10 +6286,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
.Unspecified => {
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size: u32 = @intCast(ret_ty.abiSize(mod));
+ const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
if (ret_ty_size == 0) {
assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
@@ -6250,9 +6307,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- if (Type.fromInterned(ty).abiSize(mod) > 0) {
- const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod));
- const param_alignment = Type.fromInterned(ty).abiAlignment(mod);
+ if (Type.fromInterned(ty).abiSize(pt) > 0) {
+ const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt));
+ const param_alignment = Type.fromInterned(ty).abiAlignment(pt);
stack_offset = @intCast(param_alignment.forward(stack_offset));
result_arg.* = .{ .stack_argument_offset = stack_offset };
@@ -6271,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return result;
}
-/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
+/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`.
fn wantSafety(self: *Self) bool {
return switch (self.bin_file.comp.root_mod.optimize_mode) {
.Debug => true,
@@ -6305,11 +6362,9 @@ fn parseRegName(name: []const u8) ?Register {
}
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
- const mod = self.bin_file.comp.module.?;
- return self.air.typeOf(inst, &mod.intern_pool);
+ return self.air.typeOf(inst, &self.pt.zcu.intern_pool);
}
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
- const mod = self.bin_file.comp.module.?;
- return self.air.typeOfIndex(inst, &mod.intern_pool);
+ return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool);
}
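
The src/arch/arm/CodeGen.zig hunks above apply the same change at the struct level: the backend gains a pt: Zcu.PerThread field, generate receives pt instead of digging the module out of lf.comp, and each helper re-derives const pt = self.pt; const mod = pt.zcu; in place of self.bin_file.comp.module.?. Below is a minimal sketch of that shape, assuming trimmed-down stand-ins for Zcu, PerThread, and CodeGen; typeOfSomething is a hypothetical placeholder for the real helpers, not an actual compiler function.

const std = @import("std");

// Simplified stand-ins for illustration only.
const Zcu = struct {
    gpa: std.mem.Allocator,

    pub const PerThread = struct {
        zcu: *Zcu,
    };
};

const CodeGen = struct {
    gpa: std.mem.Allocator,
    // New field, mirroring the `pt: Zcu.PerThread` added to the backend struct above.
    pt: Zcu.PerThread,

    // Helpers re-derive their handles from `self.pt` instead of reaching
    // through `self.bin_file.comp.module.?` as the old code did.
    fn typeOfSomething(self: *const CodeGen) usize {
        const zcu = self.pt.zcu;
        return @intFromPtr(zcu); // placeholder for a real intern-pool lookup
    }
};

// Old: generate pulled the module and allocator out of `lf.comp`.
// New: the per-thread handle comes in as a parameter and everything derives from it.
fn generate(pt: Zcu.PerThread) CodeGen {
    const zcu = pt.zcu;
    return .{ .gpa = zcu.gpa, .pt = pt };
}

pub fn main() void {
    var zcu: Zcu = .{ .gpa = std.heap.page_allocator };
    const function = generate(.{ .zcu = &zcu });
    std.debug.print("0x{x}\n", .{function.typeOfSomething()});
}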
src/arch/arm/Emit.zig
@@ -9,10 +9,8 @@ const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const Type = @import("../../Type.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
@@ -26,7 +24,7 @@ bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),
prev_di_line: u32,
src/arch/riscv64/abi.zig
@@ -9,15 +9,15 @@ const assert = std.debug.assert;
pub const Class = enum { memory, byval, integer, double_integer, fields };
-pub fn classifyType(ty: Type, mod: *Zcu) Class {
- const target = mod.getTarget();
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
+ const target = pt.zcu.getTarget();
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));
const max_byval_size = target.ptrBitWidth() * 2;
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
.Struct => {
- const bit_size = ty.bitSize(mod);
- if (ty.containerLayout(mod) == .@"packed") {
+ const bit_size = ty.bitSize(pt);
+ if (ty.containerLayout(pt.zcu) == .@"packed") {
if (bit_size > max_byval_size) return .memory;
return .byval;
}
@@ -25,12 +25,12 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class {
if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: {
var any_fp = false;
var field_count: usize = 0;
- for (0..ty.structFieldCount(mod)) |field_index| {
- const field_ty = ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ for (0..ty.structFieldCount(pt.zcu)) |field_index| {
+ const field_ty = ty.structFieldType(field_index, pt.zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (field_ty.isRuntimeFloat())
any_fp = true
- else if (!field_ty.isAbiInt(mod))
+ else if (!field_ty.isAbiInt(pt.zcu))
break :fields;
field_count += 1;
if (field_count > 2) break :fields;
@@ -45,8 +45,8 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class {
return .integer;
},
.Union => {
- const bit_size = ty.bitSize(mod);
- if (ty.containerLayout(mod) == .@"packed") {
+ const bit_size = ty.bitSize(pt);
+ if (ty.containerLayout(pt.zcu) == .@"packed") {
if (bit_size > max_byval_size) return .memory;
return .byval;
}
@@ -58,21 +58,21 @@ pub fn classifyType(ty: Type, mod: *Zcu) Class {
.Bool => return .integer,
.Float => return .byval,
.Int, .Enum, .ErrorSet => {
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
if (bit_size > max_byval_size) return .memory;
return .byval;
},
.Vector => {
- const bit_size = ty.bitSize(mod);
+ const bit_size = ty.bitSize(pt);
if (bit_size > max_byval_size) return .memory;
return .integer;
},
.Optional => {
- std.debug.assert(ty.isPtrLikeOptional(mod));
+ std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
return .byval;
},
.Pointer => {
- std.debug.assert(!ty.isSlice(mod));
+ std.debug.assert(!ty.isSlice(pt.zcu));
return .byval;
},
.ErrorUnion,
@@ -97,18 +97,19 @@ pub const SystemClass = enum { integer, float, memory, none };
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
+pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
+ const zcu = pt.zcu;
var result = [1]SystemClass{.none} ** 8;
const memory_class = [_]SystemClass{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
- switch (ty.zigTypeTag(zcu)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
.Bool, .Void, .NoReturn => {
result[0] = .integer;
return result;
},
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .Pointer => switch (ty.ptrSize(pt.zcu)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@@ -120,17 +121,17 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
},
},
.Optional => {
- if (ty.isPtrLikeOptional(zcu)) {
+ if (ty.isPtrLikeOptional(pt.zcu)) {
result[0] = .integer;
return result;
}
result[0] = .integer;
- if (ty.optionalChild(zcu).abiSize(zcu) == 0) return result;
+ if (ty.optionalChild(zcu).abiSize(pt) == 0) return result;
result[1] = .integer;
return result;
},
.Int, .Enum, .ErrorSet => {
- const int_bits = ty.intInfo(zcu).bits;
+ const int_bits = ty.intInfo(pt.zcu).bits;
if (int_bits <= 64) {
result[0] = .integer;
return result;
@@ -155,8 +156,8 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
unreachable; // support split float args
},
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(zcu);
- const payload_bits = payload_ty.bitSize(zcu);
+ const payload_ty = ty.errorUnionPayload(pt.zcu);
+ const payload_bits = payload_ty.bitSize(pt);
// the error union itself
result[0] = .integer;
@@ -167,8 +168,8 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
return memory_class;
},
.Struct => {
- const layout = ty.containerLayout(zcu);
- const ty_size = ty.abiSize(zcu);
+ const layout = ty.containerLayout(pt.zcu);
+ const ty_size = ty.abiSize(pt);
if (layout == .@"packed") {
assert(ty_size <= 16);
@@ -180,7 +181,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
return memory_class;
},
.Array => {
- const ty_size = ty.abiSize(zcu);
+ const ty_size = ty.abiSize(pt);
if (ty_size <= 8) {
result[0] = .integer;
return result;
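Throughout abi.zig the change is purely in how the query context is passed: sizing queries (`abiSize`, `bitSize`, `hasRuntimeBitsIgnoreComptime`) now take the `pt` handle, while layout and tag queries take `pt.zcu`; the classification rules themselves are untouched. As a rough, self-contained sketch of the slot-assignment idea in `classifySystem` (simplified to sizes only; the real function also handles floats, slices, optionals, error unions, and packed layouts):

```zig
const std = @import("std");

const SystemClass = enum { integer, memory, none };

/// Simplified sketch: one integer slot per 8 bytes, memory past 16 bytes.
fn classifyBySize(abi_size: u64) [8]SystemClass {
    var result = [1]SystemClass{.none} ** 8;
    if (abi_size <= 8) {
        result[0] = .integer;
    } else if (abi_size <= 16) {
        result[0] = .integer;
        result[1] = .integer;
    } else {
        result[0] = .memory;
    }
    return result;
}

test "classify by size" {
    try std.testing.expectEqual(SystemClass.integer, classifyBySize(8)[0]);
    try std.testing.expectEqual(SystemClass.integer, classifyBySize(12)[1]);
    try std.testing.expectEqual(SystemClass.memory, classifyBySize(24)[0]);
}
```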
src/arch/riscv64/CodeGen.zig
@@ -46,6 +46,7 @@ const RegisterLock = RegisterManager.RegisterLock;
const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
+pt: Zcu.PerThread,
air: Air,
mod: *Package.Module,
liveness: Liveness,
@@ -541,14 +542,14 @@ const FrameAlloc = struct {
.ref_count = 0,
};
}
- fn initType(ty: Type, zcu: *Zcu) FrameAlloc {
+ fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc {
return init(.{
- .size = ty.abiSize(zcu),
- .alignment = ty.abiAlignment(zcu),
+ .size = ty.abiSize(pt),
+ .alignment = ty.abiAlignment(pt),
});
}
- fn initSpill(ty: Type, zcu: *Zcu) FrameAlloc {
- const abi_size = ty.abiSize(zcu);
+ fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc {
+ const abi_size = ty.abiSize(pt);
const spill_size = if (abi_size < 8)
math.ceilPowerOfTwoAssert(u64, abi_size)
else
@@ -556,7 +557,7 @@ const FrameAlloc = struct {
return init(.{
.size = spill_size,
.pad = @intCast(spill_size - abi_size),
- .alignment = ty.abiAlignment(zcu).maxStrict(
+ .alignment = ty.abiAlignment(pt).maxStrict(
Alignment.fromNonzeroByteUnits(@min(spill_size, 8)),
),
});
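Only the sizing context changes in `FrameAlloc.initType`/`initSpill`; the spill rounding itself is as before: ABI sizes below 8 bytes are rounded up to the next power of two, and the difference is recorded as padding. A standalone sketch of that arithmetic (the function name here is illustrative):

```zig
const std = @import("std");

/// Round a small ABI size up to a power-of-two spill slot, as initSpill does.
fn spillSize(abi_size: u64) u64 {
    return if (abi_size < 8) std.math.ceilPowerOfTwoAssert(u64, abi_size) else abi_size;
}

test "spill slot rounding" {
    try std.testing.expectEqual(@as(u64, 4), spillSize(3)); // pad = 1
    try std.testing.expectEqual(@as(u64, 8), spillSize(8)); // already a full slot
    try std.testing.expectEqual(@as(u64, 24), spillSize(24)); // larger sizes kept as-is
}
```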
@@ -696,6 +697,7 @@ const CallView = enum(u1) {
pub fn generate(
bin_file: *link.File,
+ pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
@@ -703,9 +705,9 @@ pub fn generate(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const comp = bin_file.comp;
- const gpa = comp.gpa;
- const zcu = comp.module.?;
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
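The `generate` signature change shows the shape of the whole migration: the per-thread handle arrives as an explicit parameter, operations that may intern new values go through `pt`, and read-only queries go through `pt.zcu`. A toy model of that split, using made-up stand-in types rather than the compiler's real `Zcu`/`Zcu.PerThread`:

```zig
const std = @import("std");

// Stand-ins for Zcu and Zcu.PerThread; names and fields are illustrative only.
const Zcu = struct {
    gpa: std.mem.Allocator,
    interned: std.ArrayListUnmanaged(u64) = .{},
};

const PerThread = struct {
    zcu: *Zcu,
    tid: usize,

    // Mutating operations hang off the per-thread handle...
    fn intern(pt: PerThread, value: u64) !usize {
        try pt.zcu.interned.append(pt.zcu.gpa, value);
        return pt.zcu.interned.items.len - 1;
    }
};

// ...while pure queries only need the shared *Zcu.
fn lookup(zcu: *const Zcu, index: usize) u64 {
    return zcu.interned.items[index];
}

test "per-thread handle sketch" {
    var zcu: Zcu = .{ .gpa = std.testing.allocator };
    defer zcu.interned.deinit(zcu.gpa);
    const pt: PerThread = .{ .zcu = &zcu, .tid = 0 };
    const idx = try pt.intern(42);
    try std.testing.expectEqual(@as(u64, 42), lookup(&zcu, idx));
}
```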
@@ -726,6 +728,7 @@ pub fn generate(
var function = Func{
.gpa = gpa,
.air = air,
+ .pt = pt,
.mod = mod,
.liveness = liveness,
.target = target,
@@ -787,11 +790,11 @@ pub fn generate(
function.args = call_info.args;
function.ret_mcv = call_info.return_value;
function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(zcu),
- .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align),
+ .size = Type.usize.abiSize(pt),
+ .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align),
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(zcu),
+ .size = Type.usize.abiSize(pt),
.alignment = Alignment.min(
call_info.stack_align,
Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -803,7 +806,7 @@ pub fn generate(
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.spill_frame), FrameAlloc.init(.{
.size = 0,
- .alignment = Type.usize.abiAlignment(zcu),
+ .alignment = Type.usize.abiAlignment(pt),
}));
function.gen() catch |err| switch (err) {
@@ -821,9 +824,10 @@ pub fn generate(
};
defer mir.deinit(gpa);
- var emit = Emit{
+ var emit: Emit = .{
+ .bin_file = bin_file,
.lower = .{
- .bin_file = bin_file,
+ .pt = pt,
.allocator = gpa,
.mir = mir,
.cc = fn_info.cc,
@@ -875,10 +879,10 @@ fn formatWipMir(
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const comp = data.func.bin_file.comp;
- const mod = comp.root_mod;
- var lower = Lower{
- .bin_file = data.func.bin_file,
+ const pt = data.func.pt;
+ const comp = pt.zcu.comp;
+ var lower: Lower = .{
+ .pt = pt,
.allocator = data.func.gpa,
.mir = .{
.instructions = data.func.mir_instructions.slice(),
@@ -889,7 +893,7 @@ fn formatWipMir(
.src_loc = data.func.src_loc,
.output_mode = comp.config.output_mode,
.link_mode = comp.config.link_mode,
- .pic = mod.pic,
+ .pic = comp.root_mod.pic,
};
var first = true;
for ((lower.lowerMir(data.inst) catch |err| switch (err) {
@@ -933,7 +937,7 @@ fn formatDecl(
}
fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) {
return .{ .data = .{
- .mod = func.bin_file.comp.module.?,
+ .mod = func.pt.zcu,
.decl_index = decl_index,
} };
}
@@ -950,7 +954,7 @@ fn formatAir(
) @TypeOf(writer).Error!void {
@import("../../print_air.zig").dumpInst(
data.inst,
- data.func.bin_file.comp.module.?,
+ data.func.pt,
data.func.air,
data.func.liveness,
);
@@ -1044,8 +1048,9 @@ const required_features = [_]Target.riscv.Feature{
};
fn gen(func: *Func) !void {
- const mod = func.bin_file.comp.module.?;
- const fn_info = mod.typeToFunc(func.fn_type).?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
+ const fn_info = zcu.typeToFunc(func.fn_type).?;
inline for (required_features) |feature| {
if (!func.hasFeature(feature)) {
@@ -1071,7 +1076,7 @@ fn gen(func: *Func) !void {
// The address where to store the return value for the caller is in a
// register which the callee is free to clobber. Therefore, we purposely
// spill it to stack immediately.
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt));
try func.genSetMem(
.{ .frame = frame_index },
0,
@@ -1205,7 +1210,8 @@ fn gen(func: *Func) !void {
}
fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const air_tags = func.air.instructions.items(.tag);
@@ -1672,44 +1678,46 @@ fn ensureProcessDeathCapacity(func: *Func, additional_count: usize) !void {
}
fn memSize(func: *Func, ty: Type) Memory.Size {
- const mod = func.bin_file.comp.module.?;
- return switch (ty.zigTypeTag(mod)) {
+ const pt = func.pt;
+ const zcu = pt.zcu;
+ return switch (ty.zigTypeTag(zcu)) {
.Float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
- else => Memory.Size.fromByteSize(ty.abiSize(mod)),
+ else => Memory.Size.fromByteSize(ty.abiSize(pt)),
};
}
fn splitType(func: *Func, ty: Type) ![2]Type {
- const zcu = func.bin_file.comp.module.?;
- const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
+ const pt = func.pt;
+ const classes = mem.sliceTo(&abi.classifySystem(ty, pt), .none);
var parts: [2]Type = undefined;
if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
part.* = switch (class) {
.integer => switch (part_i) {
0 => Type.u64,
1 => part: {
- const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?;
- const elem_ty = try zcu.intType(.unsigned, @intCast(elem_size * 8));
- break :part switch (@divExact(ty.abiSize(zcu) - 8, elem_size)) {
+ const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?;
+ const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8));
+ break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) {
1 => elem_ty,
- else => |len| try zcu.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
+ else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
};
},
else => unreachable,
},
else => return func.fail("TODO: splitType class {}", .{class}),
};
- } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
- return func.fail("TODO implement splitType for {}", .{ty.fmt(zcu)});
+ } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts;
+ return func.fail("TODO implement splitType for {}", .{ty.fmt(pt)});
}
/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
- const mod = func.bin_file.comp.module.?;
- const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
+ const pt = func.pt;
+ const zcu = pt.zcu;
+ const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(ty.bitSize(mod)),
+ .bits = @intCast(ty.bitSize(pt)),
};
const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return;
switch (int_info.signedness) {
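Only the plumbing changes in `truncateRegister`; the shift amount it computes, `64 - bits % 64`, is still used to clear (unsigned) or sign-extend (signed) the upper bits of the 64-bit register. The exact instruction pair lies outside this hunk, but the arithmetic can be checked in isolation (a minimal sketch, assuming 1 <= bits < 64; the backend returns early when bits is a multiple of 64):

```zig
const std = @import("std");

/// Keep only the low `bits` of a 64-bit register image, zero- or
/// sign-extending the rest via a shift-left/shift-right pair.
fn truncate64(value: u64, bits: u6, signedness: std.builtin.Signedness) u64 {
    const shift: u6 = @intCast(64 - @as(u7, bits));
    return switch (signedness) {
        .unsigned => (value << shift) >> shift,
        .signed => blk: {
            const shifted: i64 = @bitCast(value << shift);
            break :blk @as(u64, @bitCast(shifted >> shift));
        },
    };
}

test "truncate register image" {
    // u5: only the low five bits survive.
    try std.testing.expectEqual(@as(u64, 0x1f), truncate64(0xff, 5, .unsigned));
    // i5: 0b11111 is -1, so the sign bit propagates through the whole register.
    try std.testing.expectEqual(~@as(u64, 0), truncate64(0x1f, 5, .signed));
}
```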
@@ -1780,7 +1788,8 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
}
fn symbolIndex(func: *Func) !u32 {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const decl_index = zcu.funcOwnerDeclIndex(func.func_index);
return switch (func.bin_file.tag) {
.elf => blk: {
@@ -1817,19 +1826,21 @@ fn allocFrameIndex(func: *Func, alloc: FrameAlloc) !FrameIndex {
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(func: *Func, inst: Air.Inst.Index) !FrameIndex {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ptr_ty = func.typeOfIndex(inst);
const val_ty = ptr_ty.childType(zcu);
return func.allocFrameIndex(FrameAlloc.init(.{
- .size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
- return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(zcu)});
+ .size = math.cast(u32, val_ty.abiSize(pt)) orelse {
+ return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
},
- .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
+ .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"),
}));
}
fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.Float => .float,
.Vector => @panic("TODO: typeRegClass for Vectors"),
@@ -1838,7 +1849,8 @@ fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass {
}
fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.Float => abi.Registers.Float.general_purpose,
.Vector => @panic("TODO: regGeneralClassForType for Vectors"),
@@ -1847,7 +1859,8 @@ fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet
}
fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.Float => abi.Registers.Float.temporary,
.Vector => @panic("TODO: regTempClassForType for Vectors"),
@@ -1856,13 +1869,13 @@ fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
}
fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
- const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
- return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(zcu)});
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return func.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
- const min_size: u32 = switch (elem_ty.zigTypeTag(zcu)) {
+ const min_size: u32 = switch (elem_ty.zigTypeTag(pt.zcu)) {
.Float => 4,
.Vector => @panic("allocRegOrMem Vector"),
else => 8,
@@ -1874,7 +1887,7 @@ fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool
}
}
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, pt));
return .{ .load_frame = .{ .index = frame_index } };
}
@@ -1955,7 +1968,7 @@ pub fn spillInstruction(func: *Func, reg: Register, inst: Air.Inst.Index) !void
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(func: *Func, ty: Type, mcv: MCValue) !Register {
- log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.bin_file.comp.module.?)});
+ log.debug("copyToTmpRegister ty: {}", .{ty.fmt(func.pt)});
const reg = try func.register_manager.allocReg(null, func.regTempClassForType(ty));
try func.genSetReg(ty, reg, mcv);
return reg;
@@ -2004,7 +2017,8 @@ fn airFpext(func: *Func, inst: Air.Inst.Index) !void {
}
fn airIntCast(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = func.typeOf(ty_op.operand);
const dst_ty = func.typeOfIndex(inst);
@@ -2040,7 +2054,7 @@ fn airIntCast(func: *Func, inst: Air.Inst.Index) !void {
break :result dst_mcv;
} orelse return func.fail("TODO: implement airIntCast from {} to {}", .{
- src_ty.fmt(zcu), dst_ty.fmt(zcu),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -2067,7 +2081,8 @@ fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void {
fn airNot(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOf(ty_op.operand);
@@ -2106,12 +2121,12 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
}
fn airSlice(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = func.typeOfIndex(inst);
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
const ptr_ty = func.typeOf(bin_op.lhs);
try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs });
@@ -2119,7 +2134,7 @@ fn airSlice(func: *Func, inst: Air.Inst.Index) !void {
const len_ty = func.typeOf(bin_op.rhs);
try func.genSetMem(
.{ .frame = frame_index },
- @intCast(ptr_ty.abiSize(zcu)),
+ @intCast(ptr_ty.abiSize(pt)),
len_ty,
.{ .air_ref = bin_op.rhs },
);
@@ -2129,14 +2144,15 @@ fn airSlice(func: *Func, inst: Air.Inst.Index) !void {
}
fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dst_mcv = try func.binOp(inst, tag, bin_op.lhs, bin_op.rhs);
const dst_ty = func.typeOfIndex(inst);
if (dst_ty.isAbiInt(zcu)) {
- const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
- const bit_size: u32 = @intCast(dst_ty.bitSize(zcu));
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const bit_size: u32 = @intCast(dst_ty.bitSize(pt));
if (abi_size * 8 > bit_size) {
const dst_lock = switch (dst_mcv) {
.register => |dst_reg| func.register_manager.lockRegAssumeUnused(dst_reg),
@@ -2150,7 +2166,7 @@ fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const tmp_reg, const tmp_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(tmp_lock);
- const hi_ty = try zcu.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1));
+ const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1));
const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
try func.genSetReg(hi_ty, tmp_reg, hi_mcv);
try func.truncateRegister(dst_ty, tmp_reg);
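For the over-wide-integer case above, the numbers are easier to follow with a concrete, hypothetical example: a `u100` result has `abi_size * 8 = 128 > 100`, its top limb holds `(100 - 1) % 64 + 1 = 36` bits, and that limb lives `100 / 64 * 8 = 8` bytes into the value. Plain arithmetic, mirroring the expressions in the hunk:

```zig
const std = @import("std");

test "high limb of a u100 binop result" {
    const bit_size: u32 = 100;
    const hi_bits = (bit_size - 1) % 64 + 1; // 36 bits live in the top limb
    const hi_byte_off = bit_size / 64 * 8; // and that limb starts 8 bytes in
    try std.testing.expectEqual(@as(u32, 36), hi_bits);
    try std.testing.expectEqual(@as(u32, 8), hi_byte_off);
}
```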
@@ -2170,7 +2186,7 @@ fn binOp(
rhs_air: Air.Inst.Ref,
) !MCValue {
_ = maybe_inst;
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
const lhs_ty = func.typeOf(lhs_air);
const rhs_ty = func.typeOf(rhs_air);
@@ -2189,7 +2205,7 @@ fn binOp(
return func.fail("binOp libcall runtime-float ops", .{});
}
- if (lhs_ty.bitSize(zcu) > 64) return func.fail("TODO: binOp >= 64 bits", .{});
+ if (lhs_ty.bitSize(pt) > 64) return func.fail("TODO: binOp >= 64 bits", .{});
const lhs_mcv = try func.resolveInst(lhs_air);
const rhs_mcv = try func.resolveInst(rhs_air);
@@ -2237,8 +2253,9 @@ fn genBinOp(
rhs_ty: Type,
dst_reg: Register,
) !void {
- const zcu = func.bin_file.comp.module.?;
- const bit_size = lhs_ty.bitSize(zcu);
+ const pt = func.pt;
+ const zcu = pt.zcu;
+ const bit_size = lhs_ty.bitSize(pt);
assert(bit_size <= 64);
const is_unsigned = lhs_ty.isUnsignedInt(zcu);
@@ -2349,7 +2366,7 @@ fn genBinOp(
defer func.register_manager.unlockReg(tmp_lock);
// RISC-V has no immediate mul, so we copy the size to a temporary register
- const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
+ const elem_size = lhs_ty.elemType2(zcu).abiSize(pt);
const elem_size_reg = try func.copyToTmpRegister(Type.usize, .{ .immediate = elem_size });
try func.genBinOp(
@@ -2613,7 +2630,8 @@ fn airPtrArithmetic(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
}
fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -2632,7 +2650,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
const add_result_reg_lock = func.register_manager.lockRegAssumeUnused(add_result_reg);
defer func.register_manager.unlockReg(add_result_reg_lock);
- const shift_amount: u6 = @intCast(Type.usize.bitSize(zcu) - int_info.bits);
+ const shift_amount: u6 = @intCast(Type.usize.bitSize(pt) - int_info.bits);
const shift_reg, const shift_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(shift_lock);
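The `shift_amount` computed here is the width difference between the 64-bit register and the narrow result type (for a `u8` add it is 56). One common way such a shift feeds an overflow check is to truncate the full-width sum and compare it against the original; the actual instruction sequence is not part of this hunk, so the sketch below only illustrates the arithmetic:

```zig
const std = @import("std");

test "overflow check via shift truncation (u8 add on a 64-bit register)" {
    const shift: u6 = 64 - 8; // Type.usize.bitSize - int_info.bits for a u8
    const sum: u64 = 200 + 100; // 300 does not fit in a u8
    const truncated = (sum << shift) >> shift;
    try std.testing.expectEqual(@as(u64, 44), truncated); // 300 mod 256
    try std.testing.expect(truncated != sum); // mismatch signals overflow
}
```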
@@ -2663,7 +2681,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
try func.genSetMem(
.{ .frame = offset.index },
- offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
lhs_ty,
add_result,
);
@@ -2682,7 +2700,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
try func.genSetMem(
.{ .frame = offset.index },
- offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
Type.u1,
.{ .register = overflow_reg },
);
@@ -2697,7 +2715,8 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
}
fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -2727,7 +2746,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
try func.genSetMem(
.{ .frame = offset.index },
- offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
lhs_ty,
.{ .register = dest_reg },
);
@@ -2757,7 +2776,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
try func.genSetMem(
.{ .frame = offset.index },
- offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
Type.u1,
.{ .register = overflow_reg },
);
@@ -2808,7 +2827,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
try func.genSetMem(
.{ .frame = offset.index },
- offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
+ offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
Type.u1,
.{ .register = overflow_reg },
);
@@ -2825,7 +2844,8 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
}
fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -2840,8 +2860,8 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
// genSetReg needs to support register_offset src_mcv for this to be true.
const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false);
- const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
- const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
+ const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt));
+ const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, pt));
const dest_reg, const dest_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(dest_lock);
@@ -2957,11 +2977,11 @@ fn airShlSat(func: *Func, inst: Air.Inst.Index) !void {
}
fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const pl_ty = func.typeOfIndex(inst);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
const opt_mcv = try func.resolveInst(ty_op.operand);
if (func.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -2993,7 +3013,8 @@ fn airOptionalPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void {
fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const err_union_ty = func.typeOf(ty_op.operand);
const err_ty = err_union_ty.errorUnionSet(zcu);
const payload_ty = err_union_ty.errorUnionPayload(zcu);
@@ -3004,11 +3025,11 @@ fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void {
break :result .{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :result operand;
}
- const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu));
+ const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, pt));
switch (operand) {
.register => |reg| {
@@ -3052,13 +3073,14 @@ fn genUnwrapErrUnionPayloadMir(
err_union_ty: Type,
err_union: MCValue,
) !MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const payload_ty = err_union_ty.errorUnionPayload(zcu);
const result: MCValue = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
- const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, zcu));
+ const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt));
switch (err_union) {
.load_frame => |frame_addr| break :result .{ .load_frame = .{
.index = frame_addr.index,
@@ -3127,11 +3149,12 @@ fn airSaveErrReturnTraceIndex(func: *Func, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const pl_ty = func.typeOf(ty_op.operand);
- if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 };
+ if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 };
const opt_ty = func.typeOfIndex(inst);
const pl_mcv = try func.resolveInst(ty_op.operand);
@@ -3148,7 +3171,7 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
try func.genCopy(pl_ty, opt_mcv, pl_mcv);
if (!same_repr) {
- const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
+ const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
switch (opt_mcv) {
.load_frame => |frame_addr| try func.genSetMem(
.{ .frame = frame_addr.index },
@@ -3167,7 +3190,8 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const eu_ty = ty_op.ty.toType();
@@ -3176,11 +3200,11 @@ fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void {
const operand = try func.resolveInst(ty_op.operand);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 };
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 };
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3191,7 +3215,8 @@ fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const eu_ty = ty_op.ty.toType();
@@ -3199,11 +3224,11 @@ fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void {
const err_ty = eu_ty.errorUnionSet(zcu);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try func.resolveInst(ty_op.operand);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try func.resolveInst(ty_op.operand);
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef);
const operand = try func.resolveInst(ty_op.operand);
try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3327,15 +3352,16 @@ fn airPtrSlicePtrPtr(func: *Func, inst: Air.Inst.Index) !void {
}
fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void {
- const mod = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = result: {
const elem_ty = func.typeOfIndex(inst);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
const slice_ty = func.typeOf(bin_op.lhs);
- const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
const elem_ptr = try func.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
const dst_mcv = try func.allocRegOrMem(elem_ty, inst, false);
try func.load(dst_mcv, elem_ptr, slice_ptr_field_type);
@@ -3352,7 +3378,8 @@ fn airSliceElemPtr(func: *Func, inst: Air.Inst.Index) !void {
}
fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const slice_ty = func.typeOf(lhs);
const slice_mcv = try func.resolveInst(lhs);
const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
@@ -3362,7 +3389,7 @@ fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
defer if (slice_mcv_lock) |lock| func.register_manager.unlockReg(lock);
const elem_ty = slice_ty.childType(zcu);
- const elem_size = elem_ty.abiSize(zcu);
+ const elem_size = elem_ty.abiSize(pt);
const index_ty = func.typeOf(rhs);
const index_mcv = try func.resolveInst(rhs);
@@ -3394,7 +3421,8 @@ fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
}
fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
const result_ty = func.typeOfIndex(inst);
@@ -3406,14 +3434,14 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
const index_ty = func.typeOf(bin_op.rhs);
const elem_ty = array_ty.childType(zcu);
- const elem_abi_size = elem_ty.abiSize(zcu);
+ const elem_abi_size = elem_ty.abiSize(pt);
const addr_reg, const addr_reg_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(addr_reg_lock);
switch (array_mcv) {
.register => {
- const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
try func.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv);
try func.genSetReg(Type.usize, addr_reg, .{ .lea_frame = .{ .index = frame_index } });
},
@@ -3451,7 +3479,8 @@ fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void {
}
fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3474,7 +3503,7 @@ fn airPtrElemPtr(func: *Func, inst: Air.Inst.Index) !void {
}
const elem_ty = base_ptr_ty.elemType2(zcu);
- const elem_abi_size = elem_ty.abiSize(zcu);
+ const elem_abi_size = elem_ty.abiSize(pt);
const index_ty = func.typeOf(extra.rhs);
const index_mcv = try func.resolveInst(extra.rhs);
const index_lock: ?RegisterLock = switch (index_mcv) {
@@ -3536,7 +3565,8 @@ fn airPopcount(func: *Func, inst: Air.Inst.Index) !void {
}
fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
const ty = func.typeOf(ty_op.operand);
@@ -3545,7 +3575,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
switch (scalar_ty.zigTypeTag(zcu)) {
.Int => if (ty.zigTypeTag(zcu) == .Vector) {
- return func.fail("TODO implement airAbs for {}", .{ty.fmt(zcu)});
+ return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
const return_mcv = try func.copyToNewRegister(inst, operand);
const operand_reg = return_mcv.register;
@@ -3615,7 +3645,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
break :result return_mcv;
},
- else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(zcu)}),
+ else => return func.fail("TODO: implement airAbs {}", .{scalar_ty.fmt(pt)}),
}
break :result .unreach;
@@ -3626,7 +3656,8 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty = func.typeOf(ty_op.operand);
const operand = try func.resolveInst(ty_op.operand);
@@ -3746,12 +3777,13 @@ fn reuseOperandAdvanced(
}
fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = func.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits(zcu))
+ if (!elem_ty.hasRuntimeBits(pt))
break :result .none;
const ptr = try func.resolveInst(ty_op.operand);
@@ -3759,7 +3791,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
if (func.liveness.isUnused(inst) and !is_volatile)
break :result .unreach;
- const elem_size = elem_ty.abiSize(zcu);
+ const elem_size = elem_ty.abiSize(pt);
const dst_mcv: MCValue = blk: {
// Pointer is 8 bytes, and if the element is more than that, we cannot reuse it.
@@ -3778,10 +3810,11 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
}
fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerError!void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const dst_ty = ptr_ty.childType(zcu);
- log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(zcu), dst_mcv });
+ log.debug("loading {}:{} into {}", .{ ptr_mcv, ptr_ty.fmt(pt), dst_mcv });
switch (ptr_mcv) {
.none,
@@ -3833,9 +3866,7 @@ fn airStore(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
/// Loads `value` into the "payload" of `pointer`.
fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type, src_ty: Type) !void {
- const zcu = func.bin_file.comp.module.?;
-
- log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(zcu), ptr_mcv, ptr_ty.fmt(zcu) });
+ log.debug("storing {}:{} in {}:{}", .{ src_mcv, src_ty.fmt(func.pt), ptr_mcv, ptr_ty.fmt(func.pt) });
switch (ptr_mcv) {
.none => unreachable,
@@ -3881,7 +3912,8 @@ fn airStructFieldPtrIndex(func: *Func, inst: Air.Inst.Index, index: u8) !void {
}
fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ptr_field_ty = func.typeOfIndex(inst);
const ptr_container_ty = func.typeOf(operand);
const ptr_container_ty_info = ptr_container_ty.ptrInfo(zcu);
@@ -3889,12 +3921,12 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const field_offset: i32 = if (zcu.typeToPackedStruct(container_ty)) |struct_obj|
if (ptr_field_ty.ptrInfo(zcu).packed_offset.host_size == 0)
- @divExact(zcu.structPackedFieldBitOffset(struct_obj, index) +
+ @divExact(pt.structPackedFieldBitOffset(struct_obj, index) +
ptr_container_ty_info.packed_offset.bit_offset, 8)
else
0
else
- @intCast(container_ty.structFieldOffset(index, zcu));
+ @intCast(container_ty.structFieldOffset(index, pt));
const src_mcv = try func.resolveInst(operand);
const dst_mcv = if (switch (src_mcv) {
@@ -3906,7 +3938,8 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
}
fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
- const mod = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -3914,16 +3947,15 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
const index = extra.field_index;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
- const zcu = func.bin_file.comp.module.?;
const src_mcv = try func.resolveInst(operand);
const struct_ty = func.typeOf(operand);
const field_ty = struct_ty.structFieldType(index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
const field_off: u32 = switch (struct_ty.containerLayout(zcu)) {
- .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8),
+ .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, pt) * 8),
.@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type|
- zcu.structPackedFieldBitOffset(struct_type, index)
+ pt.structPackedFieldBitOffset(struct_type, index)
else
0,
};
@@ -3958,15 +3990,15 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
break :result if (field_off == 0) dst_mcv else try func.copyToNewRegister(inst, dst_mcv);
},
.load_frame => {
- const field_abi_size: u32 = @intCast(field_ty.abiSize(mod));
+ const field_abi_size: u32 = @intCast(field_ty.abiSize(pt));
if (field_off % 8 == 0) {
const field_byte_off = @divExact(field_off, 8);
const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref();
- const field_bit_size = field_ty.bitSize(mod);
+ const field_bit_size = field_ty.bitSize(pt);
if (field_abi_size <= 8) {
- const int_ty = try mod.intType(
- if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned,
+ const int_ty = try pt.intType(
+ if (field_ty.isAbiInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned,
@intCast(field_bit_size),
);
@@ -3978,7 +4010,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
break :result try func.copyToNewRegister(inst, dst_mcv);
}
- const container_abi_size: u32 = @intCast(struct_ty.abiSize(mod));
+ const container_abi_size: u32 = @intCast(struct_ty.abiSize(pt));
const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and
func.reuseOperand(inst, operand, 0, src_mcv))
off_mcv
@@ -4014,7 +4046,8 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void {
}
fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
const owner_decl = zcu.funcOwnerDeclIndex(func.func_index);
@@ -4139,7 +4172,8 @@ fn genCall(
arg_tys: []const Type,
args: []const MCValue,
) !MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const fn_ty = switch (info) {
.air => |callee| fn_info: {
@@ -4150,7 +4184,7 @@ fn genCall(
else => unreachable,
};
},
- .lib => |lib| try zcu.funcType(.{
+ .lib => |lib| try pt.funcType(.{
.param_types = lib.param_types,
.return_type = lib.return_type,
.cc = .C,
@@ -4208,7 +4242,7 @@ fn genCall(
try reg_locks.appendSlice(&func.register_manager.lockRegs(2, regs));
},
.indirect => |reg_off| {
- frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu));
+ frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, pt));
try func.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg);
try func.register_manager.getReg(reg_off.reg, null);
try reg_locks.append(func.register_manager.lockReg(reg_off.reg));
@@ -4221,7 +4255,7 @@ fn genCall(
.none, .unreach => {},
.indirect => |reg_off| {
const ret_ty = Type.fromInterned(fn_info.return_type);
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt));
try func.genSetReg(Type.usize, reg_off.reg, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
});
@@ -4251,7 +4285,7 @@ fn genCall(
// on linking.
switch (info) {
.air => |callee| {
- if (try func.air.value(callee, zcu)) |func_value| {
+ if (try func.air.value(callee, pt)) |func_value| {
const func_key = zcu.intern_pool.indexToKey(func_value.ip_index);
switch (switch (func_key) {
else => func_key,
@@ -4324,7 +4358,8 @@ fn genCall(
}
fn airRet(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
if (safety) {
@@ -4394,7 +4429,8 @@ fn airRetLoad(func: *Func, inst: Air.Inst.Index) !void {
fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
const lhs_ty = func.typeOf(bin_op.lhs);
@@ -4415,7 +4451,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
.ErrorSet => Type.anyerror,
.Optional => blk: {
const payload_ty = lhs_ty.optionalChild(zcu);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(zcu)) {
break :blk Type.usize;
@@ -4503,7 +4539,8 @@ fn genVarDbgInfo(
mcv: MCValue,
name: [:0]const u8,
) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
@@ -4595,13 +4632,14 @@ fn condBr(func: *Func, cond_ty: Type, condition: MCValue) !Mir.Inst.Index {
}
fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const pl_ty = opt_ty.optionalChild(zcu);
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
.{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
else
- .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
+ .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
const return_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true);
assert(return_mcv == .register); // should not be larger 8 bytes
@@ -4642,7 +4680,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
return return_mcv;
}
assert(some_info.ty.ip_index == .bool_type);
- const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu));
+ const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
_ = opt_abi_size;
return func.fail("TODO: isNull some_info.off != 0 register", .{});
},
@@ -4742,7 +4780,8 @@ fn airIsErr(func: *Func, inst: Air.Inst.Index) !void {
}
fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
const operand_ptr = try func.resolveInst(un_op);
@@ -4768,10 +4807,11 @@ fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void {
/// Result is in the return register.
fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
_ = maybe_inst;
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const err_ty = eu_ty.errorUnionSet(zcu);
if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false
- const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
+ const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), pt));
const return_reg, const return_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(return_lock);
@@ -4858,7 +4898,8 @@ fn isNonErr(func: *Func, inst: Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MC
}
fn airIsNonErrPtr(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
const operand_ptr = try func.resolveInst(un_op);
@@ -5063,12 +5104,12 @@ fn performReloc(func: *Func, inst: Mir.Inst.Index) void {
}
fn airBr(func: *Func, inst: Air.Inst.Index) !void {
- const mod = func.bin_file.comp.module.?;
+ const pt = func.pt;
const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block_ty = func.typeOfIndex(br.block_inst);
const block_unused =
- !block_ty.hasRuntimeBitsIgnoreComptime(mod) or func.liveness.isUnused(br.block_inst);
+ !block_ty.hasRuntimeBitsIgnoreComptime(pt) or func.liveness.isUnused(br.block_inst);
const block_tracking = func.inst_tracking.getPtr(br.block_inst).?;
const block_data = func.blocks.getPtr(br.block_inst).?;
const first_br = block_data.relocs.items.len == 0;
@@ -5288,8 +5329,6 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
/// Sets the value of `dst_mcv` to the value of `src_mcv`.
fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
- const zcu = func.bin_file.comp.module.?;
-
// There isn't anything to store
if (dst_mcv == .none) return;
@@ -5362,7 +5401,7 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
} },
else => unreachable,
});
- part_disp += @intCast(dst_ty.abiSize(zcu));
+ part_disp += @intCast(dst_ty.abiSize(func.pt));
}
},
else => return func.fail("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }),
@@ -5555,8 +5594,9 @@ fn genInlineMemset(
/// Sets the value of `src_mcv` into `reg`. Assumes you have a lock on it.
fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void {
- const zcu = func.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(zcu));
+ const pt = func.pt;
+ const zcu = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
if (abi_size > 8) return std.debug.panic("tried to set reg with size {}", .{abi_size});
@@ -5784,8 +5824,8 @@ fn genSetMem(
ty: Type,
src_mcv: MCValue,
) InnerError!void {
- const mod = func.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = func.pt;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const dst_ptr_mcv: MCValue = switch (base) {
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
.frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
@@ -5883,7 +5923,7 @@ fn genSetMem(
var part_disp: i32 = disp;
for (try func.splitType(ty), src_regs) |src_ty, src_reg| {
try func.genSetMem(base, part_disp, src_ty, .{ .register = src_reg });
- part_disp += @intCast(src_ty.abiSize(mod));
+ part_disp += @intCast(src_ty.abiSize(pt));
}
},
.immediate => {
@@ -5914,7 +5954,8 @@ fn airIntFromPtr(func: *Func, inst: Air.Inst.Index) !void {
}
fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = if (func.liveness.isUnused(inst)) .unreach else result: {
@@ -5926,10 +5967,10 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
- const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and
+ const dst_mcv = if (dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and
func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true);
- try func.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) {
+ try func.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) {
.lt => dst_ty,
.eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty,
.gt => src_ty,
@@ -5940,17 +5981,18 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and
dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv;
- const abi_size = dst_ty.abiSize(zcu);
- const bit_size = dst_ty.bitSize(zcu);
+ const abi_size = dst_ty.abiSize(pt);
+ const bit_size = dst_ty.bitSize(pt);
if (abi_size * 8 <= bit_size) break :result dst_mcv;
- return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(zcu), dst_ty.fmt(zcu) });
+ return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(pt), dst_ty.fmt(pt) });
};
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const slice_ty = func.typeOfIndex(inst);
@@ -5959,11 +6001,11 @@ fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void {
const array_ty = ptr_ty.childType(zcu);
const array_len = array_ty.arrayLen(zcu);
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
try func.genSetMem(
.{ .frame = frame_index },
- @intCast(ptr_ty.abiSize(zcu)),
+ @intCast(ptr_ty.abiSize(pt)),
Type.usize,
.{ .immediate = array_len },
);
@@ -6015,7 +6057,8 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr
}
fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
result: {
@@ -6037,7 +6080,7 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
};
defer if (src_val_lock) |lock| func.register_manager.unlockReg(lock);
- const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu));
+ const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt));
if (elem_abi_size == 1) {
const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
@@ -6068,7 +6111,7 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
switch (dst_ptr_ty.ptrSize(zcu)) {
.Slice => return func.fail("TODO: airMemset Slices", .{}),
.One => {
- const elem_ptr_ty = try zcu.singleMutPtrType(elem_ty);
+ const elem_ptr_ty = try pt.singleMutPtrType(elem_ty);
const len = dst_ptr_ty.childType(zcu).arrayLen(zcu);
@@ -6110,7 +6153,8 @@ fn airTagName(func: *Func, inst: Air.Inst.Index) !void {
}
fn airErrorName(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const err_ty = func.typeOf(un_op);
@@ -6126,7 +6170,7 @@ fn airErrorName(func: *Func, inst: Air.Inst.Index) !void {
// this is now the base address of the error name table
const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, zcu);
if (func.bin_file.cast(link.File.Elf)) |elf_file| {
- const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
+ const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
return func.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym = elf_file.symbol(sym_index);
try func.genSetReg(Type.usize, addr_reg, .{ .load_symbol = .{ .sym = sym.esym_index } });
@@ -6239,7 +6283,8 @@ fn airReduce(func: *Func, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const result_ty = func.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(zcu));
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -6248,21 +6293,21 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
switch (result_ty.zigTypeTag(zcu)) {
.Struct => {
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
if (result_ty.containerLayout(zcu) == .@"packed") {
const struct_obj = zcu.typeToStruct(result_ty).?;
try func.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 },
- .{ .immediate = result_ty.abiSize(zcu) },
+ .{ .immediate = result_ty.abiSize(pt) },
);
for (elements, 0..) |elem, elem_i_usize| {
const elem_i: u32 = @intCast(elem_i_usize);
- if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, zcu);
- const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
+ const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt));
if (elem_bit_size > 64) {
return func.fail(
"TODO airAggregateInit implement packed structs with large fields",
@@ -6270,9 +6315,9 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
);
}
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i);
+ const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try func.resolveInst(elem);
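The packed-struct path now gets field bit offsets from `pt`, but the byte/bit split is unchanged. For a hypothetical field at bit offset 70 accessed through an 8-byte chunk, the expressions above evaluate to a byte offset of 8 and a bit offset of 6:

```zig
const std = @import("std");

test "packed field byte/bit split" {
    const elem_abi_size: u32 = 8; // the field is accessed through a 64-bit chunk
    const elem_abi_bits = elem_abi_size * 8;
    const elem_off: u32 = 70; // hypothetical bit offset inside the packed struct
    const elem_byte_off = elem_off / elem_abi_bits * elem_abi_size;
    const elem_bit_off = elem_off % elem_abi_bits;
    try std.testing.expectEqual(@as(u32, 8), elem_byte_off);
    try std.testing.expectEqual(@as(u32, 6), elem_bit_off);
}
```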
@@ -6293,10 +6338,10 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
return func.fail("TODO: airAggregateInit packed structs", .{});
}
} else for (elements, 0..) |elem, elem_i| {
- if ((try result_ty.structFieldValueComptime(zcu, elem_i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, zcu);
- const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
+ const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt));
const elem_mcv = try func.resolveInst(elem);
try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv);
}
@@ -6304,8 +6349,8 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
},
.Array => {
const elem_ty = result_ty.childType(zcu);
- const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
- const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
+ const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
for (elements, 0..) |elem, elem_i| {
const elem_mcv = try func.resolveInst(elem);
@@ -6325,7 +6370,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
);
break :result .{ .load_frame = .{ .index = frame_index } };
},
- else => return func.fail("TODO: airAggregate {}", .{result_ty.fmt(zcu)}),
+ else => return func.fail("TODO: airAggregate {}", .{result_ty.fmt(pt)}),
}
};
@@ -6364,11 +6409,11 @@ fn airMulAdd(func: *Func, inst: Air.Inst.Index) !void {
}
fn resolveInst(func: *Func, ref: Air.Inst.Ref) InnerError!MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
// If the type has no codegen bits, no need to store it.
const inst_ty = func.typeOf(ref);
- if (!inst_ty.hasRuntimeBits(zcu))
+ if (!inst_ty.hasRuntimeBits(pt))
return .none;
const mcv = if (ref.toIndex()) |inst| mcv: {
@@ -6394,9 +6439,11 @@ fn getResolvedInstValue(func: *Func, inst: Air.Inst.Index) *InstTracking {
}
fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const result = try codegen.genTypedValue(
func.bin_file,
+ pt,
func.src_loc,
val,
zcu.funcOwnerDeclIndex(func.func_index),
@@ -6438,7 +6485,8 @@ fn resolveCallingConventionValues(
fn_info: InternPool.Key.FuncType,
var_args: []const Type,
) !CallMCValues {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const param_types = try func.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
@@ -6481,14 +6529,14 @@ fn resolveCallingConventionValues(
// Return values
if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
result.return_value = InstTracking.init(.none);
} else {
var ret_tracking: [2]InstTracking = undefined;
var ret_tracking_i: usize = 0;
var ret_float_reg_i: usize = 0;
- const classes = mem.sliceTo(&abi.classifySystem(ret_ty, zcu), .none);
+ const classes = mem.sliceTo(&abi.classifySystem(ret_ty, pt), .none);
for (classes) |class| switch (class) {
.integer => {
@@ -6521,7 +6569,7 @@ fn resolveCallingConventionValues(
};
result.return_value = switch (ret_tracking_i) {
- else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(zcu), ret_tracking_i }),
+ else => return func.fail("ty {} took {} tracking return indices", .{ ret_ty.fmt(pt), ret_tracking_i }),
1 => ret_tracking[0],
2 => InstTracking.init(.{ .register_pair = .{
ret_tracking[0].short.register, ret_tracking[1].short.register,
@@ -6532,7 +6580,7 @@ fn resolveCallingConventionValues(
var param_float_reg_i: usize = 0;
for (param_types, result.args) |ty, *arg| {
- if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
assert(cc == .Unspecified);
arg.* = .none;
continue;
@@ -6541,7 +6589,7 @@ fn resolveCallingConventionValues(
var arg_mcv: [2]MCValue = undefined;
var arg_mcv_i: usize = 0;
- const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
+ const classes = mem.sliceTo(&abi.classifySystem(ty, pt), .none);
for (classes) |class| switch (class) {
.integer => {
@@ -6576,7 +6624,7 @@ fn resolveCallingConventionValues(
else => return func.fail("TODO: C calling convention arg class {}", .{class}),
} else {
arg.* = switch (arg_mcv_i) {
- else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(zcu), arg_mcv_i }),
+ else => return func.fail("ty {} took {} tracking arg indices", .{ ty.fmt(pt), arg_mcv_i }),
1 => arg_mcv[0],
2 => .{ .register_pair = .{ arg_mcv[0].register, arg_mcv[1].register } },
};
@@ -6621,12 +6669,14 @@ fn parseRegName(name: []const u8) ?Register {
}
fn typeOf(func: *Func, inst: Air.Inst.Ref) Type {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
return func.air.typeOf(inst, &zcu.intern_pool);
}
fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type {
- const zcu = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
return func.air.typeOfIndex(inst, &zcu.intern_pool);
}
@@ -6634,40 +6684,41 @@ fn hasFeature(func: *Func, feature: Target.riscv.Feature) bool {
return Target.riscv.featureSetHas(func.target.cpu.features, feature);
}
-pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
- const payload_align = payload_ty.abiAlignment(zcu);
- const error_align = Type.anyerror.abiAlignment(zcu);
- if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
+ const payload_align = payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return 0;
} else {
- return payload_align.forward(Type.anyerror.abiSize(zcu));
+ return payload_align.forward(Type.anyerror.abiSize(pt));
}
}
-pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
- const payload_align = payload_ty.abiAlignment(zcu);
- const error_align = Type.anyerror.abiAlignment(zcu);
- if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return error_align.forward(payload_ty.abiSize(zcu));
+pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
+ const payload_align = payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ return error_align.forward(payload_ty.abiSize(pt));
} else {
return 0;
}
}
fn promoteInt(func: *Func, ty: Type) Type {
- const mod = func.bin_file.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const int_info: InternPool.Key.IntType = switch (ty.toIntern()) {
.bool_type => .{ .signedness = .unsigned, .bits = 1 },
- else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty,
+ else => if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else return ty,
};
for ([_]Type{
Type.c_int, Type.c_uint,
Type.c_long, Type.c_ulong,
Type.c_longlong, Type.c_ulonglong,
}) |promote_ty| {
- const promote_info = promote_ty.intInfo(mod);
+ const promote_info = promote_ty.intInfo(zcu);
if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue;
if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and
promote_info.signedness == .signed) <= promote_info.bits) return promote_ty;
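The two layout helpers above (`errUnionPayloadOffset` / `errUnionErrorOffset`) encode one rule: whichever of the payload and error value has the stricter alignment is placed first. A standalone sketch of that same rule, outside the diff and using plain sizes and alignments instead of the compiler's `Type`/`Zcu.PerThread` API (the u64/anyerror numbers in the test are illustrative assumptions):

const std = @import("std");

// Same rule as errUnionPayloadOffset/errUnionErrorOffset, assuming the payload
// has runtime bits (the real helpers return 0 for both offsets otherwise).
fn payloadOffset(payload_align: u64, err_size: u64, err_align: u64) u64 {
    if (payload_align >= err_align) return 0; // payload first
    return std.mem.alignForward(u64, err_size, payload_align); // error first
}

fn errorOffset(payload_size: u64, payload_align: u64, err_align: u64) u64 {
    if (payload_align >= err_align) return std.mem.alignForward(u64, payload_size, err_align);
    return 0;
}

test "layout of a u64 payload with a 2-byte error set" {
    try std.testing.expectEqual(@as(u64, 0), payloadOffset(8, 2, 2));
    try std.testing.expectEqual(@as(u64, 8), errorOffset(8, 8, 2));
}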
src/arch/riscv64/Emit.zig
@@ -1,5 +1,6 @@
//! This file contains the functionality for emitting RISC-V MIR as machine code
+bin_file: *link.File,
lower: Lower,
debug_output: DebugInfoOutput,
code: *std.ArrayList(u8),
@@ -48,7 +49,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.Lib => emit.lower.link_mode == .static,
};
- if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
+ if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(symbol.sym_index);
const sym = elf_file.symbol(sym_index);
@@ -77,7 +78,7 @@ pub fn emitMir(emit: *Emit) Error!void {
} else return emit.fail("TODO: load_symbol_reloc non-ELF", .{});
},
.call_extern_fn_reloc => |symbol| {
- if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
+ if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
src/arch/riscv64/Lower.zig
@@ -1,6 +1,6 @@
//! This file contains the functionality for lowering RISC-V MIR to Instructions
-bin_file: *link.File,
+pt: Zcu.PerThread,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
pic: bool,
@@ -44,7 +44,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
insts: []const Instruction,
relocs: []const Reloc,
} {
- const zcu = lower.bin_file.comp.module.?;
+ const pt = lower.pt;
lower.result_insts = undefined;
lower.result_relocs = undefined;
@@ -243,11 +243,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
const class = rs1.class();
const ty = compare.ty;
- const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch {
- return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)});
+ const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch {
+ return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)});
};
- const is_unsigned = ty.isUnsignedInt(zcu);
+ const is_unsigned = ty.isUnsignedInt(pt.zcu);
const less_than: Encoding.Mnemonic = if (is_unsigned) .sltu else .slt;
@@ -502,7 +502,7 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
}
fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool {
- const target = lower.bin_file.comp.module.?.getTarget();
+ const target = lower.pt.zcu.getTarget();
const features = target.cpu.features;
return std.Target.riscv.featureSetHas(features, feature);
}
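Lower.zig and the CodeGen.zig changes above follow the same mechanical pattern: instead of recovering the `Zcu` from `bin_file.comp.module.?`, each backend now carries a `pt: Zcu.PerThread`, passes it to layout queries, and reaches `pt.zcu` only for shared compilation state. A minimal stub sketch of that shape, with simplified stand-in types rather than the real `Zcu` API:

const std = @import("std");

const Zcu = struct {
    target_ptr_bytes: u8,

    pub const PerThread = struct {
        zcu: *const Zcu,
        tid: usize,

        // Layout queries now take the per-thread handle.
        pub fn abiSize(pt: PerThread, bit_size: u16) u64 {
            _ = pt; // the real query also consults per-thread state
            return std.math.divCeil(u64, bit_size, 8) catch unreachable;
        }
    };
};

fn lowerSomething(pt: Zcu.PerThread, bit_size: u16) u64 {
    // Queries that only need shared state still go through `pt.zcu`.
    const ptr_bytes = pt.zcu.target_ptr_bytes;
    return @max(pt.abiSize(bit_size), ptr_bytes);
}

pub fn main() void {
    const zcu: Zcu = .{ .target_ptr_bytes = 8 };
    const pt: Zcu.PerThread = .{ .zcu = &zcu, .tid = 0 };
    std.debug.print("abi size: {d}\n", .{lowerSomething(pt, 33)});
}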
src/arch/sparc64/CodeGen.zig
@@ -11,11 +11,9 @@ const Allocator = mem.Allocator;
const builtin = @import("builtin");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../../InternPool.zig");
const Value = @import("../../Value.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const codegen = @import("../../codegen.zig");
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
@@ -52,6 +50,7 @@ const RegisterView = enum(u1) {
};
gpa: Allocator,
+pt: Zcu.PerThread,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -64,7 +63,7 @@ args: []MCValue,
ret_mcv: MCValue,
fn_type: Type,
arg_index: usize,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
stack_align: Alignment,
/// MIR Instructions
@@ -263,15 +262,16 @@ const BigTomb = struct {
pub fn generate(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const gpa = lf.comp.gpa;
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
@@ -289,11 +289,12 @@ pub fn generate(
var function = Self{
.gpa = gpa,
+ .pt = pt,
.air = air,
.liveness = liveness,
.target = target,
- .func_index = func_index,
.bin_file = lf,
+ .func_index = func_index,
.code = code,
.debug_output = debug_output,
.err_msg = null,
@@ -365,7 +366,8 @@ pub fn generate(
}
fn gen(self: *Self) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// TODO Finish function prologue and epilogue for sparc64.
@@ -493,7 +495,8 @@ fn gen(self: *Self) !void {
}
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
@@ -757,7 +760,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
@@ -835,7 +839,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -869,7 +874,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.typeOf(ty_op.operand);
@@ -1006,7 +1012,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const arg_index = self.arg_index;
self.arg_index += 1;
@@ -1016,8 +1022,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const mcv = blk: {
switch (arg) {
.stack_offset => |off| {
- const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
+ const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
};
const offset = off + abi_size;
break :blk MCValue{ .stack_offset = offset };
@@ -1205,7 +1211,8 @@ fn airBreakpoint(self: *Self) !void {
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
// We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you.
@@ -1228,7 +1235,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
if (int_info.bits == 8) break :result operand;
const abi_size = int_info.bits >> 3;
- const abi_align = operand_ty.abiAlignment(mod);
+ const abi_align = operand_ty.abiAlignment(pt);
const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
Endian.big => ASI.asi_primary_little,
Endian.little => ASI.asi_primary,
@@ -1297,7 +1304,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
const ty = self.typeOf(callee);
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(mod),
@@ -1341,7 +1349,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (try self.air.value(callee, mod)) |func_value| {
+ if (try self.air.value(callee, pt)) |func_value| {
if (self.bin_file.tag == link.File.Elf.base_tag) {
switch (mod.intern_pool.indexToKey(func_value.ip_index)) {
.func => |func| {
@@ -1429,7 +1437,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -1444,7 +1453,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.ErrorSet => Type.u16,
.Optional => blk: {
const payload_ty = lhs_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
@@ -1655,7 +1664,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const func = mod.funcInfo(extra.data.func);
@@ -1753,7 +1763,8 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const operand_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const info_a = operand_ty.intInfo(mod);
@@ -1814,12 +1825,13 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits(mod))
+ if (!elem_ty.hasRuntimeBits(pt))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -1898,7 +1910,7 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
- assert(lhs_ty.eql(rhs_ty, self.bin_file.comp.module.?));
+ assert(lhs_ty.eql(rhs_ty, self.pt.zcu));
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -2040,7 +2052,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
//const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
@@ -2104,7 +2117,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -2336,7 +2350,8 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
@@ -2441,7 +2456,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -2452,7 +2468,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const slice_ty = self.typeOf(bin_op.lhs);
const elem_ty = slice_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
@@ -2566,10 +2582,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const operand = extra.struct_operand;
const index = extra.field_index;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -2699,13 +2715,14 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
}
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload(mod);
const mcv = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBits(mod)) break :result mcv;
+ if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
};
@@ -2713,12 +2730,13 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload(mod);
- if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none;
+ if (!payload_ty.hasRuntimeBits(pt)) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
};
@@ -2727,13 +2745,14 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = ty_op.ty.toType();
const payload_ty = error_union_ty.errorUnionPayload(mod);
const mcv = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBits(mod)) break :result mcv;
+ if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
};
@@ -2748,13 +2767,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.typeOfIndex(inst);
// Optional with a zero-bit payload type is just a boolean true
- if (optional_ty.abiSize(mod) == 1)
+ if (optional_ty.abiSize(pt) == 1)
break :result MCValue{ .immediate = 1 };
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@@ -2788,10 +2807,11 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignme
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = self.typeOfIndex(inst).childType(mod);
- if (!elem_ty.hasRuntimeBits(mod)) {
+ if (!elem_ty.hasRuntimeBits(pt)) {
// As this stack item will never be dereferenced at runtime,
// return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized
@@ -2799,21 +2819,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
- const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(mod);
+ const abi_align = elem_ty.abiAlignment(pt);
return self.allocMem(inst, abi_size, abi_align);
}
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const elem_ty = self.typeOfIndex(inst);
- const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
+ const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
};
- const abi_align = elem_ty.abiAlignment(mod);
+ const abi_align = elem_ty.abiAlignment(pt);
self.stack_align = self.stack_align.max(abi_align);
if (reg_ok) {
@@ -2855,7 +2875,8 @@ fn binOp(
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (tag) {
.add,
.sub,
@@ -2996,7 +3017,7 @@ fn binOp(
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
else => ptr_ty.childType(mod),
};
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
if (elem_size == 1) {
const base_tag: Mir.Inst.Tag = switch (tag) {
@@ -3396,8 +3417,8 @@ fn binOpRegister(
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
- const mod = self.bin_file.comp.module.?;
- if (self.typeOf(operand).hasRuntimeBits(mod)) {
+ const pt = self.pt;
+ if (self.typeOf(operand).hasRuntimeBits(pt)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -3516,17 +3537,18 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
/// Given an error union, returns the payload
fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_ty = error_union_ty.errorUnionSet(mod);
const payload_ty = error_union_ty.errorUnionPayload(mod);
if (err_ty.errorSetIsEmpty(mod)) {
return error_union_mcv;
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return MCValue.none;
}
- const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
+ const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_offset => |off| {
@@ -3587,7 +3609,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
}
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
const owner_decl = mod.funcOwnerDeclIndex(self.func_index);
@@ -3736,7 +3759,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -3935,20 +3958,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = addr });
- try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod));
+ try self.genLoad(reg, reg, i13, 0, ty.abiSize(pt));
},
.stack_offset => |off| {
const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
- try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod));
+ try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(pt));
},
}
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size = ty.abiSize(pt);
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -3956,7 +3980,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(mod)) {
+ switch (ty.abiSize(pt)) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -3986,7 +4010,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1, mod);
- const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
+ const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt)));
const cond_reg = try self.register_manager.allocReg(null, gp);
// TODO handle floating point CCRs
@@ -4032,7 +4056,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- const ptr_ty = try mod.singleMutPtrType(ty);
+ const ptr_ty = try pt.singleMutPtrType(ty);
const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
@@ -4121,12 +4145,13 @@ fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Re
}
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const mcv: MCValue = switch (try codegen.genTypedValue(
self.bin_file,
+ pt,
self.src_loc,
val,
- mod.funcOwnerDeclIndex(self.func_index),
+ pt.zcu.funcOwnerDeclIndex(self.func_index),
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -4157,14 +4182,15 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const error_type = ty.errorUnionSet(mod);
const payload_type = ty.errorUnionPayload(mod);
- if (!error_type.hasRuntimeBits(mod)) {
+ if (!error_type.hasRuntimeBits(pt)) {
return MCValue{ .immediate = 0 }; // always false
- } else if (!payload_type.hasRuntimeBits(mod)) {
- if (error_type.abiSize(mod) <= 8) {
+ } else if (!payload_type.hasRuntimeBits(pt)) {
+ if (error_type.abiSize(pt) <= 8) {
const reg_mcv: MCValue = switch (operand) {
.register => operand,
else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
@@ -4255,9 +4281,10 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const elem_ty = ptr_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
switch (ptr) {
.none => unreachable,
@@ -4326,7 +4353,8 @@ fn minMax(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
assert(lhs_ty.eql(rhs_ty, mod));
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO min/max on floats", .{}),
@@ -4446,7 +4474,8 @@ fn realStackOffset(off: u32) u32 {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
@@ -4487,7 +4516,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
+ const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result_arg.* = .{ .register = argument_registers[next_register] };
@@ -4516,10 +4545,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBits(mod)) {
+ } else if (!ret_ty.hasRuntimeBits(pt)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+ const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
// The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
if (ret_ty_size <= 8) {
result.return_value = switch (role) {
@@ -4538,21 +4567,22 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
}
fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty = self.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
if (ref.toIndex()) |inst| {
return self.getResolvedInstValue(inst);
}
- return self.genTypedValue((try self.air.value(ref, mod)).?);
+ return self.genTypedValue((try self.air.value(ref, pt)).?);
}
fn ret(self: *Self, mcv: MCValue) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ret_ty = self.fn_type.fnReturnType(mod);
try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
@@ -4654,8 +4684,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size = value_ty.abiSize(mod);
+ const pt = self.pt;
+ const abi_size = value_ty.abiSize(pt);
switch (ptr) {
.none => unreachable,
@@ -4696,11 +4726,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(mod);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
+ const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4738,7 +4769,8 @@ fn trunc(
operand_ty: Type,
dest_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const info_a = operand_ty.intInfo(mod);
const info_b = dest_ty.intInfo(mod);
@@ -4848,7 +4880,7 @@ fn truncRegister(
}
}
-/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
+/// TODO support scope overrides. Also note this logic is duplicated with `Zcu.wantSafety`.
fn wantSafety(self: *Self) bool {
return switch (self.bin_file.comp.root_mod.optimize_mode) {
.Debug => true,
@@ -4859,11 +4891,9 @@ fn wantSafety(self: *Self) bool {
}
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
- const mod = self.bin_file.comp.module.?;
- return self.air.typeOf(inst, &mod.intern_pool);
+ return self.air.typeOf(inst, &self.pt.zcu.intern_pool);
}
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
- const mod = self.bin_file.comp.module.?;
- return self.air.typeOfIndex(inst, &mod.intern_pool);
+ return self.air.typeOfIndex(inst, &self.pt.zcu.intern_pool);
}
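In the `genSetStack` hunk above, storing an `undef` value in safe build modes fills the slot with repeated 0xaa bytes, choosing the immediate from the type's ABI size; the diff shows the 1-, 2-, and 4-byte cases. A standalone sketch of that selection, where the 8-byte case and the fallback are assumptions rather than part of the visible hunk:

const std = @import("std");

fn undefFillImmediate(abi_size: u64) ?u64 {
    return switch (abi_size) {
        1 => 0xaa,
        2 => 0xaaaa,
        4 => 0xaaaaaaaa,
        8 => 0xaaaaaaaaaaaaaaaa, // assumed; the hunk is cut off after the 4-byte case
        else => null, // larger or odd sizes need a memset-style loop instead
    };
}

test "undef fill immediates" {
    try std.testing.expectEqual(@as(?u64, 0xaaaa), undefFillImmediate(2));
    try std.testing.expectEqual(@as(?u64, null), undefFillImmediate(3));
}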
src/arch/sparc64/Emit.zig
@@ -6,9 +6,7 @@ const Endian = std.builtin.Endian;
const assert = std.debug.assert;
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const Liveness = @import("../../Liveness.zig");
const log = std.log.scoped(.sparcv9_emit);
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@@ -24,7 +22,7 @@ bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
code: *std.ArrayList(u8),
prev_di_line: u32,
src/arch/wasm/abi.zig
@@ -22,15 +22,16 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how it must be passed
/// or returned as a value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
+pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return none;
switch (ty.zigTypeTag(mod)) {
.Struct => {
- const struct_type = mod.typeToStruct(ty).?;
+ const struct_type = pt.zcu.typeToStruct(ty).?;
if (struct_type.layout == .@"packed") {
- if (ty.bitSize(mod) <= 64) return direct;
+ if (ty.bitSize(pt) <= 64) return direct;
return .{ .direct, .direct };
}
if (struct_type.field_types.len > 1) {
@@ -40,13 +41,13 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
const explicit_align = struct_type.fieldAlign(ip, 0);
if (explicit_align != .none) {
- if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(mod)))
+ if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(pt)))
return memory;
}
- return classifyType(field_ty, mod);
+ return classifyType(field_ty, pt);
},
.Int, .Enum, .ErrorSet => {
- const int_bits = ty.intInfo(mod).bits;
+ const int_bits = ty.intInfo(pt.zcu).bits;
if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct };
return memory;
@@ -61,24 +62,24 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
.Vector => return direct,
.Array => return memory,
.Optional => {
- assert(ty.isPtrLikeOptional(mod));
+ assert(ty.isPtrLikeOptional(pt.zcu));
return direct;
},
.Pointer => {
- assert(!ty.isSlice(mod));
+ assert(!ty.isSlice(pt.zcu));
return direct;
},
.Union => {
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = pt.zcu.typeToUnion(ty).?;
if (union_obj.getLayout(ip) == .@"packed") {
- if (ty.bitSize(mod) <= 64) return direct;
+ if (ty.bitSize(pt) <= 64) return direct;
return .{ .direct, .direct };
}
- const layout = ty.unionGetLayout(mod);
+ const layout = ty.unionGetLayout(pt);
assert(layout.tag_size == 0);
if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
- return classifyType(first_field_ty, mod);
+ return classifyType(first_field_ty, pt);
},
.ErrorUnion,
.Frame,
@@ -100,28 +101,29 @@ pub fn classifyType(ty: Type, mod: *Zcu) [2]Class {
/// Returns the scalar type a given type can represent.
/// Asserts the given type can be represented as a scalar, such as
/// a struct with a single scalar field.
-pub fn scalarType(ty: Type, mod: *Zcu) Type {
+pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
+ return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
} else {
assert(ty.structFieldCount(mod) == 1);
- return scalarType(ty.structFieldType(0, mod), mod);
+ return scalarType(ty.structFieldType(0, mod), pt);
}
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
if (union_obj.getLayout(ip) != .@"packed") {
- const layout = mod.getUnionLayout(union_obj);
+ const layout = pt.getUnionLayout(union_obj);
if (layout.payload_size == 0 and layout.tag_size != 0) {
- return scalarType(ty.unionTagTypeSafety(mod).?, mod);
+ return scalarType(ty.unionTagTypeSafety(mod).?, pt);
}
assert(union_obj.field_types.len == 1);
}
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
- return scalarType(first_field_ty, mod);
+ return scalarType(first_field_ty, pt);
},
else => return ty,
}
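The `classifyType` change above keeps the integer rule intact: up to 64 bits is passed directly, 65-128 bits as two direct values, and anything wider goes through memory. A standalone sketch of just that rule, with a stand-in `Class` enum whose values only approximate the real `none`/`direct`/`memory` constants:

const std = @import("std");

const Class = enum { direct, indirect, none };

fn classifyIntBits(bits: u16) [2]Class {
    if (bits <= 64) return .{ .direct, .none };
    if (bits <= 128) return .{ .direct, .direct };
    return .{ .indirect, .none };
}

test "wasm-style integer classification" {
    try std.testing.expectEqual([2]Class{ .direct, .none }, classifyIntBits(32));
    try std.testing.expectEqual([2]Class{ .direct, .direct }, classifyIntBits(128));
    try std.testing.expectEqual([2]Class{ .indirect, .none }, classifyIntBits(256));
}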
src/arch/wasm/CodeGen.zig
@@ -684,6 +684,7 @@ simd_immediates: std.ArrayListUnmanaged([16]u8) = .{},
target: std.Target,
/// Represents the wasm binary file that is being linked.
bin_file: *link.File.Wasm,
+pt: Zcu.PerThread,
/// List of MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// Contains extra data for MIR
@@ -764,8 +765,7 @@ pub fn deinit(func: *CodeGen) void {
/// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError {
- const mod = func.bin_file.base.comp.module.?;
- const src_loc = func.decl.navSrcLoc(mod);
+ const src_loc = func.decl.navSrcLoc(func.pt.zcu);
func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args);
return error.CodegenFail;
}
@@ -788,10 +788,11 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
assert(!gop.found_existing);
- const mod = func.bin_file.base.comp.module.?;
- const val = (try func.air.value(ref, mod)).?;
+ const pt = func.pt;
+ const mod = pt.zcu;
+ const val = (try func.air.value(ref, pt)).?;
const ty = func.typeOf(ref);
- if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt) and !ty.isInt(mod) and !ty.isError(mod)) {
gop.value_ptr.* = WValue{ .none = {} };
return gop.value_ptr.*;
}
@@ -802,8 +803,8 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result = if (isByRef(ty, mod)) blk: {
- const sym_index = try func.bin_file.lowerUnnamedConst(val, func.decl_index);
+ const result = if (isByRef(ty, pt)) blk: {
+ const sym_index = try func.bin_file.lowerUnnamedConst(pt, val, func.decl_index);
break :blk WValue{ .memory = sym_index };
} else try func.lowerConstant(val, ty);
@@ -990,7 +991,8 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding type
-fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype {
+fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
+ const mod = pt.zcu;
const target = mod.getTarget();
const ip = &mod.intern_pool;
return switch (ty.zigTypeTag(mod)) {
@@ -1002,26 +1004,26 @@ fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype {
else => unreachable,
},
.Int, .Enum => blk: {
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
if (info.bits <= 32) break :blk wasm.Valtype.i32;
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
break :blk wasm.Valtype.i32; // represented as pointer to stack
},
.Struct => {
- if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
+ if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
+ return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
} else {
return wasm.Valtype.i32;
}
},
- .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, pt)) {
.direct => wasm.Valtype.v128,
.unrolled => wasm.Valtype.i32,
},
- .Union => switch (ty.containerLayout(mod)) {
+ .Union => switch (ty.containerLayout(pt.zcu)) {
.@"packed" => {
- const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory");
- return typeToValtype(int_ty, mod);
+ const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(pt)))) catch @panic("out of memory");
+ return typeToValtype(int_ty, pt);
},
else => wasm.Valtype.i32,
},
@@ -1030,17 +1032,17 @@ fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype {
}
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, mod: *Zcu) u8 {
- return wasm.valtype(typeToValtype(ty, mod));
+fn genValtype(ty: Type, pt: Zcu.PerThread) u8 {
+ return wasm.valtype(typeToValtype(ty, pt));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, mod: *Zcu) u8 {
+fn genBlockType(ty: Type, pt: Zcu.PerThread) u8 {
return switch (ty.ip_index) {
.void_type, .noreturn_type => wasm.block_empty,
- else => genValtype(ty, mod),
+ else => genValtype(ty, pt),
};
}
@@ -1101,8 +1103,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Creates one local for a given `Type`.
/// Returns a corresponding `WValue` with `local` as the active tag
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- const valtype = typeToValtype(ty, mod);
+ const pt = func.pt;
+ const valtype = typeToValtype(ty, pt);
switch (valtype) {
.i32 => if (func.free_locals_i32.popOrNull()) |index| {
log.debug("reusing local ({d}) of type {}", .{ index, valtype });
@@ -1133,8 +1135,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Ensures a new local will be created. This is useful when a
/// zero-initialized local is needed.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- try func.locals.append(func.gpa, genValtype(ty, mod));
+ const pt = func.pt;
+ try func.locals.append(func.gpa, genValtype(ty, pt));
const initial_index = func.local_index;
func.local_index += 1;
return WValue{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1147,23 +1149,24 @@ fn genFunctype(
cc: std.builtin.CallingConvention,
params: []const InternPool.Index,
return_type: Type,
- mod: *Zcu,
+ pt: Zcu.PerThread,
) !wasm.Type {
+ const mod = pt.zcu;
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
defer temp_params.deinit();
var returns = std.ArrayList(wasm.Valtype).init(gpa);
defer returns.deinit();
- if (firstParamSRet(cc, return_type, mod)) {
+ if (firstParamSRet(cc, return_type, pt)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (return_type.hasRuntimeBitsIgnoreComptime(pt)) {
if (cc == .C) {
- const res_classes = abi.classifyType(return_type, mod);
+ const res_classes = abi.classifyType(return_type, pt);
assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, mod);
- try returns.append(typeToValtype(scalar_type, mod));
+ const scalar_type = abi.scalarType(return_type, pt);
+ try returns.append(typeToValtype(scalar_type, pt));
} else {
- try returns.append(typeToValtype(return_type, mod));
+ try returns.append(typeToValtype(return_type, pt));
}
} else if (return_type.isError(mod)) {
try returns.append(.i32);
@@ -1172,25 +1175,25 @@ fn genFunctype(
// param types
for (params) |param_type_ip| {
const param_type = Type.fromInterned(param_type_ip);
- if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(pt)) continue;
switch (cc) {
.C => {
- const param_classes = abi.classifyType(param_type, mod);
+ const param_classes = abi.classifyType(param_type, pt);
for (param_classes) |class| {
if (class == .none) continue;
if (class == .direct) {
- const scalar_type = abi.scalarType(param_type, mod);
- try temp_params.append(typeToValtype(scalar_type, mod));
+ const scalar_type = abi.scalarType(param_type, pt);
+ try temp_params.append(typeToValtype(scalar_type, pt));
} else {
- try temp_params.append(typeToValtype(param_type, mod));
+ try temp_params.append(typeToValtype(param_type, pt));
}
}
},
- else => if (isByRef(param_type, mod))
+ else => if (isByRef(param_type, pt))
try temp_params.append(.i32)
else
- try temp_params.append(typeToValtype(param_type, mod)),
+ try temp_params.append(typeToValtype(param_type, pt)),
}
}
@@ -1202,6 +1205,7 @@ fn genFunctype(
pub fn generate(
bin_file: *link.File,
+ pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
@@ -1210,15 +1214,15 @@ pub fn generate(
debug_output: codegen.DebugInfoOutput,
) codegen.CodeGenError!codegen.Result {
_ = src_loc;
- const comp = bin_file.comp;
- const gpa = comp.gpa;
- const zcu = comp.module.?;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const decl = zcu.declPtr(func.owner_decl);
const namespace = zcu.namespacePtr(decl.src_namespace);
const target = namespace.fileScope(zcu).mod.resolved_target.result;
var code_gen: CodeGen = .{
.gpa = gpa,
+ .pt = pt,
.air = air,
.liveness = liveness,
.code = code,
@@ -1242,10 +1246,11 @@ pub fn generate(
}
fn genFunc(func: *CodeGen) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod);
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1272,7 +1277,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1);
const last_inst_ty = func.typeOfIndex(inst);
- if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) {
+ if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(pt) or last_inst_ty.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
}
}
@@ -1354,7 +1359,8 @@ const CallWValues = struct {
};
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
@@ -1369,7 +1375,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1379,7 +1385,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
for (fn_info.param_types.get(ip)) |ty| {
- if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(pt)) {
continue;
}
@@ -1389,7 +1395,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
},
.C => {
for (fn_info.param_types.get(ip)) |ty| {
- const ty_classes = abi.classifyType(Type.fromInterned(ty), mod);
+ const ty_classes = abi.classifyType(Type.fromInterned(ty), pt);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1403,11 +1409,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Zcu) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread) bool {
switch (cc) {
- .Unspecified, .Inline => return isByRef(return_type, mod),
+ .Unspecified, .Inline => return isByRef(return_type, pt),
.C => {
- const ty_classes = abi.classifyType(return_type, mod);
+ const ty_classes = abi.classifyType(return_type, pt);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
return false;
@@ -1423,8 +1429,9 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
- const mod = func.bin_file.base.comp.module.?;
- const ty_classes = abi.classifyType(ty, mod);
+ const pt = func.pt;
+ const mod = pt.zcu;
+ const ty_classes = abi.classifyType(ty, pt);
assert(ty_classes[0] != .none);
switch (ty.zigTypeTag(mod)) {
.Struct, .Union => {
@@ -1432,7 +1439,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct);
- const scalar_type = abi.scalarType(ty, mod);
+ const scalar_type = abi.scalarType(ty, pt);
switch (value) {
.memory,
.memory_offset,
@@ -1447,7 +1454,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
- assert(ty.abiSize(mod) == 16);
+ assert(ty.abiSize(pt) == 16);
// in this case we have an integer or float that must be lowered as 2 i64's.
try func.emitWValue(value);
try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
@@ -1514,18 +1521,18 @@ fn restoreStackPointer(func: *CodeGen) !void {
///
/// Asserts Type has codegenbits
fn allocStack(func: *CodeGen, ty: Type) !WValue {
- const mod = func.bin_file.base.comp.module.?;
- assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+ const pt = func.pt;
+ assert(ty.hasRuntimeBitsIgnoreComptime(pt));
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse {
+ const abi_size = std.math.cast(u32, ty.abiSize(pt)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- ty.fmt(mod), ty.abiSize(mod),
+ ty.fmt(pt), ty.abiSize(pt),
});
};
- const abi_align = ty.abiAlignment(mod);
+ const abi_align = ty.abiAlignment(pt);
func.stack_alignment = func.stack_alignment.max(abi_align);
@@ -1540,7 +1547,8 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
/// This is different from allocStack where this will use the pointer's alignment
/// if it is set, to ensure the stack alignment will be set correctly.
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ptr_ty = func.typeOfIndex(inst);
const pointee_ty = ptr_ty.childType(mod);
@@ -1548,14 +1556,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
try func.initializeStack();
}
- if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!pointee_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return func.allocStack(Type.usize); // create a value containing just the stack pointer.
}
- const abi_alignment = ptr_ty.ptrAlignment(mod);
- const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse {
+ const abi_alignment = ptr_ty.ptrAlignment(pt);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(pt)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
+ pointee_ty.fmt(pt), pointee_ty.abiSize(pt),
});
};
func.stack_alignment = func.stack_alignment.max(abi_alignment);
@@ -1711,7 +1719,8 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, mod: *Zcu) bool {
+fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
switch (ty.zigTypeTag(mod)) {
@@ -1734,28 +1743,28 @@ fn isByRef(ty: Type, mod: *Zcu) bool {
.Array,
.Frame,
- => return ty.hasRuntimeBitsIgnoreComptime(mod),
+ => return ty.hasRuntimeBitsIgnoreComptime(pt),
.Union => {
if (mod.typeToUnion(ty)) |union_obj| {
if (union_obj.getLayout(ip) == .@"packed") {
- return ty.abiSize(mod) > 8;
+ return ty.abiSize(pt) > 8;
}
}
- return ty.hasRuntimeBitsIgnoreComptime(mod);
+ return ty.hasRuntimeBitsIgnoreComptime(pt);
},
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
+ return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
}
- return ty.hasRuntimeBitsIgnoreComptime(mod);
+ return ty.hasRuntimeBitsIgnoreComptime(pt);
},
- .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled,
+ .Vector => return determineSimdStoreStrategy(ty, pt) == .unrolled,
.Int => return ty.intInfo(mod).bits > 64,
.Enum => return ty.intInfo(mod).bits > 64,
.Float => return ty.floatBits(target) > 64,
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload(mod);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return false;
}
return true;
@@ -1764,7 +1773,7 @@ fn isByRef(ty: Type, mod: *Zcu) bool {
if (ty.isPtrLikeOptional(mod)) return false;
const pl_type = ty.optionalChild(mod);
if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
- return pl_type.hasRuntimeBitsIgnoreComptime(mod);
+ return pl_type.hasRuntimeBitsIgnoreComptime(pt);
},
.Pointer => {
// Slices act like struct and will be passed by reference
@@ -1783,11 +1792,11 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This allows storing
/// it with a single instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, mod: *Zcu) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag(mod) == .Vector);
- if (ty.bitSize(mod) != 128) return .unrolled;
+fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread) SimdStoreStrategy {
+ std.debug.assert(ty.zigTypeTag(pt.zcu) == .Vector);
+ if (ty.bitSize(pt) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
- const target = mod.getTarget();
+ const target = pt.zcu.getTarget();
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
return .direct;
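Worked example of the rule in the doc comment above (illustrative only, not part of the commit):

// @Vector(4, f32) => bitSize == 128; with the simd128 feature enabled => .direct (single 128-bit store)
// @Vector(4, f64) => bitSize == 256; never fits the fast path => .unrolled (byte-wise copy)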
@@ -2064,7 +2073,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
for (body) |inst| {
@@ -2085,7 +2095,8 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
}
fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
@@ -2095,27 +2106,27 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// to the stack instead
if (func.return_value != .none) {
try func.store(func.return_value, operand, ret_ty, 0);
- } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
switch (ret_ty.zigTypeTag(mod)) {
// Aggregate types can be lowered as a singular value
.Struct, .Union => {
- const scalar_type = abi.scalarType(ret_ty, mod);
+ const scalar_type = abi.scalarType(ret_ty, pt);
try func.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
- .width = @as(u8, @intCast(scalar_type.abiSize(mod) * 8)),
+ .width = @as(u8, @intCast(scalar_type.abiSize(pt) * 8)),
.signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, mod),
+ .valtype1 = typeToValtype(scalar_type, pt),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(scalar_type.abiAlignment(pt).toByteUnits().?),
});
},
else => try func.emitWValue(operand),
}
} else {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and ret_ty.isError(mod)) {
try func.addImm32(0);
} else {
try func.emitWValue(operand);
@@ -2128,16 +2139,17 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const child_type = func.typeOfIndex(inst).childType(mod);
const result = result: {
- if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
break :result try func.allocStack(Type.usize); // create pointer to void
}
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) {
break :result func.return_value;
}
@@ -2148,17 +2160,18 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const ret_ty = func.typeOf(un_op).childType(mod);
const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (ret_ty.isError(mod)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
+ } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2175,7 +2188,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]));
const ty = func.typeOf(pl_op.operand);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
@@ -2184,10 +2198,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
};
const ret_ty = fn_ty.fnReturnType(mod);
const fn_info = mod.typeToFunc(fn_ty).?;
- const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod);
+ const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt);
const callee: ?InternPool.DeclIndex = blk: {
- const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null;
+ const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null;
if (func_val.getFunction(mod)) |function| {
_ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl);
@@ -2195,7 +2209,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (func_val.getExternFunc(mod)) |extern_func| {
const ext_decl = mod.declPtr(extern_func.decl);
const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?;
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), mod);
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl);
const atom = func.bin_file.getAtomPtr(atom_index);
@@ -2230,7 +2244,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const arg_val = try func.resolveInst(arg);
const arg_ty = func.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
}
@@ -2245,7 +2259,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, fn_type);
@@ -2253,7 +2267,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
const result_value = result_value: {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
break :result_value WValue{ .none = {} };
} else if (ret_ty.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
@@ -2264,7 +2278,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
- const scalar_type = abi.scalarType(ret_ty, mod);
+ const scalar_type = abi.scalarType(ret_ty, pt);
const result = try func.allocStack(scalar_type);
try func.store(result, result_local, scalar_type, 0);
break :result_value result;
@@ -2287,7 +2301,8 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -2306,13 +2321,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
} else {
// at this point we have a non-natural alignment, so we must
// load the value, and then shift+or the rhs into the result location.
- const int_elem_ty = try mod.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
+ const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
- if (isByRef(int_elem_ty, mod)) {
+ if (isByRef(int_elem_ty, pt)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
- var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(mod)))) - 1));
+ var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(pt)))) - 1));
mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
mask ^= ~@as(u64, 0);
const shift_val = if (ptr_info.packed_offset.host_size <= 4)
@@ -2324,9 +2339,9 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
else
WValue{ .imm64 = mask };
const wrap_mask_val = if (ptr_info.packed_offset.host_size <= 4)
- WValue{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod))) }
+ WValue{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt))) }
else
- WValue{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod)) };
+ WValue{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt)) };
try func.emitWValue(lhs);
const loaded = try func.load(lhs, int_elem_ty, 0);
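The mask arithmetic above is easier to follow with concrete numbers. A sketch for a hypothetical `u4` field at bit offset 4 inside a `u8` host integer (values are illustrative, not taken from the commit):

const field_width = 4; // ty.bitSize(pt)
const bit_offset = 4; // ptr_info.packed_offset.bit_offset
const field_mask: u64 = ((1 << field_width) - 1) << bit_offset; // 0xF0: the field's bits within the host
const keep_mask: u64 = field_mask ^ ~@as(u64, 0); // 0xFFFF_FFFF_FFFF_FF0F: clears the old field bits
const wrap_mask: u64 = ~@as(u64, 0) >> (64 - field_width); // 0xF: truncates rhs to the field width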
@@ -2346,12 +2361,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
assert(!(lhs != .stack and rhs == .stack));
- const mod = func.bin_file.base.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = func.pt;
+ const mod = pt.zcu;
+ const abi_size = ty.abiSize(pt);
switch (ty.zigTypeTag(mod)) {
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload(mod);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
@@ -2363,7 +2379,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.store(lhs, rhs, Type.usize, 0);
}
const pl_ty = ty.optionalChild(mod);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return func.store(lhs, rhs, Type.u8, 0);
}
if (pl_ty.zigTypeTag(mod) == .ErrorSet) {
@@ -2373,11 +2389,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Struct, .Array, .Union => if (isByRef(ty, mod)) {
+ .Struct, .Array, .Union => if (isByRef(ty, pt)) {
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, pt)) {
.unrolled => {
const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2391,7 +2407,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- @intCast(ty.abiAlignment(mod).toByteUnits() orelse 0),
+ @intCast(ty.abiAlignment(pt).toByteUnits() orelse 0),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2421,11 +2437,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
} else if (abi_size > 16) {
- try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(mod))) });
+ try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(pt))) });
},
else => if (abi_size > 8) {
return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
- ty.fmt(func.bin_file.base.comp.module.?),
+ ty.fmt(pt),
abi_size,
});
},
@@ -2435,7 +2451,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// into lhs, so we calculate that and emit that instead
try func.lowerToStack(rhs);
- const valtype = typeToValtype(ty, mod);
+ const valtype = typeToValtype(ty, pt);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @as(u8, @intCast(abi_size * 8)),
@@ -2447,23 +2463,24 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + lhs.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
},
);
}
fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = ty_op.ty.toType();
const ptr_ty = func.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo(mod);
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, mod)) {
+ if (isByRef(ty, pt)) {
const new_local = try func.allocStack(ty);
try func.store(new_local, operand, ty, 0);
break :result new_local;
@@ -2476,7 +2493,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// at this point we have a non-natural alignment, so we must
// shift the value to obtain the correct bit.
- const int_elem_ty = try mod.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
+ const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
const shift_val = if (ptr_info.packed_offset.host_size <= 4)
WValue{ .imm32 = ptr_info.packed_offset.bit_offset }
else if (ptr_info.packed_offset.host_size <= 8)
@@ -2496,7 +2513,8 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Loads an operand from the linear memory section.
/// NOTE: Leaves the value on the stack.
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
// load local's value from memory by its stack position
try func.emitWValue(operand);
@@ -2507,15 +2525,15 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ @intCast(ty.abiAlignment(pt).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
}
- const abi_size: u8 = @intCast(ty.abiSize(mod));
+ const abi_size: u8 = @intCast(ty.abiSize(pt));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, mod),
+ .valtype1 = typeToValtype(ty, pt),
.width = abi_size * 8,
.op = .load,
.signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
@@ -2525,7 +2543,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + operand.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
},
);
@@ -2533,13 +2551,14 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
}
fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const arg_index = func.arg_index;
const arg = func.args[arg_index];
const cc = mod.typeToFunc(func.decl.typeOf(mod)).?.cc;
const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
- const arg_classes = abi.classifyType(arg_ty, mod);
+ const arg_classes = abi.classifyType(arg_ty, pt);
for (arg_classes) |class| {
if (class != .none) {
func.arg_index += 1;
@@ -2552,7 +2571,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) {
return func.fail(
"TODO: Implement C-ABI argument for type '{}'",
- .{arg_ty.fmt(func.bin_file.base.comp.module.?)},
+ .{arg_ty.fmt(pt)},
);
}
const result = try func.allocStack(arg_ty);
@@ -2579,7 +2598,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -2593,10 +2612,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2616,7 +2635,8 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
/// Performs a binary operation on the given `WValue`s
/// NOTE: This leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
@@ -2624,20 +2644,20 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, mod)) {
+ if (isByRef(ty, pt)) {
if (ty.zigTypeTag(mod) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
"TODO: Implement binary operation for type: {}",
- .{ty.fmt(func.bin_file.base.comp.module.?)},
+ .{ty.fmt(pt)},
);
}
}
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, mod),
+ .valtype1 = typeToValtype(ty, pt),
.signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.emitWValue(lhs);
@@ -2649,7 +2669,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const int_info = ty.intInfo(mod);
if (int_info.bits > 128) {
return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
@@ -2785,7 +2806,8 @@ const FloatOp = enum {
};
fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOf(ty_op.operand);
@@ -2793,7 +2815,7 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (ty.zigTypeTag(mod) == .Vector) {
- return func.fail("TODO implement airAbs for {}", .{ty.fmt(mod)});
+ return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
@@ -2877,7 +2899,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError
}
fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement floatOps for vectors", .{});
}
@@ -2893,7 +2916,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
for (args) |operand| {
try func.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) });
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
@@ -2983,7 +3006,8 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
}
fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
@@ -3002,10 +3026,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -3034,9 +3058,10 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
/// Asserts `Type` is <= 128 bits.
/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack, if wrapping was needed.
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- assert(ty.abiSize(mod) <= 16);
- const int_bits = @as(u16, @intCast(ty.bitSize(mod))); // TODO use ty.intInfo(mod).bits
+ const pt = func.pt;
+ const mod = pt.zcu;
+ assert(ty.abiSize(pt) <= 16);
+ const int_bits: u16 = @intCast(ty.bitSize(pt)); // TODO use ty.intInfo(mod).bits
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
};
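As a concrete illustration of what wrapping means here (sketch only, not from the commit): a `u5` held in a 32-bit wasm local keeps just its low 5 bits.

const five_bit_mask: u32 = (1 << 5) - 1; // 0x1F
const wrapped: u32 = 37 & five_bit_mask; // 0b100101 (37) wraps to 0b00101 (5)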
@@ -3098,13 +3123,14 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
}
fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
- const zcu = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.decl => |decl| return func.lowerDeclRefValue(decl, @intCast(offset)),
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, @intCast(offset)),
- .int => return func.lowerConstant(try zcu.intValue(Type.usize, offset), Type.usize),
+ .int => return func.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
.eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}),
.opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset),
.field => |field| {
@@ -3120,13 +3146,13 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
};
},
.Struct => switch (base_ty.containerLayout(zcu)) {
- .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
+ .auto => base_ty.structFieldOffset(@intCast(field.index), pt),
.@"extern", .@"packed" => unreachable,
},
.Union => switch (base_ty.containerLayout(zcu)) {
.auto => off: {
// Keep in sync with the `un` case of `generateSymbol`.
- const layout = base_ty.unionGetLayout(zcu);
+ const layout = base_ty.unionGetLayout(pt);
if (layout.payload_size == 0) break :off 0;
if (layout.tag_size == 0) break :off 0;
if (layout.tag_align.compare(.gte, layout.payload_align)) {
@@ -3152,17 +3178,18 @@ fn lowerAnonDeclRef(
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
offset: u32,
) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const decl_val = anon_decl.val;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const is_fn_body = ty.zigTypeTag(mod) == .Fn;
- if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(pt)) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
const decl_align = mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
- const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod));
+ const res = try func.bin_file.lowerAnonDecl(pt, decl_val, decl_align, func.decl.navSrcLoc(mod));
switch (res) {
.ok => {},
.fail => |em| {
@@ -3180,7 +3207,8 @@ fn lowerAnonDeclRef(
}
fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
// check if decl is an alias to a function, in which case we
@@ -3195,7 +3223,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u
}
}
const decl_ty = decl.typeOf(mod);
- if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
@@ -3212,8 +3240,9 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u
/// Asserts that `isByRef` returns `false` for `ty`.
fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- assert(!isByRef(ty, mod));
+ const pt = func.pt;
+ const mod = pt.zcu;
+ assert(!isByRef(ty, pt));
const ip = &mod.intern_pool;
if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
@@ -3261,13 +3290,13 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const int_info = ty.intInfo(mod);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(mod)))) },
- 33...64 => return WValue{ .imm64 = @bitCast(val.toSignedInt(mod)) },
+ 0...32 => return WValue{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(pt)))) },
+ 33...64 => return WValue{ .imm64 = @bitCast(val.toSignedInt(pt)) },
else => unreachable,
},
.unsigned => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @intCast(val.toUnsignedInt(mod)) },
- 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) },
+ 0...32 => return WValue{ .imm32 = @intCast(val.toUnsignedInt(pt)) },
+ 33...64 => return WValue{ .imm64 = val.toUnsignedInt(pt) },
else => unreachable,
},
}
@@ -3277,22 +3306,22 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
return WValue{ .imm32 = int };
},
.error_union => |error_union| {
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try pt.errorIntType();
const err_ty, const err_val = switch (error_union.val) {
.err_name => |err_name| .{
ty.errorUnionSet(mod),
- Value.fromInterned((try mod.intern(.{ .err = .{
+ Value.fromInterned(try pt.intern(.{ .err = .{
.ty = ty.errorUnionSet(mod).toIntern(),
.name = err_name,
- } }))),
+ } })),
},
.payload => .{
err_int_ty,
- try mod.intValue(err_int_ty, 0),
+ try pt.intValue(err_int_ty, 0),
},
};
const payload_type = ty.errorUnionPayload(mod);
- if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
// We use the error type directly as the type.
return func.lowerConstant(err_val, err_ty);
}
@@ -3318,7 +3347,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
- return .{ .memory = try func.bin_file.lowerUnnamedConst(val, owner_decl) };
+ return .{ .memory = try func.bin_file.lowerUnnamedConst(pt, val, owner_decl) };
},
.ptr => return func.lowerPtr(val.toIntern(), 0),
.opt => if (ty.optionalReprIsPayload(mod)) {
@@ -3332,11 +3361,11 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
return WValue{ .imm32 = @intFromBool(!val.isNull(mod)) };
},
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
- .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}),
+ .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
.vector_type => {
- assert(determineSimdStoreStrategy(ty, mod) == .direct);
+ assert(determineSimdStoreStrategy(ty, pt) == .direct);
var buf: [16]u8 = undefined;
- val.writeToMemory(ty, mod, &buf) catch unreachable;
+ val.writeToMemory(ty, pt, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
.struct_type => {
@@ -3345,9 +3374,9 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
// are by-ref types.
assert(struct_type.layout == .@"packed");
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
- val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
+ val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable;
const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
- const int_val = try mod.intValue(
+ const int_val = try pt.intValue(
backing_int_ty,
mem.readInt(u64, &buf, .little),
);
@@ -3358,7 +3387,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.un => |un| {
// in this case we have a packed union which will not be passed by reference.
const constant_ty = if (un.tag == .none)
- try ty.unionBackingType(mod)
+ try ty.unionBackingType(pt)
else field_ty: {
const union_obj = mod.typeToUnion(ty).?;
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
@@ -3379,7 +3408,8 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
}
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
@@ -3421,15 +3451,16 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
switch (val.ip_index) {
.none => {},
.bool_true => return 1,
.bool_false => return 0,
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
- .int => |int| intStorageAsI32(int.storage, mod),
+ .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, pt),
+ .int => |int| intStorageAsI32(int.storage, pt),
.ptr => |ptr| {
assert(ptr.base_addr == .int);
return @intCast(ptr.byte_offset);
@@ -3445,17 +3476,17 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
};
}
-fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Zcu) i32 {
- return intStorageAsI32(ip.indexToKey(int).int.storage, mod);
+fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread) i32 {
+ return intStorageAsI32(ip.indexToKey(int).int.storage, pt);
}
-fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Zcu) i32 {
+fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 {
return switch (storage) {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
- .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0)))),
- .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))))),
+ .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0)))),
+ .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))))),
};
}
@@ -3466,12 +3497,12 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
- const wasm_block_ty = genBlockType(block_ty, mod);
+ const pt = func.pt;
+ const wasm_block_ty = genBlockType(block_ty, pt);
// if wasm_block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty;
+ const ty: Type = if (isByRef(block_ty, pt)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else WValue.none;
@@ -3583,10 +3614,11 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
/// NOTE: This leaves the result on top of the stack, rather than a new local.
fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(!(lhs != .stack and rhs == .stack));
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
const payload_ty = ty.optionalChild(mod);
- if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking that both lhs and rhs
// are non-null, and then checking that the payloads of lhs and rhs match
@@ -3594,7 +3626,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
}
} else if (ty.isAnyFloat()) {
return func.cmpFloat(ty, lhs, rhs, op);
- } else if (isByRef(ty, mod)) {
+ } else if (isByRef(ty, pt)) {
return func.cmpBigInt(lhs, rhs, ty, op);
}
@@ -3612,7 +3644,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
try func.lowerToStack(rhs);
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, mod),
+ .valtype1 = typeToValtype(ty, pt),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3683,8 +3715,8 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const errors_len = WValue{ .memory = @intFromEnum(sym_index) };
try func.emitWValue(operand);
- const mod = func.bin_file.base.comp.module.?;
- const err_int_ty = try mod.errorIntType();
+ const pt = func.pt;
+ const err_int_ty = try pt.errorIntType();
const errors_len_val = try func.load(errors_len, err_int_ty, 0);
const result = try func.cmp(.stack, errors_len_val, err_int_ty, .lt);
@@ -3692,12 +3724,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block = func.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
+ if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(pt)) {
const operand = try func.resolveInst(br.operand);
try func.lowerToStack(operand);
@@ -3719,7 +3751,8 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.typeOf(ty_op.operand);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const result = result: {
if (operand_ty.zigTypeTag(mod) == .Bool) {
@@ -3731,7 +3764,7 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
} else {
const int_info = operand_ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(mod)});
+ return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
};
switch (wasm_bits) {
@@ -3798,13 +3831,14 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const wanted_ty = func.typeOfIndex(inst);
const given_ty = func.typeOf(ty_op.operand);
- const bit_size = given_ty.bitSize(mod);
+ const bit_size = given_ty.bitSize(pt);
const needs_wrapping = (given_ty.isSignedInt(mod) != wanted_ty.isSignedInt(mod)) and
bit_size != 32 and bit_size != 64 and bit_size != 128;
@@ -3814,7 +3848,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try bitcast_result.toLocal(func, wanted_ty);
}
- if (isByRef(given_ty, mod) and !isByRef(wanted_ty, mod)) {
+ if (isByRef(given_ty, pt) and !isByRef(wanted_ty, pt)) {
const loaded_memory = try func.load(operand, wanted_ty, 0);
if (needs_wrapping) {
break :result try (try func.wrapOperand(loaded_memory, wanted_ty)).toLocal(func, wanted_ty);
@@ -3822,7 +3856,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try loaded_memory.toLocal(func, wanted_ty);
}
}
- if (!isByRef(given_ty, mod) and isByRef(wanted_ty, mod)) {
+ if (!isByRef(given_ty, pt) and isByRef(wanted_ty, pt)) {
const stack_memory = try func.allocStack(wanted_ty);
try func.store(stack_memory, operand, given_ty, 0);
if (needs_wrapping) {
@@ -3842,17 +3876,18 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand;
- if (wanted_ty.bitSize(mod) > 64) return operand;
+ if (wanted_ty.bitSize(pt) > 64) return operand;
assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, mod),
- .valtype2 = typeToValtype(given_ty, mod),
+ .valtype1 = typeToValtype(wanted_ty, pt),
+ .valtype2 = typeToValtype(given_ty, pt),
});
try func.emitWValue(operand);
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
@@ -3860,7 +3895,8 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
}
fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.StructField, ty_pl.payload);
@@ -3872,7 +3908,8 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const struct_ptr = try func.resolveInst(ty_op.operand);
const struct_ptr_ty = func.typeOf(ty_op.operand);
@@ -3891,7 +3928,8 @@ fn structFieldPtr(
struct_ty: Type,
index: u32,
) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const result_ty = func.typeOfIndex(inst);
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
@@ -3902,12 +3940,12 @@ fn structFieldPtr(
break :offset @as(u32, 0);
}
const struct_type = mod.typeToStruct(struct_ty).?;
- break :offset @divExact(mod.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
+ break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
},
.Union => 0,
else => unreachable,
},
- else => struct_ty.structFieldOffset(index, mod),
+ else => struct_ty.structFieldOffset(index, pt),
};
// save a load and store when we can simply reuse the operand
if (offset == 0) {
@@ -3922,7 +3960,8 @@ fn structFieldPtr(
}
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -3931,13 +3970,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
const result = switch (struct_ty.containerLayout(mod)) {
.@"packed" => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
- const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
+ const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntType(ip).*);
const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
@@ -3956,7 +3995,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.binOp(operand, const_wvalue, backing_ty, .shr);
if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
@@ -3965,7 +4004,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
} else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try truncated.toLocal(func, field_ty);
}
@@ -3973,8 +4012,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try truncated.toLocal(func, field_ty);
},
.Union => result: {
- if (isByRef(struct_ty, mod)) {
- if (!isByRef(field_ty, mod)) {
+ if (isByRef(struct_ty, pt)) {
+ if (!isByRef(field_ty, pt)) {
const val = try func.load(operand, field_ty, 0);
break :result try val.toLocal(func, field_ty);
} else {
@@ -3984,14 +4023,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(mod))));
+ const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(pt))));
if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
const truncated = try func.trunc(operand, int_type, union_int_type);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
} else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
const truncated = try func.trunc(operand, int_type, union_int_type);
break :result try truncated.toLocal(func, field_ty);
}
@@ -4001,10 +4040,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
},
else => result: {
- const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse {
- return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)});
+ const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, pt)) orelse {
+ return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
};
- if (isByRef(field_ty, mod)) {
+ if (isByRef(field_ty, pt)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
@@ -4021,7 +4060,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -4055,7 +4095,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
errdefer func.gpa.free(values);
for (items, 0..) |ref, i| {
- const item_val = (try func.air.value(ref, mod)).?;
+ const item_val = (try func.air.value(ref, pt)).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
@@ -4078,7 +4118,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the target is an integer size larger than u32, we have no way to use the value
// as an index, therefore we also use an if/else-chain for those cases.
// TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
- const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32;
+ const is_sparse = highest - lowest > 50 or target_ty.bitSize(pt) > 32;
const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra_index..][0..switch_br.data.else_body_len]);
const has_else_body = else_body.len != 0;
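The sparseness heuristic above, with illustrative case values (not taken from the commit):

// case values {1, 3, 60}: highest - lowest = 59 > 50 => sparse, lowered as an if/else chain
// case values {0, 1, .., 10}: highest - lowest = 10 => dense, lowered via an indexed jump table
// target integer wider than 32 bits => always the if/else chain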
@@ -4150,7 +4190,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(case.values[0].value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, mod),
+ .valtype1 = typeToValtype(target_ty, pt),
.op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness,
});
@@ -4164,7 +4204,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(value.value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, mod),
+ .valtype1 = typeToValtype(target_ty, pt),
.op = .eq,
.signedness = signedness,
});
@@ -4201,7 +4241,8 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const err_union_ty = func.typeOf(un_op);
@@ -4217,10 +4258,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
try func.emitWValue(operand);
- if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try func.addMemArg(.i32_load16_u, .{
- .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
- .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
+ .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, pt))),
+ .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
});
}
@@ -4236,7 +4277,8 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4245,15 +4287,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
const payload_ty = err_ty.errorUnionPayload(mod);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (op_is_ptr) {
break :result func.reuseOperand(ty_op.operand, operand);
}
break :result WValue{ .none = {} };
}
- const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
- if (op_is_ptr or isByRef(payload_ty, mod)) {
+ const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
+ if (op_is_ptr or isByRef(payload_ty, pt)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -4264,7 +4306,8 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
}
fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4277,18 +4320,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
break :result WValue{ .imm32 = 0 };
}
- if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const error_val = try func.load(operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))));
+ const error_val = try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, pt)));
break :result try error_val.toLocal(func, Type.anyerror);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4296,18 +4339,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
const pl_ty = func.typeOf(ty_op.operand);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new);
+ const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new);
try func.store(payload_ptr, operand, pl_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
try func.emitWValue(err_union);
try func.addImm32(0);
- const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+ const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
try func.addMemArg(.i32_store16, .{
.offset = err_union.offset() + err_val_offset,
.alignment = 2,
@@ -4318,7 +4361,8 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4326,17 +4370,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_ty = err_ty.errorUnionPayload(mod);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))));
+ try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, pt)));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new);
- const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(mod)));
+ const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new);
+ const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(pt)));
try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
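A second, smaller cleanup rides along with the pt threading: casts such as @as(u32, @intCast(errUnionErrorOffset(...))) become either a typed declaration (const err_val_offset: u32 = @intCast(...)) or a bare @intCast at a call site. Since Zig 0.11 the cast builtins take their destination type from the result location, so the @as wrapper only earns its keep where no result type is available. In isolation:

const std = @import("std");

pub fn main() void {
    const wide: u64 = 40;

    // Destination type comes from the declaration's result location.
    const narrow: u32 = @intCast(wide);

    // No result location here, so @as must still supply the type;
    // a bare @intCast would be a compile error in this position.
    const doubled = @as(u32, @intCast(wide)) * 2;

    std.debug.print("{d} {d}\n", .{ narrow, doubled });
}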
@@ -4350,16 +4394,17 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = ty_op.ty.toType();
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.typeOf(ty_op.operand);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
return func.fail("todo Wasm intcast for vectors", .{});
}
- if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) {
+ if (ty.abiSize(pt) > 16 or operand_ty.abiSize(pt) > 16) {
return func.fail("todo Wasm intcast for bitsize > 128", .{});
}
- const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?;
- const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
+ const op_bits = toWasmBits(@intCast(operand_ty.bitSize(pt))).?;
+ const wanted_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
const result = if (op_bits == wanted_bits)
func.reuseOperand(ty_op.operand, operand)
else
@@ -4373,9 +4418,10 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Asserts type's bitsize <= 128
/// NOTE: May leave the result on the top of the stack.
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- const given_bitsize = @as(u16, @intCast(given.bitSize(mod)));
- const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(mod)));
+ const pt = func.pt;
+ const mod = pt.zcu;
+ const given_bitsize = @as(u16, @intCast(given.bitSize(pt)));
+ const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(pt)));
assert(given_bitsize <= 128);
assert(wanted_bitsize <= 128);
@@ -4422,7 +4468,8 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
@@ -4436,15 +4483,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
try func.emitWValue(operand);
const payload_ty = optional_ty.optionalChild(mod);
if (!optional_ty.optionalReprIsPayload(mod)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
- if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
- return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)});
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
};
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
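The offset calculations in isNull (and in the optional handlers below) go through std.math.cast(u32, ...) orelse return func.fail(...) rather than a plain @intCast: std.math.cast returns an optional and yields null when the value does not fit, so an oversized ABI size becomes the reported "too big to fit into stack frame" error instead of checked illegal behaviour. The same shape reduced to a few lines:

const std = @import("std");

fn stackOffset(abi_size: u64) error{TooBig}!u32 {
    // null when abi_size does not fit in a u32; no trap, just a reportable error.
    return std.math.cast(u32, abi_size) orelse error.TooBig;
}

pub fn main() void {
    if (stackOffset(16)) |off| {
        std.debug.print("offset {d}\n", .{off});
    } else |err| {
        std.debug.print("failed: {s}\n", .{@errorName(err)});
    }
    if (stackOffset(1 << 40)) |off| {
        std.debug.print("offset {d}\n", .{off});
    } else |err| {
        std.debug.print("failed: {s}\n", .{@errorName(err)});
    }
}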
@@ -4464,11 +4512,12 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
}
fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const opt_ty = func.typeOf(ty_op.operand);
const payload_ty = func.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return func.finishAir(inst, .none, &.{ty_op.operand});
}
@@ -4476,7 +4525,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, mod)) {
+ if (isByRef(payload_ty, pt)) {
break :result try func.buildPointerOffset(operand, 0, .new);
}
@@ -4487,14 +4536,15 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.typeOf(ty_op.operand).childType(mod);
const result = result: {
const payload_ty = opt_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or opt_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -4504,12 +4554,13 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.typeOf(ty_op.operand).childType(mod);
const payload_ty = opt_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
@@ -4517,8 +4568,8 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
return func.finishAir(inst, operand, &.{ty_op.operand});
}
- const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
- return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
};
try func.emitWValue(operand);
@@ -4532,10 +4583,11 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const payload_ty = func.typeOf(ty_op.operand);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const non_null_bit = try func.allocStack(Type.u1);
try func.emitWValue(non_null_bit);
try func.addImm32(1);
@@ -4548,8 +4600,8 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (op_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
- return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type
@@ -4589,14 +4641,15 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = func.typeOf(bin_op.lhs);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
// load pointer onto stack
_ = try func.load(slice, Type.usize, 0);
@@ -4610,7 +4663,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result_ptr.local.value);
- const result = if (!isByRef(elem_ty, mod)) result: {
+ const result = if (!isByRef(elem_ty, pt)) result: {
const elem_val = try func.load(result_ptr, elem_ty, 0);
break :result try elem_val.toLocal(func, elem_ty);
} else result_ptr;
@@ -4619,12 +4672,13 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const elem_ty = ty_pl.ty.toType().childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
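Every handler here starts from func.air.instructions.items(.data)[@intFromEnum(inst)]: the AIR instructions live in a std.MultiArrayList slice, so each struct field is stored as its own column and .items(.data) hands back just that column, indexed by the instruction's enum value. A small stand-alone illustration of the access pattern (the Inst layout below is made up, not AIR's):

const std = @import("std");

const Inst = struct {
    tag: enum { add, sub },
    data: u32,
};

pub fn main() !void {
    const gpa = std.heap.page_allocator;

    var insts: std.MultiArrayList(Inst) = .{};
    defer insts.deinit(gpa);

    try insts.append(gpa, .{ .tag = .add, .data = 7 });
    try insts.append(gpa, .{ .tag = .sub, .data = 9 });

    // Struct-of-arrays layout: picking the .data column never touches the tags.
    const datas = insts.slice().items(.data);
    std.debug.print("data[1] = {d}\n", .{datas[1]});
}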
@@ -4672,14 +4726,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Truncates a given operand to a given type, discarding any overflown bits.
/// NOTE: Resulting value is left on the stack.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- const given_bits = @as(u16, @intCast(given_ty.bitSize(mod)));
+ const pt = func.pt;
+ const given_bits = @as(u16, @intCast(given_ty.bitSize(pt)));
if (toWasmBits(given_bits) == null) {
return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
var result = try func.intcast(operand, given_ty, wanted_ty);
- const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(mod)));
+ const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(pt)));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
result = try func.wrapOperand(result, wanted_ty);
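intcast and trunc both bottom out in toWasmBits, which maps an arbitrary Zig integer width onto something wasm can hold (32, 64, or a two-limb 128) and returns null beyond that; the "TODO ... bitsize" failures above fire exactly on that null. A hypothetical version of such a helper, which may differ from the backend's real one:

const std = @import("std");

// Hypothetical sketch: round a bit width up to a representable wasm width.
fn toWasmBits(bits: u16) ?u16 {
    return switch (bits) {
        0...32 => 32,
        33...64 => 64,
        65...128 => 128,
        else => null,
    };
}

pub fn main() void {
    std.debug.print("{?d} {?d} {?d}\n", .{ toWasmBits(7), toWasmBits(48), toWasmBits(200) });
}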
@@ -4696,7 +4750,8 @@ fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4707,7 +4762,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const slice_local = try func.allocStack(slice_ty);
// store the array ptr in the slice
- if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (array_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try func.store(slice_local, operand, Type.usize, 0);
}
@@ -4719,7 +4774,8 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const ptr_ty = func.typeOf(un_op);
@@ -4734,14 +4790,15 @@ fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = func.typeOf(bin_op.lhs);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = ptr_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
// load pointer onto the stack
if (ptr_ty.isSlice(mod)) {
@@ -4759,7 +4816,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_result = val: {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
break :val result;
}
defer result.free(func); // only free if it's not returned like above
@@ -4771,13 +4828,14 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = func.typeOf(bin_op.lhs);
const elem_ty = ty_pl.ty.toType().childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4801,7 +4859,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4813,13 +4872,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
else => ptr_ty.childType(mod),
};
- const valtype = typeToValtype(Type.usize, mod);
+ const valtype = typeToValtype(Type.usize, pt);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@intCast(pointee_ty.abiSize(mod)));
+ try func.addImm32(@intCast(pointee_ty.abiSize(pt)));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4829,7 +4888,8 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -4862,8 +4922,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
/// this to wasm's memset instruction. When the feature is not present,
/// we implement it manually.
fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
- const abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
+ const pt = func.pt;
+ const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves.
@@ -4951,16 +5011,17 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
}
fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const array_ty = func.typeOf(bin_op.lhs);
const array = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = array_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
- if (isByRef(array_ty, mod)) {
+ if (isByRef(array_ty, pt)) {
try func.lowerToStack(array);
try func.emitWValue(index);
try func.addImm32(@intCast(elem_size));
@@ -4971,7 +5032,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (index) {
inline .imm32, .imm64 => |lane| {
- const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) {
+ const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(pt)) {
8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
@@ -5007,7 +5068,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
break :val result;
}
defer result.free(func); // only free if no longer needed and not returned like above
@@ -5020,7 +5081,8 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -5054,8 +5116,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, mod),
- .valtype2 = typeToValtype(op_ty, mod),
+ .valtype1 = typeToValtype(dest_ty, pt),
+ .valtype2 = typeToValtype(op_ty, pt),
.signedness = dest_info.signedness,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -5065,7 +5127,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -5099,8 +5162,8 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, mod),
- .valtype2 = typeToValtype(op_ty, mod),
+ .valtype1 = typeToValtype(dest_ty, pt),
+ .valtype2 = typeToValtype(op_ty, pt),
.signedness = op_info.signedness,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -5111,19 +5174,20 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOfIndex(inst);
const elem_ty = ty.childType(mod);
- if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, pt) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
// load and splat the value at once. Meaning we do not first have to load
// the scalar value onto the stack.
.stack_offset, .memory, .memory_offset => {
- const opcode = switch (elem_ty.bitSize(mod)) {
+ const opcode = switch (elem_ty.bitSize(pt)) {
8 => std.wasm.simdOpcode(.v128_load8_splat),
16 => std.wasm.simdOpcode(.v128_load16_splat),
32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -5138,14 +5202,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- @intCast(elem_ty.abiAlignment(mod).toByteUnits().?),
+ @intCast(elem_ty.abiAlignment(pt).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
return func.finishAir(inst, result, &.{ty_op.operand});
},
.local => {
- const opcode = switch (elem_ty.bitSize(mod)) {
+ const opcode = switch (elem_ty.bitSize(pt)) {
8 => std.wasm.simdOpcode(.i8x16_splat),
16 => std.wasm.simdOpcode(.i16x8_splat),
32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
@@ -5163,14 +5227,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
}
- const elem_size = elem_ty.bitSize(mod);
+ const elem_size = elem_ty.bitSize(pt);
const vector_len = @as(usize, @intCast(ty.vectorLen(mod)));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
const result = try func.allocStack(ty);
- const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
+ const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
@@ -5190,7 +5254,8 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const inst_ty = func.typeOfIndex(inst);
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -5201,14 +5266,14 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mask_len = extra.mask_len;
const child_ty = inst_ty.childType(mod);
- const elem_size = child_ty.abiSize(mod);
+ const elem_size = child_ty.abiSize(pt);
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
+ if (isByRef(func.typeOf(extra.a), pt) or isByRef(inst_ty, pt)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
- const value = (try mask.elemValue(mod, index)).toSignedInt(mod);
+ const value = (try mask.elemValue(pt, index)).toSignedInt(pt);
try func.emitWValue(result);
@@ -5228,7 +5293,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lanes = mem.asBytes(operands[1..]);
for (0..@as(usize, @intCast(mask_len))) |index| {
- const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
+ const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
const base_index = if (mask_elem >= 0)
@as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem))
else
@@ -5259,7 +5324,8 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = func.typeOfIndex(inst);
@@ -5271,7 +5337,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Array => {
const result = try func.allocStack(result_ty);
const elem_ty = result_ty.childType(mod);
- const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
break :blk try func.lowerConstant(sent, elem_ty);
} else null;
@@ -5279,7 +5345,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
@@ -5309,7 +5375,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.Struct => switch (result_ty.containerLayout(mod)) {
.@"packed" => {
- if (isByRef(result_ty, mod)) {
+ if (isByRef(result_ty, pt)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const packed_struct = mod.typeToPackedStruct(result_ty).?;
@@ -5318,7 +5384,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// ensure the result is zero'd
const result = try func.allocLocal(backing_type);
- if (backing_type.bitSize(mod) <= 32)
+ if (backing_type.bitSize(pt) <= 32)
try func.addImm32(0)
else
try func.addImm64(0);
@@ -5327,16 +5393,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
const field_ty = Type.fromInterned(field_types.get(ip)[elem_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
- const shift_val = if (backing_type.bitSize(mod) <= 32)
+ const shift_val = if (backing_type.bitSize(pt) <= 32)
WValue{ .imm32 = current_bit }
else
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size: u16 = @intCast(field_ty.bitSize(mod));
- const int_ty = try mod.intType(.unsigned, value_bit_size);
+ const value_bit_size: u16 = @intCast(field_ty.bitSize(pt));
+ const int_ty = try pt.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
// using only stack values. Saving the cost of loads and stores.
@@ -5359,10 +5425,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
var prev_field_offset: u64 = 0;
for (elements, 0..) |elem, elem_index| {
- if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
+ if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index, mod);
- const field_offset = result_ty.structFieldOffset(elem_index, mod);
+ const field_offset = result_ty.structFieldOffset(elem_index, pt);
_ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
prev_field_offset = field_offset;
@@ -5389,14 +5455,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result = result: {
const union_ty = func.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(mod);
+ const layout = union_ty.unionGetLayout(pt);
const union_obj = mod.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
@@ -5404,22 +5471,22 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?;
- const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
break :blk try func.lowerConstant(tag_val, tag_ty);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
break :result WValue{ .none = {} };
}
- assert(!isByRef(union_ty, mod));
+ assert(!isByRef(union_ty, pt));
break :result tag_int;
}
- if (isByRef(union_ty, mod)) {
+ if (isByRef(union_ty, pt)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
- if (isByRef(field_ty, mod)) {
+ if (isByRef(field_ty, pt)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field_ty, 0);
} else {
@@ -5443,14 +5510,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result result_ptr;
} else {
const operand = try func.resolveInst(extra.init);
- const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(mod))));
+ const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(pt))));
if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try mod.intType(.unsigned, @intCast(field_ty.bitSize(mod)));
+ const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt)));
const bitcasted = try func.bitcast(field_ty, int_type, operand);
const casted = try func.trunc(bitcasted, int_type, union_int_type);
break :result try casted.toLocal(func, field_ty);
} else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try mod.intType(.unsigned, @intCast(field_ty.bitSize(mod)));
+ const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt)));
const casted = try func.intcast(operand, int_type, union_int_type);
break :result try casted.toLocal(func, field_ty);
}
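Like most handlers in this file, airUnionInit builds its value inside a labeled block, const result = result: { ... };, and each branch exits with break :result ...; whatever value the break carries becomes the block's value and flows straight into finishAir. The construct on its own:

const std = @import("std");

pub fn main() void {
    const payload_size: u32 = 4;

    // A labeled block is an expression; `break :result x` ends it with value x.
    const result: u32 = result: {
        if (payload_size == 0) break :result 0;
        break :result payload_size * 2;
    };

    std.debug.print("result = {d}\n", .{result});
}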
@@ -5488,8 +5555,9 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
}
fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
+ const pt = func.pt;
+ const mod = pt.zcu;
+ assert(operand_ty.hasRuntimeBitsIgnoreComptime(pt));
assert(op == .eq or op == .neq);
const payload_ty = operand_ty.optionalChild(mod);
@@ -5506,7 +5574,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
@@ -5524,11 +5592,12 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- assert(operand_ty.abiSize(mod) >= 16);
+ const pt = func.pt;
+ const mod = pt.zcu;
+ assert(operand_ty.abiSize(pt) >= 16);
assert(!(lhs != .stack and rhs == .stack));
- if (operand_ty.bitSize(mod) > 128) {
- return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
+ if (operand_ty.bitSize(pt) > 128) {
+ return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(pt)});
}
var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5566,11 +5635,12 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const un_ty = func.typeOf(bin_op.lhs).childType(mod);
const tag_ty = func.typeOf(bin_op.rhs);
- const layout = un_ty.unionGetLayout(mod);
+ const layout = un_ty.unionGetLayout(pt);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5590,12 +5660,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = func.typeOf(ty_op.operand);
const tag_ty = func.typeOfIndex(inst);
- const layout = un_ty.unionGetLayout(mod);
+ const layout = un_ty.unionGetLayout(pt);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
@@ -5695,7 +5765,8 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
@@ -5707,27 +5778,28 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
operand,
.{ .imm32 = 0 },
Type.anyerror,
- @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))),
+ @intCast(errUnionErrorOffset(payload_ty, pt)),
);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))), .new);
+ break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))), .new);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try func.resolveInst(extra.field_ptr);
const parent_ty = ty_pl.ty.toType().childType(mod);
- const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5742,7 +5814,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (ptr_ty.isSlice(mod)) {
return func.slicePtr(ptr);
} else {
@@ -5751,7 +5824,8 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
}
fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dst = try func.resolveInst(bin_op.lhs);
const dst_ty = func.typeOf(bin_op.lhs);
@@ -5761,16 +5835,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const len = switch (dst_ty.ptrSize(mod)) {
.Slice => blk: {
const slice_len = try func.sliceLen(dst);
- if (ptr_elem_ty.abiSize(mod) != 1) {
+ if (ptr_elem_ty.abiSize(pt) != 1) {
try func.emitWValue(slice_len);
- try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(mod))) });
+ try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(pt))) });
try func.addTag(.i32_mul);
try func.addLabel(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
.One => @as(WValue, .{
- .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod))),
+ .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(pt))),
}),
.C, .Many => unreachable,
};
@@ -5791,7 +5865,8 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -5812,14 +5887,14 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
32 => {
try func.emitWValue(operand);
if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits));
+ _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
}
try func.addTag(.i32_popcnt);
},
64 => {
try func.emitWValue(operand);
if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits));
+ _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
}
try func.addTag(.i64_popcnt);
try func.addTag(.i32_wrap_i64);
@@ -5830,7 +5905,7 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i64_popcnt);
_ = try func.load(operand, Type.u64, 8);
if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits - 64));
+ _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
}
try func.addTag(.i64_popcnt);
try func.addTag(.i64_add);
@@ -5845,7 +5920,8 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -5956,10 +6032,10 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
//
// As the names are global and the slice elements are constant, we do not have
// to make a copy of the ptr+value but can point towards them directly.
- const error_table_symbol = try func.bin_file.getErrorTableSymbol();
+ const pt = func.pt;
+ const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
const name_ty = Type.slice_const_u8_sentinel_0;
- const mod = func.bin_file.base.comp.module.?;
- const abi_size = name_ty.abiSize(mod);
+ const abi_size = name_ty.abiSize(pt);
const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
try func.emitWValue(error_name_value);
@@ -5998,7 +6074,8 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
const lhs_ty = func.typeOf(extra.lhs);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
@@ -6044,14 +6121,15 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
+ const offset = @as(u32, @intCast(lhs_ty.abiSize(pt)));
try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
assert(op == .add or op == .sub);
const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
@@ -6116,7 +6194,8 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
}
fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -6159,7 +6238,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
+ const offset = @as(u32, @intCast(lhs_ty.abiSize(pt)));
try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6172,7 +6251,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
const lhs_ty = func.typeOf(extra.lhs);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
@@ -6332,7 +6412,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, bin_op_local, lhs_ty, 0);
- const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
+ const offset = @as(u32, @intCast(lhs_ty.abiSize(pt)));
try func.store(result_ptr, overflow_bit, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6340,7 +6420,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .max or op == .min);
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const target = mod.getTarget();
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -6349,7 +6430,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
- if (ty.abiSize(mod) > 16) {
+ if (ty.abiSize(pt) > 16) {
return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
}
@@ -6377,14 +6458,15 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
// store result in local
- const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
+ const result_ty = if (isByRef(ty, pt)) Type.u32 else ty;
const result = try func.allocLocal(result_ty);
try func.addLabel(.local_set, result.local.value);
func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
@@ -6418,7 +6500,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOf(ty_op.operand);
@@ -6471,7 +6554,8 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOf(ty_op.operand);
@@ -6558,7 +6642,8 @@ fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void {
if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const ty = func.typeOf(pl_op.operand);
const operand = try func.resolveInst(pl_op.operand);
@@ -6591,7 +6676,8 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try func.resolveInst(extra.data.ptr);
@@ -6609,13 +6695,14 @@ fn lowerTry(
err_union_ty: Type,
operand_is_ptr: bool,
) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
if (operand_is_ptr) {
return func.fail("TODO: lowerTry for pointers", .{});
}
const pl_ty = err_union_ty.errorUnionPayload(mod);
- const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(pt);
if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
// Block we can jump out of when error is not set
@@ -6624,10 +6711,10 @@ fn lowerTry(
// check if the error tag is set for the error union.
try func.emitWValue(err_union);
if (pl_has_bits) {
- const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+ const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
});
}
try func.addTag(.i32_eqz);
@@ -6649,8 +6736,8 @@ fn lowerTry(
return WValue{ .none = {} };
}
- const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
- if (isByRef(pl_ty, mod)) {
+ const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ if (isByRef(pl_ty, pt)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6658,7 +6745,8 @@ fn lowerTry(
}
fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOfIndex(inst);
@@ -6744,7 +6832,8 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -6864,7 +6953,8 @@ fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -6901,7 +6991,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -6949,11 +7040,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
- const ext_ty = if (!is_wasm_bits) try mod.intType(int_info.signedness, wasm_bits) else ty;
+ const ext_ty = if (!is_wasm_bits) try pt.intType(int_info.signedness, wasm_bits) else ty;
const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1));
const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1;
@@ -7007,7 +7099,8 @@ fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty = func.typeOfIndex(inst);
const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
@@ -7061,7 +7154,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
64 => WValue{ .imm64 = shift_size },
else => unreachable,
};
- const ext_ty = try mod.intType(int_info.signedness, wasm_bits);
+ const ext_ty = try pt.intType(int_info.signedness, wasm_bits);
var shl_res = try (try func.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(func, ext_ty);
defer shl_res.free(func);
@@ -7128,13 +7221,14 @@ fn callIntrinsic(
};
// Always pass over C-ABI
- const mod = func.bin_file.base.comp.module.?;
- var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
+ const pt = func.pt;
+ const mod = pt.zcu;
+ var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
- const want_sret_param = firstParamSRet(.C, return_type, mod);
+ const want_sret_param = firstParamSRet(.C, return_type, pt);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@@ -7146,14 +7240,14 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(mod));
+ assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(pt));
try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
}
// Actually call our intrinsic
try func.addLabel(.call, @intFromEnum(symbol_index));
- if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
return WValue.none;
} else if (return_type.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
@@ -7181,7 +7275,8 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const enum_decl_index = enum_ty.getOwnerDecl(mod);
@@ -7199,7 +7294,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
const int_tag_ty = enum_ty.intTagType(mod);
- if (int_tag_ty.bitSize(mod) > 64) {
+ if (int_tag_ty.bitSize(pt) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
}
@@ -7225,16 +7320,17 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
const tag_name_len = tag_name.length(ip);
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
- const name_ty = try mod.arrayType(.{
+ const name_ty = try pt.arrayType(.{
.len = tag_name_len,
.child = .u8_type,
.sentinel = .zero_u8,
});
- const name_val = try mod.intern(.{ .aggregate = .{
+ const name_val = try pt.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name.toString() },
} });
const tag_sym_index = try func.bin_file.lowerUnnamedConst(
+ pt,
Value.fromInterned(name_val),
enum_decl_index,
);
@@ -7247,7 +7343,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.local_get));
try leb.writeUleb128(writer, @as(u32, 1));
- const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index));
+ const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
const tag_value = try func.lowerConstant(tag_val, enum_ty);
switch (tag_value) {
@@ -7334,13 +7430,14 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.end));
const slice_ty = Type.slice_const_u8_sentinel_0;
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod);
+ const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt);
const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
return @intFromEnum(sym_index);
}
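getTagNameFunction assembles its wasm body by hand: opcodes come from std.wasm, and immediates are written with leb.writeUleb128, wasm's variable-length integer encoding (7 payload bits per byte, high bit set while more bytes follow). The encoding by itself, through the same writer-based std API the code above calls (spelled writeUleb128 here; older std releases spell it writeULEB128):

const std = @import("std");

pub fn main() !void {
    var buf: [8]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    // 300 = 0b1_0010_1100 -> 0xAC 0x02 (low 7 bits first, continuation bit on the first byte).
    try std.leb.writeUleb128(fbs.writer(), @as(u32, 300));

    std.debug.print("{any}\n", .{fbs.getWritten()});
}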
fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -7426,7 +7523,8 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
}
fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
@@ -7445,7 +7543,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr_operand);
try func.lowerToStack(expected_val);
try func.lowerToStack(new_val);
- try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
+ try func.addAtomicMemArg(switch (ty.abiSize(pt)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7453,14 +7551,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
try func.addLabel(.local_set, cmp_result.local.value);
break :val val_local;
} else val: {
- if (ty.abiSize(mod) > 8) {
+ if (ty.abiSize(pt) > 8) {
return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
}
const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7476,7 +7574,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val ptr_val;
};
- const result_ptr = if (isByRef(result_ty, mod)) val: {
+ const result_ptr = if (isByRef(result_ty, pt)) val: {
try func.emitWValue(cmp_result);
try func.addImm32(~@as(u32, 0));
try func.addTag(.i32_xor);
@@ -7484,7 +7582,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_and);
const and_result = try WValue.toLocal(.stack, func, Type.bool);
const result_ptr = try func.allocStack(result_ty);
- try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(mod))));
+ try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(pt))));
try func.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
@@ -7499,13 +7597,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const ptr = try func.resolveInst(atomic_load.ptr);
const ty = func.typeOfIndex(inst);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
@@ -7515,7 +7613,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7526,7 +7624,8 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
@@ -7550,7 +7649,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.emitWValue(value);
if (op == .Nand) {
- const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
+ const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
@@ -7567,7 +7666,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.select);
}
try func.addAtomicMemArg(
- switch (ty.abiSize(mod)) {
+ switch (ty.abiSize(pt)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7576,7 +7675,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
},
);
const select_res = try func.allocLocal(ty);
@@ -7595,7 +7694,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
try func.emitWValue(ptr);
try func.emitWValue(operand);
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@@ -7636,7 +7735,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7681,7 +7780,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
- const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
+ const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7701,7 +7800,8 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const zcu = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const zcu = pt.zcu;
// Only when the atomic feature is enabled, and we're not building
// for a single-threaded build, can we emit the `fence` instruction.
// In all other cases, we emit no instructions for a fence.
@@ -7715,7 +7815,8 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr = try func.resolveInst(bin_op.lhs);
@@ -7724,7 +7825,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = ptr_ty.childType(mod);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,
@@ -7735,7 +7836,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
});
} else {
try func.store(ptr, operand, ty, 0);
@@ -7754,11 +7855,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
return func.air.typeOf(inst, &mod.intern_pool);
}
fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
- const mod = func.bin_file.base.comp.module.?;
+ const pt = func.pt;
+ const mod = pt.zcu;
return func.air.typeOfIndex(inst, &mod.intern_pool);
}
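The handlers above all follow one mechanical pattern: instead of digging the module out of the linker file (`func.bin_file.base.comp.module.?`), each function reads the per-thread handle carried on the code generator (`func.pt`) and reaches the `Zcu` through `pt.zcu`, while layout queries such as `abiSize`, `abiAlignment`, and `bitSize` now take `pt` instead of the module. A schematic sketch of that shape, with `airExample` a made-up stand-in for the real `air*` handlers:

fn airExample(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const pt = func.pt; // per-thread handle stored on the code generator
    const mod = pt.zcu; // the compilation unit, no longer fetched via bin_file
    const ty = func.typeOfIndex(inst);
    _ = ty.zigTypeTag(mod); // semantic queries keep taking the Zcu
    _ = ty.abiSize(pt); // layout queries go through the per-thread handle
    _ = ty.abiAlignment(pt);
}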
src/arch/x86_64/abi.zig
@@ -44,7 +44,7 @@ pub const Class = enum {
}
};
-pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
+pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@@ -53,7 +53,7 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
// "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
- switch (ty.zigTypeTag(zcu)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
.Pointer,
.Int,
.Bool,
@@ -68,12 +68,12 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
.ErrorUnion,
.AnyFrame,
.Frame,
- => switch (ty.abiSize(zcu)) {
+ => switch (ty.abiSize(pt)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
- else => switch (ty.zigTypeTag(zcu)) {
+ else => switch (ty.zigTypeTag(pt.zcu)) {
.Int => return .win_i128,
- .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") {
+ .Struct, .Union => if (ty.containerLayout(pt.zcu) == .@"packed") {
return .win_i128;
} else {
return .memory;
@@ -100,14 +100,14 @@ pub const Context = enum { ret, arg, field, other };
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
+pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
- switch (ty.zigTypeTag(zcu)) {
- .Pointer => switch (ty.ptrSize(zcu)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
+ .Pointer => switch (ty.ptrSize(pt.zcu)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@@ -119,7 +119,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
},
},
.Int, .Enum, .ErrorSet => {
- const bits = ty.intInfo(zcu).bits;
+ const bits = ty.intInfo(pt.zcu).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
@@ -185,8 +185,8 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
else => unreachable,
},
.Vector => {
- const elem_ty = ty.childType(zcu);
- const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
+ const elem_ty = ty.childType(pt.zcu);
+ const bits = elem_ty.bitSize(pt) * ty.arrayLen(pt.zcu);
if (elem_ty.toIntern() == .bool_type) {
if (bits <= 32) return .{
.integer, .none, .none, .none,
@@ -250,7 +250,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
return memory_class;
},
.Optional => {
- if (ty.isPtrLikeOptional(zcu)) {
+ if (ty.isPtrLikeOptional(pt.zcu)) {
result[0] = .integer;
return result;
}
@@ -261,8 +261,8 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
- const ty_size = ty.abiSize(zcu);
- switch (ty.containerLayout(zcu)) {
+ const ty_size = ty.abiSize(pt);
+ switch (ty.containerLayout(pt.zcu)) {
.auto, .@"extern" => {},
.@"packed" => {
assert(ty_size <= 16);
@@ -274,10 +274,10 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
if (ty_size > 64)
return memory_class;
- _ = if (zcu.typeToStruct(ty)) |loaded_struct|
- classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
- else if (zcu.typeToUnion(ty)) |loaded_union|
- classifySystemVUnion(&result, 0, loaded_union, zcu, target)
+ _ = if (pt.zcu.typeToStruct(ty)) |loaded_struct|
+ classifySystemVStruct(&result, 0, loaded_struct, pt, target)
+ else if (pt.zcu.typeToUnion(ty)) |loaded_union|
+ classifySystemVUnion(&result, 0, loaded_union, pt, target)
else
unreachable;
@@ -306,7 +306,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
return result;
},
.Array => {
- const ty_size = ty.abiSize(zcu);
+ const ty_size = ty.abiSize(pt);
if (ty_size <= 8) {
result[0] = .integer;
return result;
@@ -326,10 +326,10 @@ fn classifySystemVStruct(
result: *[8]Class,
starting_byte_offset: u64,
loaded_struct: InternPool.LoadedStructType,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
target: std.Target,
) u64 {
- const ip = &zcu.intern_pool;
+ const ip = &pt.zcu.intern_pool;
var byte_offset = starting_byte_offset;
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
@@ -338,29 +338,29 @@ fn classifySystemVStruct(
byte_offset = std.mem.alignForward(
u64,
byte_offset,
- field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
+ field_align.toByteUnits() orelse field_ty.abiAlignment(pt).toByteUnits().?,
);
- if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
+ if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
- byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
+ byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, pt, target);
continue;
},
.@"packed" => {},
}
- } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
+ } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
- byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
+ byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
continue;
},
.@"packed" => {},
}
}
- const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
+ const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
- byte_offset += field_ty.abiSize(zcu);
+ byte_offset += field_ty.abiSize(pt);
}
const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
std.debug.assert(final_byte_offset == std.mem.alignForward(
@@ -375,30 +375,30 @@ fn classifySystemVUnion(
result: *[8]Class,
starting_byte_offset: u64,
loaded_union: InternPool.LoadedUnionType,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
target: std.Target,
) u64 {
- const ip = &zcu.intern_pool;
+ const ip = &pt.zcu.intern_pool;
for (0..loaded_union.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
- if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
+ if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
switch (field_loaded_struct.layout) {
.auto, .@"extern" => {
- _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
+ _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, pt, target);
continue;
},
.@"packed" => {},
}
- } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
+ } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
switch (field_loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
- _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
+ _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
continue;
},
.@"packed" => {},
}
}
- const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
+ const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}
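For orientation, the result-array convention used by `classifySystemV`: the eight slots are filled from the front and padded with `.none`, and callers trim with `std.mem.sliceTo`. A hedged sketch under the branches visible above (the caller, `some_ty`, `pt`, and `target` are assumed to be in scope; the listed classes are only the ones implied by these hunks):

// []u8 (slice)              -> { .integer, .integer, .none, ... }  // pointer + length eightbytes
// u32 (<= 64-bit int)       -> { .integer, .none, ... }            // one integer slot
// extern struct > 64 bytes  -> memory_class                        // passed in memory
const classes = classifySystemV(some_ty, pt, target, .arg);
const used = std.mem.sliceTo(&classes, .none); // the leading, non-.none slots
std.debug.assert(used.len <= 8);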
src/arch/x86_64/CodeGen.zig
@@ -19,7 +19,7 @@ const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const Result = codegen.Result;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
@@ -27,8 +27,6 @@ const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const Package = @import("../../Package.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../../InternPool.zig");
const Alignment = InternPool.Alignment;
const Target = std.Target;
@@ -52,6 +50,7 @@ const FrameIndex = bits.FrameIndex;
const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
+pt: Zcu.PerThread,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -74,7 +73,7 @@ va_info: union {
ret_mcv: InstTracking,
fn_type: Type,
arg_index: u32,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
eflags_inst: ?Air.Inst.Index = null,
@@ -120,18 +119,18 @@ const Owner = union(enum) {
func_index: InternPool.Index,
lazy_sym: link.File.LazySymbol,
- fn getDecl(owner: Owner, mod: *Module) InternPool.DeclIndex {
+ fn getDecl(owner: Owner, zcu: *Zcu) InternPool.DeclIndex {
return switch (owner) {
- .func_index => |func_index| mod.funcOwnerDeclIndex(func_index),
- .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod),
+ .func_index => |func_index| zcu.funcOwnerDeclIndex(func_index),
+ .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(zcu),
};
}
fn getSymbolIndex(owner: Owner, ctx: *Self) !u32 {
+ const pt = ctx.pt;
switch (owner) {
.func_index => |func_index| {
- const mod = ctx.bin_file.comp.module.?;
- const decl_index = mod.funcOwnerDeclIndex(func_index);
+ const decl_index = ctx.pt.zcu.funcOwnerDeclIndex(func_index);
if (ctx.bin_file.cast(link.File.Elf)) |elf_file| {
return elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
} else if (ctx.bin_file.cast(link.File.MachO)) |macho_file| {
@@ -145,17 +144,17 @@ const Owner = union(enum) {
},
.lazy_sym => |lazy_sym| {
if (ctx.bin_file.cast(link.File.Elf)) |elf_file| {
- return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
+ return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
} else if (ctx.bin_file.cast(link.File.MachO)) |macho_file| {
- return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, lazy_sym) catch |err|
+ return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err|
ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
} else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
+ const atom = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
return ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
return coff_file.getAtom(atom).getSymbolIndex().?;
} else if (ctx.bin_file.cast(link.File.Plan9)) |p9_file| {
- return p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
+ return p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
return ctx.fail("{s} creating lazy symbol", .{@errorName(err)});
} else unreachable;
},
@@ -753,14 +752,14 @@ const FrameAlloc = struct {
.ref_count = 0,
};
}
- fn initType(ty: Type, mod: *Module) FrameAlloc {
+ fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc {
return init(.{
- .size = ty.abiSize(mod),
- .alignment = ty.abiAlignment(mod),
+ .size = ty.abiSize(pt),
+ .alignment = ty.abiAlignment(pt),
});
}
- fn initSpill(ty: Type, mod: *Module) FrameAlloc {
- const abi_size = ty.abiSize(mod);
+ fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc {
+ const abi_size = ty.abiSize(pt);
const spill_size = if (abi_size < 8)
math.ceilPowerOfTwoAssert(u64, abi_size)
else
@@ -768,7 +767,7 @@ const FrameAlloc = struct {
return init(.{
.size = spill_size,
.pad = @intCast(spill_size - abi_size),
- .alignment = ty.abiAlignment(mod).maxStrict(
+ .alignment = ty.abiAlignment(pt).maxStrict(
Alignment.fromNonzeroByteUnits(@min(spill_size, 8)),
),
});
@@ -777,7 +776,7 @@ const FrameAlloc = struct {
const StackAllocation = struct {
inst: ?Air.Inst.Index,
- /// TODO do we need size? should be determined by inst.ty.abiSize(mod)
+ /// TODO do we need size? should be determined by inst.ty.abiSize(pt)
size: u32,
};
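A standalone worked check of the spill-size rule in `FrameAlloc.initSpill` above, assuming only what the visible branch shows: values narrower than 8 bytes are spilled at their ABI size rounded up to a power of two.

const std = @import("std");

test "spill size rounding for small ABI sizes" {
    const abi_sizes = [_]u64{ 1, 2, 3, 5, 7 };
    const spill_sizes = [_]u64{ 1, 2, 4, 8, 8 };
    for (abi_sizes, spill_sizes) |abi_size, want| {
        // Mirrors the `abi_size < 8` branch of FrameAlloc.initSpill.
        const spill_size = std.math.ceilPowerOfTwoAssert(u64, abi_size);
        try std.testing.expectEqual(want, spill_size);
    }
}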
@@ -795,16 +794,17 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const comp = bin_file.comp;
- const gpa = comp.gpa;
- const zcu = comp.module.?;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const comp = zcu.comp;
const func = zcu.funcInfo(func_index);
const fn_owner_decl = zcu.declPtr(func.owner_decl);
assert(fn_owner_decl.has_tv);
@@ -812,8 +812,9 @@ pub fn generate(
const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace);
const mod = namespace.fileScope(zcu).mod;
- var function = Self{
+ var function: Self = .{
.gpa = gpa,
+ .pt = pt,
.air = air,
.liveness = liveness,
.target = &mod.resolved_target.result,
@@ -882,11 +883,11 @@ pub fn generate(
function.args = call_info.args;
function.ret_mcv = call_info.return_value;
function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(zcu),
- .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align),
+ .size = Type.usize.abiSize(pt),
+ .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align),
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(zcu),
+ .size = Type.usize.abiSize(pt),
.alignment = Alignment.min(
call_info.stack_align,
Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -971,7 +972,8 @@ pub fn generate(
pub fn generateLazy(
bin_file: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
@@ -980,8 +982,9 @@ pub fn generateLazy(
const gpa = comp.gpa;
// This function is for generating global code, so we use the root module.
const mod = comp.root_mod;
- var function = Self{
+ var function: Self = .{
.gpa = gpa,
+ .pt = pt,
.air = undefined,
.liveness = undefined,
.target = &mod.resolved_target.result,
@@ -1065,7 +1068,7 @@ pub fn generateLazy(
}
const FormatDeclData = struct {
- mod: *Module,
+ zcu: *Zcu,
decl_index: InternPool.DeclIndex,
};
fn formatDecl(
@@ -1074,11 +1077,11 @@ fn formatDecl(
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- try data.mod.declPtr(data.decl_index).renderFullyQualifiedName(data.mod, writer);
+ try data.zcu.declPtr(data.decl_index).renderFullyQualifiedName(data.zcu, writer);
}
fn fmtDecl(self: *Self, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) {
return .{ .data = .{
- .mod = self.bin_file.comp.module.?,
+ .zcu = self.pt.zcu,
.decl_index = decl_index,
} };
}
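`fmtDecl`/`formatDecl` use the standard `std.fmt.Formatter` wrapper: bundle whatever the format callback needs into a data struct and return a formatter over it. A self-contained sketch of the same pattern with made-up names (`NameData`, `fmtName`):

const std = @import("std");

const NameData = struct { first: []const u8, last: []const u8 };

fn formatName(
    data: NameData,
    comptime _: []const u8,
    _: std.fmt.FormatOptions,
    writer: anytype,
) @TypeOf(writer).Error!void {
    try writer.print("{s} {s}", .{ data.first, data.last });
}

fn fmtName(first: []const u8, last: []const u8) std.fmt.Formatter(formatName) {
    return .{ .data = .{ .first = first, .last = last } };
}

test "formatter wrapper" {
    var buf: [32]u8 = undefined;
    const out = try std.fmt.bufPrint(&buf, "{}", .{fmtName("Ada", "Lovelace")});
    try std.testing.expectEqualStrings("Ada Lovelace", out);
}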
@@ -1095,7 +1098,7 @@ fn formatAir(
) @TypeOf(writer).Error!void {
@import("../../print_air.zig").dumpInst(
data.inst,
- data.self.bin_file.comp.module.?,
+ data.self.pt,
data.self.air,
data.self.liveness,
);
@@ -1746,7 +1749,8 @@ fn asmMemoryRegisterImmediate(
}
fn gen(self: *Self) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const fn_info = mod.typeToFunc(self.fn_type).?;
const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
if (cc != .Naked) {
@@ -1764,7 +1768,7 @@ fn gen(self: *Self) InnerError!void {
// The address where to store the return value for the caller is in a
// register which the callee is free to clobber. Therefore, we purposely
// spill it to stack immediately.
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt));
try self.genSetMem(
.{ .frame = frame_index },
0,
@@ -1800,7 +1804,7 @@ fn gen(self: *Self) InnerError!void {
try self.asmRegisterImmediate(.{ ._, .cmp }, .al, Immediate.u(info.fp_count));
const skip_sse_reloc = try self.asmJccReloc(.na, undefined);
- const vec_2_f64 = try mod.vectorType(.{ .len = 2, .child = .f64_type });
+ const vec_2_f64 = try pt.vectorType(.{ .len = 2, .child = .f64_type });
for (abi.SysV.c_abi_sse_param_regs[info.fp_count..], info.fp_count..) |reg, reg_i|
try self.genSetMem(
.{ .frame = reg_save_area_fi },
@@ -1951,7 +1955,8 @@ fn gen(self: *Self) InnerError!void {
}
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
@@ -2222,12 +2227,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
}
fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (lazy_sym.ty.zigTypeTag(mod)) {
.Enum => {
const enum_ty = lazy_sym.ty;
- wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(mod)});
+ wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*);
const param_regs = abi.getCAbiIntParamRegs(resolved_cc);
@@ -2249,7 +2255,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
const tag_names = enum_ty.enumFields(mod);
for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
- const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index));
+ const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
const tag_mcv = try self.genTypedValue(tag_val);
try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv);
const skip_reloc = try self.asmJccReloc(.ne, undefined);
@@ -2282,7 +2288,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
},
else => return self.fail(
"TODO implement {s} for {}",
- .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(mod) },
+ .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) },
),
}
}
@@ -2481,14 +2487,15 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
const val_ty = ptr_ty.childType(mod);
return self.allocFrameIndex(FrameAlloc.init(.{
- .size = math.cast(u32, val_ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
+ .size = math.cast(u32, val_ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
},
- .alignment = ptr_ty.ptrAlignment(mod).max(.@"1"),
+ .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"),
}));
}
@@ -2501,9 +2508,10 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
}
fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
- const mod = self.bin_file.comp.module.?;
- const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
+ return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
};
if (reg_ok) need_mem: {
@@ -2529,12 +2537,13 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
}
}
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, pt));
return .{ .load_frame = .{ .index = frame_index } };
}
fn regClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
return switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
80 => abi.RegisterClass.x87,
@@ -2849,7 +2858,8 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
const dst_scalar_ty = dst_ty.scalarType(mod);
@@ -2892,14 +2902,14 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
} }, &.{src_scalar_ty}, &.{.{ .air_ref = ty_op.operand }});
}
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
const dst_reg = dst_mcv.getReg().?;
- const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(mod), 16)));
+ const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(pt), 16)));
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
@@ -2978,19 +2988,20 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
}
break :result dst_mcv;
} orelse return self.fail("TODO implement airFpext from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
const dst_ty = self.typeOfIndex(inst);
const result = @as(?MCValue, result: {
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
const src_int_info = src_ty.intInfo(mod);
const dst_int_info = dst_ty.intInfo(mod);
@@ -3001,13 +3012,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(ty_op.operand);
if (dst_ty.isVector(mod)) {
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
const max_abi_size = @max(dst_abi_size, src_abi_size);
if (max_abi_size > @as(u32, if (self.hasFeature(.avx2)) 32 else 16)) break :result null;
const has_avx = self.hasFeature(.avx);
- const dst_elem_abi_size = dst_ty.childType(mod).abiSize(mod);
- const src_elem_abi_size = src_ty.childType(mod).abiSize(mod);
+ const dst_elem_abi_size = dst_ty.childType(mod).abiSize(pt);
+ const src_elem_abi_size = src_ty.childType(mod).abiSize(pt);
switch (math.order(dst_elem_abi_size, src_elem_abi_size)) {
.lt => {
const mir_tag: Mir.Inst.FixedTag = switch (dst_elem_abi_size) {
@@ -3236,19 +3247,20 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}) orelse return self.fail("TODO implement airIntCast from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -3278,9 +3290,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
if (dst_ty.zigTypeTag(mod) == .Vector) {
assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod));
const dst_elem_ty = dst_ty.childType(mod);
- const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(mod));
+ const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(pt));
const src_elem_ty = src_ty.childType(mod);
- const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(mod));
+ const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(pt));
const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_elem_abi_size) {
1 => switch (src_elem_abi_size) {
@@ -3305,20 +3317,20 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
else => null,
},
else => null,
- }) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(mod)});
+ }) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(pt)});
const dst_info = dst_elem_ty.intInfo(mod);
const src_info = src_elem_ty.intInfo(mod);
- const mask_val = try mod.intValue(src_elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits));
+ const mask_val = try pt.intValue(src_elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits));
- const splat_ty = try mod.vectorType(.{
+ const splat_ty = try pt.vectorType(.{
.len = @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
.child = src_elem_ty.ip_index,
});
- const splat_abi_size: u32 = @intCast(splat_ty.abiSize(mod));
+ const splat_abi_size: u32 = @intCast(splat_ty.abiSize(pt));
- const splat_val = try mod.intern(.{ .aggregate = .{
+ const splat_val = try pt.intern(.{ .aggregate = .{
.ty = splat_ty.ip_index,
.storage = .{ .repeated_elem = mask_val.ip_index },
} });
@@ -3375,7 +3387,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
}
} else if (dst_abi_size <= 16) {
const dst_info = dst_ty.intInfo(mod);
- const high_ty = try mod.intType(dst_info.signedness, dst_info.bits - 64);
+ const high_ty = try pt.intType(dst_info.signedness, dst_info.bits - 64);
if (self.regExtraBits(high_ty) > 0) {
try self.truncateRegister(high_ty, dst_mcv.register_pair[1].to64());
}
@@ -3400,12 +3412,12 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOfIndex(inst);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
const ptr_ty = self.typeOf(bin_op.lhs);
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }, .{});
@@ -3413,7 +3425,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const len_ty = self.typeOf(bin_op.rhs);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(ptr_ty.abiSize(mod)),
+ @intCast(ptr_ty.abiSize(pt)),
len_ty,
.{ .air_ref = bin_op.rhs },
.{},
@@ -3430,14 +3442,15 @@ fn airUnOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
}
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
const dst_ty = self.typeOfIndex(inst);
if (dst_ty.isAbiInt(mod)) {
- const abi_size: u32 = @intCast(dst_ty.abiSize(mod));
- const bit_size: u32 = @intCast(dst_ty.bitSize(mod));
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const bit_size: u32 = @intCast(dst_ty.bitSize(pt));
if (abi_size * 8 > bit_size) {
const dst_lock = switch (dst_mcv) {
.register => |dst_reg| self.register_manager.lockRegAssumeUnused(dst_reg),
@@ -3452,7 +3465,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const hi_ty = try mod.intType(.unsigned, @intCast((dst_ty.bitSize(mod) - 1) % 64 + 1));
+ const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1));
const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
try self.genSetReg(tmp_reg, hi_ty, hi_mcv, .{});
try self.truncateRegister(dst_ty, tmp_reg);
@@ -3471,7 +3484,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
}
fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const air_tag = self.air.instructions.items(.tag);
const air_data = self.air.instructions.items(.data);
@@ -3497,7 +3511,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
}
} else if (dst_air.toInterned()) |ip_index| {
var space: Value.BigIntSpace = undefined;
- const src_int = Value.fromInterned(ip_index).toBigInt(&space, mod);
+ const src_int = Value.fromInterned(ip_index).toBigInt(&space, pt);
return @as(u16, @intCast(src_int.bitCountTwosComp())) +
@intFromBool(src_int.positive and dst_info.signedness == .signed);
}
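A quick standalone check of the constant branch above: a positive constant contributes its two's-complement bit count, plus one extra bit when the destination is signed so the sign bit stays clear. The concrete value 5 is illustrative only.

const std = @import("std");

test "active bits of the constant 5" {
    const value: u64 = 5;
    const twos_comp_bits: u64 = 64 - @clz(value); // 3 bits for 0b101
    try std.testing.expectEqual(@as(u64, 3), twos_comp_bits);
    // A signed destination needs one more bit, matching the +@intFromBool(...) above.
    const signed_dest_bits = twos_comp_bits + 1;
    try std.testing.expectEqual(@as(u64, 4), signed_dest_bits);
}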
@@ -3505,7 +3519,8 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
}
fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result = result: {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
@@ -3514,10 +3529,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
.Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
else => {},
}
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
const dst_info = dst_ty.intInfo(mod);
- const src_ty = try mod.intType(dst_info.signedness, switch (tag) {
+ const src_ty = try pt.intType(dst_info.signedness, switch (tag) {
else => unreachable,
.mul, .mul_wrap => @max(
self.activeIntBits(bin_op.lhs),
@@ -3526,7 +3541,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
),
.div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits,
});
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
if (dst_abi_size == 16 and src_abi_size == 16) switch (tag) {
else => unreachable,
@@ -3539,7 +3554,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
state: State,
reloc: Mir.Inst.Index,
} = if (signed and tag == .div_floor) state: {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, pt));
try self.asmMemoryImmediate(
.{ ._, .mov },
.{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
@@ -3614,7 +3629,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
.rem, .mod => "mod",
else => unreachable,
},
- intCompilerRtAbiName(@intCast(dst_ty.bitSize(mod))),
+ intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))),
}) catch unreachable,
} },
&.{ src_ty, src_ty },
@@ -3643,7 +3658,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
.return_type = dst_ty.toIntern(),
.param_types = &.{ src_ty.toIntern(), src_ty.toIntern() },
.callee = std.fmt.bufPrint(&callee_buf, "__div{c}i3", .{
- intCompilerRtAbiName(@intCast(dst_ty.bitSize(mod))),
+ intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))),
}) catch unreachable,
} },
&.{ src_ty, src_ty },
@@ -3734,12 +3749,13 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
- if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+ if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
"TODO implement airAddSat for {}",
- .{ty.fmt(mod)},
+ .{ty.fmt(pt)},
);
const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -3804,7 +3820,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .o;
} else cc: {
try self.genSetReg(limit_reg, ty, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(mod)),
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(pt)),
}, .{});
try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
@@ -3815,7 +3831,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
try self.asmCmovccRegisterRegister(
cc,
registerAlias(dst_reg, cmov_abi_size),
@@ -3834,12 +3850,13 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
- if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+ if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
"TODO implement airSubSat for {}",
- .{ty.fmt(mod)},
+ .{ty.fmt(pt)},
);
const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -3908,7 +3925,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
try self.asmCmovccRegisterRegister(
cc,
registerAlias(dst_reg, cmov_abi_size),
@@ -3927,13 +3944,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
const result = result: {
if (ty.toIntern() == .i128_type) {
- const ptr_c_int = try mod.singleMutPtrType(Type.c_int);
+ const ptr_c_int = try pt.singleMutPtrType(Type.c_int);
const overflow = try self.allocTempRegOrMem(Type.c_int, false);
const dst_mcv = try self.genCall(.{ .lib = .{
@@ -4010,9 +4028,9 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+ if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
"TODO implement airMulSat for {}",
- .{ty.fmt(mod)},
+ .{ty.fmt(pt)},
);
try self.spillRegisters(&.{ .rax, .rcx, .rdx });
@@ -4061,7 +4079,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
};
const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
try self.asmCmovccRegisterRegister(
cc,
registerAlias(dst_mcv.register, cmov_abi_size),
@@ -4073,7 +4091,8 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = result: {
@@ -4109,17 +4128,17 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, mod)),
+ @intCast(tuple_ty.structFieldOffset(1, pt)),
Type.u1,
.{ .eflags = cc },
.{},
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(0, mod)),
+ @intCast(tuple_ty.structFieldOffset(0, pt)),
ty,
partial_mcv,
.{},
@@ -4128,7 +4147,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -4139,7 +4158,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = result: {
@@ -4186,17 +4206,17 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, mod)),
+ @intCast(tuple_ty.structFieldOffset(1, pt)),
tuple_ty.structFieldType(1, mod),
.{ .eflags = cc },
.{},
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(0, mod)),
+ @intCast(tuple_ty.structFieldOffset(0, pt)),
tuple_ty.structFieldType(0, mod),
partial_mcv,
.{},
@@ -4205,7 +4225,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -4222,7 +4242,8 @@ fn genSetFrameTruncatedOverflowCompare(
src_mcv: MCValue,
overflow_cc: ?Condition,
) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const src_lock = switch (src_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
@@ -4233,12 +4254,12 @@ fn genSetFrameTruncatedOverflowCompare(
const int_info = ty.intInfo(mod);
const hi_bits = (int_info.bits - 1) % 64 + 1;
- const hi_ty = try mod.intType(int_info.signedness, hi_bits);
+ const hi_ty = try pt.intType(int_info.signedness, hi_bits);
const limb_bits: u16 = @intCast(if (int_info.bits <= 64) self.regBitSize(ty) else 64);
- const limb_ty = try mod.intType(int_info.signedness, limb_bits);
+ const limb_ty = try pt.intType(int_info.signedness, limb_bits);
- const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_bits);
+ const rest_ty = try pt.intType(.unsigned, int_info.bits - hi_bits);
const temp_regs =
try self.register_manager.allocRegs(3, .{null} ** 3, abi.RegisterClass.gp);
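The limb-splitting arithmetic above is easy to check in isolation: `hi_bits` is the width of the topmost 64-bit limb and `rest_ty` covers the bits below it. A standalone sketch; the concrete widths are illustrative only.

const std = @import("std");

test "width of the topmost limb" {
    // { total bits, expected hi_bits, expected rest bits }
    const cases = [_][3]u16{
        .{ 37, 37, 0 },
        .{ 64, 64, 0 },
        .{ 65, 1, 64 },
        .{ 100, 36, 64 },
        .{ 128, 64, 64 },
    };
    for (cases) |c| {
        const hi_bits = (c[0] - 1) % 64 + 1; // same formula as above
        try std.testing.expectEqual(c[1], hi_bits);
        try std.testing.expectEqual(c[2], c[0] - hi_bits);
    }
}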
@@ -4269,7 +4290,7 @@ fn genSetFrameTruncatedOverflowCompare(
);
}
- const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, mod));
+ const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt));
if (hi_limb_off > 0) try self.genSetMem(
.{ .frame = frame_index },
payload_off,
@@ -4286,7 +4307,7 @@ fn genSetFrameTruncatedOverflowCompare(
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, mod)),
+ @intCast(tuple_ty.structFieldOffset(1, pt)),
tuple_ty.structFieldType(1, mod),
if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
.{},
@@ -4294,18 +4315,19 @@ fn genSetFrameTruncatedOverflowCompare(
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const tuple_ty = self.typeOfIndex(inst);
const dst_ty = self.typeOf(bin_op.lhs);
const result: MCValue = switch (dst_ty.zigTypeTag(mod)) {
- .Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(mod)}),
+ .Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
.Int => result: {
const dst_info = dst_ty.intInfo(mod);
if (dst_info.bits > 128 and dst_info.signedness == .unsigned) {
const slow_inc = self.hasFeature(.slow_incdec);
- const abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
const limb_len = math.divCeil(u32, abi_size, 8) catch unreachable;
try self.spillRegisters(&.{ .rax, .rcx, .rdx });
@@ -4316,7 +4338,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemset(
dst_mcv.address(),
.{ .immediate = 0 },
- .{ .immediate = tuple_ty.abiSize(mod) },
+ .{ .immediate = tuple_ty.abiSize(pt) },
.{},
);
const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -4356,7 +4378,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.index = temp_regs[3].to64(),
.scale = .@"8",
.disp = dst_mcv.load_frame.off +
- @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
} },
}, .rdx);
try self.asmSetccRegister(.c, .cl);
@@ -4380,7 +4402,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.index = temp_regs[3].to64(),
.scale = .@"8",
.disp = dst_mcv.load_frame.off +
- @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
} },
}, .rax);
try self.asmSetccRegister(.c, .ch);
@@ -4429,7 +4451,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.mod = .{ .rm = .{
.size = .byte,
.disp = dst_mcv.load_frame.off +
- @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
} },
}, Immediate.u(1));
self.performReloc(no_overflow);
@@ -4453,11 +4475,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs_active_bits = self.activeIntBits(bin_op.lhs);
const rhs_active_bits = self.activeIntBits(bin_op.rhs);
const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
- const src_ty = try mod.intType(dst_info.signedness, src_bits);
+ const src_ty = try pt.intType(dst_info.signedness, src_bits);
if (src_bits > 64 and src_bits <= 128 and
dst_info.bits > 64 and dst_info.bits <= 128) switch (dst_info.signedness) {
.signed => {
- const ptr_c_int = try mod.singleMutPtrType(Type.c_int);
+ const ptr_c_int = try pt.singleMutPtrType(Type.c_int);
const overflow = try self.allocTempRegOrMem(Type.c_int, false);
const result = try self.genCall(.{ .lib = .{
.return_type = .i128_type,
@@ -4472,7 +4494,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(0, mod)),
+ @intCast(tuple_ty.structFieldOffset(0, pt)),
tuple_ty.structFieldType(0, mod),
result,
.{},
@@ -4484,7 +4506,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(1, mod)),
+ @intCast(tuple_ty.structFieldOffset(1, pt)),
tuple_ty.structFieldType(1, mod),
.{ .eflags = .ne },
.{},
@@ -4596,14 +4618,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(0, mod)),
+ @intCast(tuple_ty.structFieldOffset(0, pt)),
tuple_ty.structFieldType(0, mod),
.{ .register_pair = .{ .rax, .rdx } },
.{},
);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(1, mod)),
+ @intCast(tuple_ty.structFieldOffset(1, pt)),
tuple_ty.structFieldType(1, mod),
.{ .register = tmp_regs[1] },
.{},
@@ -4636,7 +4658,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
self.eflags_inst = inst;
break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
} else {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -4644,21 +4666,21 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
// For now, this is the only supported multiply that doesn't fit in a register.
if (dst_info.bits > 128 or src_bits != 64)
return self.fail("TODO implement airWithOverflow from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(0, mod)),
+ @intCast(tuple_ty.structFieldOffset(0, pt)),
tuple_ty.structFieldType(0, mod),
partial_mcv,
.{},
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, mod)),
+ @intCast(tuple_ty.structFieldOffset(1, pt)),
tuple_ty.structFieldType(1, mod),
.{ .immediate = 0 }, // cc being set is impossible
.{},
@@ -4682,8 +4704,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
/// Clobbers .rax and .rdx registers.
/// Quotient is saved in .rax and remainder in .rdx.
fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = self.pt;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const bit_size: u32 = @intCast(self.regBitSize(ty));
if (abi_size > 8) {
return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
@@ -4732,8 +4754,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
/// Always returns a register.
/// Clobbers .rax and .rdx registers.
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const int_info = ty.intInfo(mod);
const dividend = switch (lhs) {
.register => |reg| reg,
@@ -4784,7 +4807,8 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
}
fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const air_tags = self.air.instructions.items(.tag);
@@ -4811,7 +4835,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const lhs_bits: u31 = @intCast(lhs_ty.bitSize(mod));
+ const lhs_bits: u31 = @intCast(lhs_ty.bitSize(pt));
const tmp_ty = if (lhs_bits > 64) Type.usize else lhs_ty;
const off = frame_addr.off + (lhs_bits - 1) / 64 * 8;
try self.genSetReg(
@@ -4922,11 +4946,11 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
.shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_q, .sll } else null,
},
},
- })) |mir_tag| if (try self.air.value(bin_op.rhs, mod)) |rhs_val| {
+ })) |mir_tag| if (try self.air.value(bin_op.rhs, pt)) |rhs_val| {
switch (mod.intern_pool.indexToKey(rhs_val.toIntern())) {
.aggregate => |rhs_aggregate| switch (rhs_aggregate.storage) {
.repeated_elem => |rhs_elem| {
- const abi_size: u32 = @intCast(lhs_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
const lhs_mcv = try self.resolveInst(bin_op.lhs);
const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
@@ -4946,7 +4970,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
self.register_manager.unlockReg(lock);
const shift_imm =
- Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(mod)));
+ Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(pt)));
if (self.hasFeature(.avx)) try self.asmRegisterRegisterImmediate(
mir_tag,
registerAlias(dst_reg, abi_size),
@@ -4968,7 +4992,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
}
} else if (bin_op.rhs.toIndex()) |rhs_inst| switch (air_tags[@intFromEnum(rhs_inst)]) {
.splat => {
- const abi_size: u32 = @intCast(lhs_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
const lhs_mcv = try self.resolveInst(bin_op.lhs);
const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
@@ -4991,13 +5015,13 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const shift_lock = self.register_manager.lockRegAssumeUnused(shift_reg);
defer self.register_manager.unlockReg(shift_lock);
- const mask_ty = try mod.vectorType(.{ .len = 16, .child = .u8_type });
- const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
+ const mask_ty = try pt.vectorType(.{ .len = 16, .child = .u8_type });
+ const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
- (try rhs_ty.childType(mod).maxIntScalar(mod, Type.u8)).toIntern(),
+ (try rhs_ty.childType(mod).maxIntScalar(pt, Type.u8)).toIntern(),
} ++ [1]InternPool.Index{
- (try mod.intValue(Type.u8, 0)).toIntern(),
+ (try pt.intValue(Type.u8, 0)).toIntern(),
} ** 15) },
} })));
const mask_addr_reg =
@@ -5045,7 +5069,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
},
else => {},
}
- return self.fail("TODO implement airShlShrBinOp for {}", .{lhs_ty.fmt(mod)});
+ return self.fail("TODO implement airShlShrBinOp for {}", .{lhs_ty.fmt(pt)});
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -5058,11 +5082,11 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const pl_ty = self.typeOfIndex(inst);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
const opt_mcv = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -5104,7 +5128,8 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = result: {
const dst_ty = self.typeOfIndex(inst);
@@ -5130,7 +5155,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
const pl_ty = dst_ty.childType(mod);
- const pl_abi_size: i32 = @intCast(pl_ty.abiSize(mod));
+ const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
try self.genSetMem(
.{ .reg = dst_mcv.getReg().? },
pl_abi_size,
@@ -5144,7 +5169,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_union_ty = self.typeOf(ty_op.operand);
const err_ty = err_union_ty.errorUnionSet(mod);
@@ -5156,11 +5182,11 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
break :result operand;
}
- const err_off = errUnionErrorOffset(payload_ty, mod);
+ const err_off = errUnionErrorOffset(payload_ty, pt);
switch (operand) {
.register => |reg| {
// TODO reuse operand
@@ -5197,7 +5223,8 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
// *(E!T) -> E
fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
@@ -5217,8 +5244,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload(mod);
const err_ty = eu_ty.errorUnionSet(mod);
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod));
- const err_abi_size: u32 = @intCast(err_ty.abiSize(mod));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+ const err_abi_size: u32 = @intCast(err_ty.abiSize(pt));
try self.asmRegisterMemory(
.{ ._, .mov },
registerAlias(dst_reg, err_abi_size),
@@ -5244,7 +5271,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const src_ty = self.typeOf(ty_op.operand);
@@ -5259,8 +5287,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload(mod);
const err_ty = eu_ty.errorUnionSet(mod);
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod));
- const err_abi_size: u32 = @intCast(err_ty.abiSize(mod));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+ const err_abi_size: u32 = @intCast(err_ty.abiSize(pt));
try self.asmMemoryImmediate(
.{ ._, .mov },
.{
@@ -5283,8 +5311,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod));
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -5304,13 +5332,14 @@ fn genUnwrapErrUnionPayloadMir(
err_union_ty: Type,
err_union: MCValue,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const payload_ty = err_union_ty.errorUnionPayload(mod);
const result: MCValue = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
- const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, mod));
+ const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt));
switch (err_union) {
.load_frame => |frame_addr| break :result .{ .load_frame = .{
.index = frame_addr.index,
@@ -5353,12 +5382,13 @@ fn genUnwrapErrUnionPayloadPtrMir(
ptr_ty: Type,
ptr_mcv: MCValue,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_union_ty = ptr_ty.childType(mod);
const payload_ty = err_union_ty.errorUnionPayload(mod);
const result: MCValue = result: {
- const payload_off = errUnionPayloadOffset(payload_ty, mod);
+ const payload_off = errUnionPayloadOffset(payload_ty, pt);
const result_mcv: MCValue = if (maybe_inst) |inst|
try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr_mcv)
else
@@ -5387,11 +5417,12 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const pl_ty = self.typeOf(ty_op.operand);
- if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 };
+ if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 };
const opt_ty = self.typeOfIndex(inst);
const pl_mcv = try self.resolveInst(ty_op.operand);
@@ -5408,7 +5439,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
try self.genCopy(pl_ty, opt_mcv, pl_mcv, .{});
if (!same_repr) {
- const pl_abi_size: i32 = @intCast(pl_ty.abiSize(mod));
+ const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
switch (opt_mcv) {
else => unreachable,
@@ -5441,7 +5472,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const eu_ty = ty_op.ty.toType();
@@ -5450,11 +5482,11 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 };
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 };
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, mod));
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod));
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand, .{});
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }, .{});
break :result .{ .load_frame = .{ .index = frame_index } };
@@ -5464,7 +5496,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const eu_ty = ty_op.ty.toType();
@@ -5472,11 +5505,11 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const err_ty = eu_ty.errorUnionSet(mod);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try self.resolveInst(ty_op.operand);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, mod));
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod));
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef, .{});
const operand = try self.resolveInst(ty_op.operand);
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand, .{});
@@ -5523,7 +5556,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
@@ -5544,7 +5577,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -5591,7 +5624,8 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
}
fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const slice_ty = self.typeOf(lhs);
const slice_mcv = try self.resolveInst(lhs);
const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
@@ -5601,7 +5635,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
const elem_ty = slice_ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
const index_ty = self.typeOf(rhs);
@@ -5627,12 +5661,13 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = result: {
const elem_ty = self.typeOfIndex(inst);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
const slice_ty = self.typeOf(bin_op.lhs);
const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
@@ -5652,7 +5687,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = result: {
@@ -5675,7 +5711,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
try self.spillEflagsIfOccupied();
- if (array_ty.isVector(mod) and elem_ty.bitSize(mod) == 1) {
+ if (array_ty.isVector(mod) and elem_ty.bitSize(pt) == 1) {
const index_reg = switch (index_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(index_ty, index_mcv),
@@ -5688,7 +5724,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
index_reg.to64(),
),
.sse => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
try self.asmMemoryRegister(
.{ ._, .bt },
@@ -5717,7 +5753,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
index_reg.to64(),
),
else => return self.fail("TODO airArrayElemVal for {s} of {}", .{
- @tagName(array_mcv), array_ty.fmt(mod),
+ @tagName(array_mcv), array_ty.fmt(pt),
}),
}
@@ -5726,14 +5762,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
break :result .{ .register = dst_reg };
}
- const elem_abi_size = elem_ty.abiSize(mod);
+ const elem_abi_size = elem_ty.abiSize(pt);
const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_lock);
switch (array_mcv) {
.register => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
try self.asmRegisterMemory(
.{ ._, .lea },
@@ -5757,7 +5793,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
=> try self.genSetReg(addr_reg, Type.usize, array_mcv.address(), .{}),
.lea_symbol, .lea_direct, .lea_tlv => unreachable,
else => return self.fail("TODO airArrayElemVal_val for {s} of {}", .{
- @tagName(array_mcv), array_ty.fmt(mod),
+ @tagName(array_mcv), array_ty.fmt(pt),
}),
}
@@ -5781,7 +5817,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
@@ -5790,9 +5827,9 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
const result = result: {
const elem_ty = ptr_ty.elemType2(mod);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
const index_ty = self.typeOf(bin_op.rhs);
const index_mcv = try self.resolveInst(bin_op.rhs);
const index_lock = switch (index_mcv) {
@@ -5831,7 +5868,8 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -5854,7 +5892,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
}
const elem_ty = base_ptr_ty.elemType2(mod);
- const elem_abi_size = elem_ty.abiSize(mod);
+ const elem_abi_size = elem_ty.abiSize(pt);
const index_ty = self.typeOf(extra.rhs);
const index_mcv = try self.resolveInst(extra.rhs);
const index_lock: ?RegisterLock = switch (index_mcv) {
@@ -5876,12 +5914,13 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_union_ty = self.typeOf(bin_op.lhs);
const union_ty = ptr_union_ty.childType(mod);
const tag_ty = self.typeOf(bin_op.rhs);
- const layout = union_ty.unionGetLayout(mod);
+ const layout = union_ty.unionGetLayout(pt);
if (layout.tag_size == 0) {
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -5913,19 +5952,19 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
break :blk MCValue{ .register = reg };
} else ptr;
- const ptr_tag_ty = try mod.adjustPtrTypeChild(ptr_union_ty, tag_ty);
+ const ptr_tag_ty = try pt.adjustPtrTypeChild(ptr_union_ty, tag_ty);
try self.store(ptr_tag_ty, adjusted_ptr, tag, .{});
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const tag_ty = self.typeOfIndex(inst);
const union_ty = self.typeOf(ty_op.operand);
- const layout = union_ty.unionGetLayout(mod);
+ const layout = union_ty.unionGetLayout(pt);
if (layout.tag_size == 0) {
return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
@@ -5939,7 +5978,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
- const tag_abi_size = tag_ty.abiSize(mod);
+ const tag_abi_size = tag_ty.abiSize(pt);
const dst_mcv: MCValue = blk: {
switch (operand) {
.load_frame => |frame_addr| {
@@ -5983,7 +6022,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
}
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = result: {
try self.spillEflagsIfOccupied();
@@ -5991,7 +6031,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{
- src_ty.fmt(mod),
+ src_ty.fmt(pt),
});
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -6010,8 +6050,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const abi_size: u31 = @intCast(src_ty.abiSize(mod));
- const src_bits: u31 = @intCast(src_ty.bitSize(mod));
+ const abi_size: u31 = @intCast(src_ty.abiSize(pt));
+ const src_bits: u31 = @intCast(src_ty.bitSize(pt));
const has_lzcnt = self.hasFeature(.lzcnt);
if (src_bits > @as(u32, if (has_lzcnt) 128 else 64)) {
const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable;
@@ -6121,7 +6161,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
}
assert(src_bits <= 64);
- const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2);
if (math.isPowerOfTwo(src_bits)) {
const imm_reg = try self.copyToTmpRegister(dst_ty, .{
.immediate = src_bits ^ (src_bits - 1),
@@ -6179,7 +6219,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = result: {
try self.spillEflagsIfOccupied();
@@ -6187,7 +6228,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airCtz for {}", .{
- src_ty.fmt(mod),
+ src_ty.fmt(pt),
});
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -6206,8 +6247,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const abi_size: u31 = @intCast(src_ty.abiSize(mod));
- const src_bits: u31 = @intCast(src_ty.bitSize(mod));
+ const abi_size: u31 = @intCast(src_ty.abiSize(pt));
+ const src_bits: u31 = @intCast(src_ty.bitSize(pt));
const has_bmi = self.hasFeature(.bmi);
if (src_bits > @as(u32, if (has_bmi) 128 else 64)) {
const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable;
@@ -6328,7 +6369,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .bsf }, wide_ty, dst_mcv, .{ .register = wide_reg });
} else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
- const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2);
try self.asmCmovccRegisterRegister(
.z,
registerAlias(dst_reg, cmov_abi_size),
@@ -6340,15 +6381,16 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
try self.spillEflagsIfOccupied();
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 16)
- return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(mod)});
+ return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(pt)});
const src_mcv = try self.resolveInst(ty_op.operand);
const mat_src_mcv = switch (src_mcv) {
@@ -6385,7 +6427,7 @@ fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
else
.{ .register = mat_src_mcv.register_pair[0] }, false);
const src_info = src_ty.intInfo(mod);
- const hi_ty = try mod.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1);
+ const hi_ty = try pt.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1);
try self.genPopCount(tmp_regs[1], hi_ty, if (mat_src_mcv.isMemory())
mat_src_mcv.address().offset(8).deref()
else
@@ -6403,16 +6445,16 @@ fn genPopCount(
src_mcv: MCValue,
dst_contains_src: bool,
) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
if (self.hasFeature(.popcnt)) return self.genBinOpMir(
.{ ._, .popcnt },
if (src_abi_size > 1) src_ty else Type.u32,
.{ .register = dst_reg },
if (src_abi_size > 1) src_mcv else src: {
if (!dst_contains_src) try self.genSetReg(dst_reg, src_ty, src_mcv, .{});
- try self.truncateRegister(try src_ty.toUnsigned(mod), dst_reg);
+ try self.truncateRegister(try src_ty.toUnsigned(pt), dst_reg);
break :src .{ .register = dst_reg };
},
);
@@ -6495,13 +6537,14 @@ fn genByteSwap(
src_mcv: MCValue,
mem_ok: bool,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const has_movbe = self.hasFeature(.movbe);
if (src_ty.zigTypeTag(mod) == .Vector) return self.fail(
"TODO implement genByteSwap for {}",
- .{src_ty.fmt(mod)},
+ .{src_ty.fmt(pt)},
);
const src_lock = switch (src_mcv) {
@@ -6510,7 +6553,7 @@ fn genByteSwap(
};
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
- const abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(src_ty.abiSize(pt));
switch (abi_size) {
0 => unreachable,
1 => return if ((mem_ok or src_mcv.isRegister()) and
@@ -6658,11 +6701,12 @@ fn genByteSwap(
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const src_bits: u32 = @intCast(src_ty.bitSize(mod));
+ const src_bits: u32 = @intCast(src_ty.bitSize(pt));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, true);
@@ -6674,18 +6718,19 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
src_ty,
dst_mcv,
if (src_bits > 256) Type.u16 else Type.u8,
- .{ .immediate = src_ty.abiSize(mod) * 8 - src_bits },
+ .{ .immediate = src_ty.abiSize(pt) * 8 - src_bits },
);
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const abi_size: u32 = @intCast(src_ty.abiSize(mod));
- const bit_size: u32 = @intCast(src_ty.bitSize(mod));
+ const abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const bit_size: u32 = @intCast(src_ty.bitSize(pt));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, false);
@@ -6802,14 +6847,15 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
}
fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const result = result: {
const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);
if (scalar_bits == 80) {
if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement floatSign for {}", .{
- ty.fmt(mod),
+ ty.fmt(pt),
});
const src_mcv = try self.resolveInst(operand);
@@ -6829,11 +6875,11 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
break :result dst_mcv;
}
- const abi_size: u32 = switch (ty.abiSize(mod)) {
+ const abi_size: u32 = switch (ty.abiSize(pt)) {
1...16 => 16,
17...32 => 32,
else => return self.fail("TODO implement floatSign for {}", .{
- ty.fmt(mod),
+ ty.fmt(pt),
}),
};
@@ -6852,14 +6898,14 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const vec_ty = try mod.vectorType(.{
+ const vec_ty = try pt.vectorType(.{
.len = @divExact(abi_size * 8, scalar_bits),
- .child = (try mod.intType(.signed, scalar_bits)).ip_index,
+ .child = (try pt.intType(.signed, scalar_bits)).ip_index,
});
const sign_mcv = try self.genTypedValue(switch (tag) {
- .neg => try vec_ty.minInt(mod, vec_ty),
- .abs => try vec_ty.maxInt(mod, vec_ty),
+ .neg => try vec_ty.minInt(pt, vec_ty),
+ .abs => try vec_ty.maxInt(pt, vec_ty),
else => unreachable,
});
const sign_mem: Memory = if (sign_mcv.isMemory())
@@ -6891,7 +6937,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
.abs => .{ .v_pd, .@"and" },
else => unreachable,
},
- 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(mod)}),
+ 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(pt)}),
else => unreachable,
},
registerAlias(dst_reg, abi_size),
@@ -6917,7 +6963,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
.abs => .{ ._pd, .@"and" },
else => unreachable,
},
- 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(mod)}),
+ 80 => return self.fail("TODO implement floatSign for {}", .{ty.fmt(pt)}),
else => unreachable,
},
registerAlias(dst_reg, abi_size),
@@ -6978,7 +7024,8 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: RoundMode) !void {
}
fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
@@ -7010,11 +7057,12 @@ fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
}
fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
if (self.getRoundTag(ty)) |_| return .none;
if (ty.zigTypeTag(mod) != .Float)
- return self.fail("TODO implement genRound for {}", .{ty.fmt(mod)});
+ return self.fail("TODO implement genRound for {}", .{ty.fmt(pt)});
var callee_buf: ["__trunc?".len]u8 = undefined;
return try self.genCall(.{ .lib = .{
@@ -7034,12 +7082,12 @@ fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MC
}
fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: RoundMode) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const mir_tag = self.getRoundTag(ty) orelse {
const result = try self.genRoundLibcall(ty, src_mcv, mode);
return self.genSetReg(dst_reg, ty, result, .{});
};
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const dst_alias = registerAlias(dst_reg, abi_size);
switch (mir_tag[0]) {
.v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -7076,14 +7124,15 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro
}
fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = self.typeOf(ty_op.operand);
const result: MCValue = result: {
const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
else => null,
- .Int => switch (ty.abiSize(mod)) {
+ .Int => switch (ty.abiSize(pt)) {
0 => unreachable,
1...8 => {
try self.spillEflagsIfOccupied();
@@ -7092,7 +7141,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
try self.genUnOpMir(.{ ._, .neg }, ty, dst_mcv);
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
switch (src_mcv) {
.register => |val_reg| try self.asmCmovccRegisterRegister(
.l,
@@ -7151,7 +7200,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
},
else => {
- const abi_size: u31 = @intCast(ty.abiSize(mod));
+ const abi_size: u31 = @intCast(ty.abiSize(pt));
const limb_len = math.divCeil(u31, abi_size, 8) catch unreachable;
const tmp_regs =
@@ -7249,9 +7298,9 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
},
.Float => return self.floatSign(inst, ty_op.operand, ty),
},
- }) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(mod)});
+ }) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_reg = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv.getReg().?
@@ -7276,10 +7325,11 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ty = self.typeOf(un_op);
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const result: MCValue = result: {
switch (ty.zigTypeTag(mod)) {
@@ -7408,7 +7458,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
},
else => unreachable,
}) orelse return self.fail("TODO implement airSqrt for {}", .{
- ty.fmt(mod),
+ ty.fmt(pt),
});
switch (mir_tag[0]) {
.v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
@@ -7521,14 +7571,15 @@ fn reuseOperandAdvanced(
}
fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ptr_info = ptr_ty.ptrInfo(mod);
const val_ty = Type.fromInterned(ptr_info.child);
- if (!val_ty.hasRuntimeBitsIgnoreComptime(mod)) return;
- const val_abi_size: u32 = @intCast(val_ty.abiSize(mod));
+ if (!val_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
- const val_bit_size: u32 = @intCast(val_ty.bitSize(mod));
+ const val_bit_size: u32 = @intCast(val_ty.bitSize(pt));
const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
@@ -7566,7 +7617,7 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
return;
}
- if (val_abi_size > 8) return self.fail("TODO implement packed load of {}", .{val_ty.fmt(mod)});
+ if (val_abi_size > 8) return self.fail("TODO implement packed load of {}", .{val_ty.fmt(pt)});
const limb_abi_size: u31 = @min(val_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
@@ -7633,9 +7684,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
}
fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const dst_ty = ptr_ty.childType(mod);
- if (!dst_ty.hasRuntimeBitsIgnoreComptime(mod)) return;
+ if (!dst_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
switch (ptr_mcv) {
.none,
.unreach,
@@ -7675,18 +7727,19 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.typeOf(ty_op.operand);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const elem_rc = self.regClassForType(elem_ty);
const ptr_rc = self.regClassForType(ptr_ty);
@@ -7706,7 +7759,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
try self.load(dst_mcv, ptr_ty, ptr_mcv);
}
- if (elem_ty.isAbiInt(mod) and elem_size * 8 > elem_ty.bitSize(mod)) {
+ if (elem_ty.isAbiInt(mod) and elem_size * 8 > elem_ty.bitSize(pt)) {
const high_mcv: MCValue = switch (dst_mcv) {
.register => |dst_reg| .{ .register = dst_reg },
.register_pair => |dst_regs| .{ .register = dst_regs[1] },
@@ -7733,16 +7786,17 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ptr_info = ptr_ty.ptrInfo(mod);
const src_ty = Type.fromInterned(ptr_info.child);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) return;
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8);
const limb_abi_bits = limb_abi_size * 8;
- const limb_ty = try mod.intType(.unsigned, limb_abi_bits);
+ const limb_ty = try pt.intType(.unsigned, limb_abi_bits);
- const src_bit_size = src_ty.bitSize(mod);
+ const src_bit_size = src_ty.bitSize(pt);
const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
@@ -7827,7 +7881,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
limb_mem,
registerAlias(tmp_reg, limb_abi_size),
);
- } else return self.fail("TODO: implement packed store of {}", .{src_ty.fmt(mod)});
+ } else return self.fail("TODO: implement packed store of {}", .{src_ty.fmt(pt)});
}
}
@@ -7838,9 +7892,10 @@ fn store(
src_mcv: MCValue,
opts: CopyOptions,
) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const src_ty = ptr_ty.childType(mod);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) return;
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
switch (ptr_mcv) {
.none,
.unreach,
@@ -7880,7 +7935,8 @@ fn store(
}
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
result: {
@@ -7918,15 +7974,16 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
}
fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ptr_field_ty = self.typeOfIndex(inst);
const ptr_container_ty = self.typeOf(operand);
const container_ty = ptr_container_ty.childType(mod);
const field_off: i32 = switch (container_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, mod)),
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, pt)),
.@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(mod).packed_offset.bit_offset) +
- (if (mod.typeToStruct(container_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, index) else 0) -
+ (if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
ptr_field_ty.ptrInfo(mod).packed_offset.bit_offset, 8),
};
@@ -7940,7 +7997,8 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const result: MCValue = result: {
@@ -7950,14 +8008,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const container_ty = self.typeOf(operand);
const container_rc = self.regClassForType(container_ty);
const field_ty = container_ty.structFieldType(index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
const field_rc = self.regClassForType(field_ty);
const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp);
const src_mcv = try self.resolveInst(operand);
const field_off: u32 = switch (container_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, mod) * 8),
- .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, pt) * 8),
+ .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
};
switch (src_mcv) {
@@ -7988,7 +8046,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
);
}
if (abi.RegisterClass.gp.isSet(RegisterManager.indexOfRegIntoTracked(dst_reg).?) and
- container_ty.abiSize(mod) * 8 > field_ty.bitSize(mod))
+ container_ty.abiSize(pt) * 8 > field_ty.bitSize(pt))
try self.truncateRegister(field_ty, dst_reg);
break :result if (field_off == 0 or field_rc.supersetOf(abi.RegisterClass.gp))
@@ -8000,7 +8058,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const src_regs_lock = self.register_manager.lockRegsAssumeUnused(2, src_regs);
defer for (src_regs_lock) |lock| self.register_manager.unlockReg(lock);
- const field_bit_size: u32 = @intCast(field_ty.bitSize(mod));
+ const field_bit_size: u32 = @intCast(field_ty.bitSize(pt));
const src_reg = if (field_off + field_bit_size <= 64)
src_regs[0]
else if (field_off >= 64)
@@ -8044,7 +8102,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
if (field_bit_size < 128) try self.truncateRegister(
- try mod.intType(.unsigned, @intCast(field_bit_size - 64)),
+ try pt.intType(.unsigned, @intCast(field_bit_size - 64)),
dst_regs[1],
);
break :result if (field_rc.supersetOf(abi.RegisterClass.gp))
@@ -8099,14 +8157,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
},
.load_frame => |frame_addr| {
- const field_abi_size: u32 = @intCast(field_ty.abiSize(mod));
+ const field_abi_size: u32 = @intCast(field_ty.abiSize(pt));
if (field_off % 8 == 0) {
const field_byte_off = @divExact(field_off, 8);
const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref();
- const field_bit_size = field_ty.bitSize(mod);
+ const field_bit_size = field_ty.bitSize(pt);
if (field_abi_size <= 8) {
- const int_ty = try mod.intType(
+ const int_ty = try pt.intType(
if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned,
@intCast(field_bit_size),
);
@@ -8127,7 +8185,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
}
- const container_abi_size: u32 = @intCast(container_ty.abiSize(mod));
+ const container_abi_size: u32 = @intCast(container_ty.abiSize(pt));
const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and
self.reuseOperand(inst, operand, 0, src_mcv))
off_mcv
@@ -8228,16 +8286,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const inst_ty = self.typeOfIndex(inst);
const parent_ty = inst_ty.childType(mod);
const field_off: i32 = switch (parent_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, mod)),
+ .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, pt)),
.@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(mod).packed_offset.bit_offset) +
- (if (mod.typeToStruct(parent_ty)) |struct_obj| mod.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
+ (if (mod.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
self.typeOf(extra.field_ptr).ptrInfo(mod).packed_offset.bit_offset, 8),
};
@@ -8252,10 +8311,11 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const src_ty = self.typeOf(src_air);
if (src_ty.zigTypeTag(mod) == .Vector)
- return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(mod)});
+ return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(pt)});
var src_mcv = try self.resolveInst(src_air);
switch (src_mcv) {
@@ -8290,7 +8350,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
};
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const abi_size: u16 = @intCast(src_ty.abiSize(mod));
+ const abi_size: u16 = @intCast(src_ty.abiSize(pt));
switch (tag) {
.not => {
const limb_abi_size: u16 = @min(abi_size, 8);
@@ -8304,7 +8364,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
.signed => abi_size * 8,
.unsigned => int_info.bits,
} - byte_off * 8, limb_abi_size * 8));
- const limb_ty = try mod.intType(int_info.signedness, limb_bits);
+ const limb_ty = try pt.intType(int_info.signedness, limb_bits);
const limb_mcv = switch (byte_off) {
0 => dst_mcv,
else => dst_mcv.address().offset(byte_off).deref(),
@@ -8340,9 +8400,9 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
}
fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(dst_ty.abiSize(mod));
- if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(mod) });
+ const pt = self.pt;
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(pt) });
switch (dst_mcv) {
.none,
.unreach,
@@ -8389,9 +8449,9 @@ fn genShiftBinOpMir(
rhs_ty: Type,
rhs_mcv: MCValue,
) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(lhs_ty.abiSize(mod));
- const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(mod));
+ const pt = self.pt;
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+ const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(pt));
try self.spillEflagsIfOccupied();
if (abi_size > 16) {
@@ -9046,9 +9106,10 @@ fn genShiftBinOp(
lhs_ty: Type,
rhs_ty: Type,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
if (lhs_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
- lhs_ty.fmt(mod),
+ lhs_ty.fmt(pt),
});
try self.register_manager.getKnownReg(.rcx, null);
@@ -9104,13 +9165,14 @@ fn genMulDivBinOp(
lhs_mcv: MCValue,
rhs_mcv: MCValue,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) return self.fail(
"TODO implement genMulDivBinOp for {s} from {} to {}",
- .{ @tagName(tag), src_ty.fmt(mod), dst_ty.fmt(mod) },
+ .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
);
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
assert(self.register_manager.isRegFree(.rax));
assert(self.register_manager.isRegFree(.rcx));
@@ -9299,13 +9361,13 @@ fn genMulDivBinOp(
.signed => {},
.unsigned => {
const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false);
- const manyptr_u32_ty = try mod.ptrType(.{
+ const manyptr_u32_ty = try pt.ptrType(.{
.child = .u32_type,
.flags = .{
.size = .Many,
},
});
- const manyptr_const_u32_ty = try mod.ptrType(.{
+ const manyptr_const_u32_ty = try pt.ptrType(.{
.child = .u32_type,
.flags = .{
.size = .Many,
@@ -9348,7 +9410,7 @@ fn genMulDivBinOp(
}
return self.fail(
"TODO implement genMulDivBinOp for {s} from {} to {}",
- .{ @tagName(tag), src_ty.fmt(mod), dst_ty.fmt(mod) },
+ .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
);
}
const ty = if (dst_abi_size <= 8) dst_ty else src_ty;
@@ -9515,10 +9577,11 @@ fn genBinOp(
lhs_air: Air.Inst.Ref,
rhs_air: Air.Inst.Ref,
) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const lhs_ty = self.typeOf(lhs_air);
const rhs_ty = self.typeOf(rhs_air);
- const abi_size: u32 = @intCast(lhs_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
if (lhs_ty.isRuntimeFloat()) libcall: {
const float_bits = lhs_ty.floatBits(self.target.*);
@@ -9556,7 +9619,7 @@ fn genBinOp(
floatLibcAbiSuffix(lhs_ty),
}),
else => return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
} catch unreachable;
const result = try self.genCall(.{ .lib = .{
@@ -9668,7 +9731,7 @@ fn genBinOp(
break :adjusted .{ .register = dst_reg };
},
80, 128 => return self.fail("TODO implement genBinOp for {s} of {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
else => unreachable,
};
@@ -9700,8 +9763,8 @@ fn genBinOp(
};
if (sse_op and ((lhs_ty.scalarType(mod).isRuntimeFloat() and
lhs_ty.scalarType(mod).floatBits(self.target.*) == 80) or
- lhs_ty.abiSize(mod) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)))
- return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(mod) });
+ lhs_ty.abiSize(pt) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)))
+ return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(pt) });
const maybe_mask_reg = switch (air_tag) {
else => null,
@@ -9857,7 +9920,7 @@ fn genBinOp(
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const elem_size = lhs_ty.elemType2(mod).abiSize(mod);
+ const elem_size = lhs_ty.elemType2(mod).abiSize(pt);
try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
try self.genBinOpMir(
switch (air_tag) {
@@ -10003,7 +10066,7 @@ fn genBinOp(
},
};
- const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(mod))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(pt))), 2);
const tmp_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -10082,7 +10145,7 @@ fn genBinOp(
},
else => return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
}
return dst_mcv;
@@ -10835,7 +10898,7 @@ fn genBinOp(
},
},
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
});
const lhs_copy_reg = if (maybe_mask_reg) |_| registerAlias(
@@ -10978,7 +11041,7 @@ fn genBinOp(
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
mask_reg,
rhs_copy_reg,
@@ -11010,7 +11073,7 @@ fn genBinOp(
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
dst_reg,
dst_reg,
@@ -11046,7 +11109,7 @@ fn genBinOp(
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
mask_reg,
mask_reg,
@@ -11077,7 +11140,7 @@ fn genBinOp(
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
}),
dst_reg,
lhs_copy_reg.?,
@@ -11107,7 +11170,7 @@ fn genBinOp(
},
else => unreachable,
}) orelse return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
+ @tagName(air_tag), lhs_ty.fmt(pt),
});
try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_reg, mask_reg);
try self.asmRegisterRegister(.{ mir_fixes, .andn }, mask_reg, lhs_copy_reg.?);
@@ -11125,8 +11188,8 @@ fn genBinOp(
.cmp_gte,
.cmp_neq,
=> {
- const unsigned_ty = try lhs_ty.toUnsigned(mod);
- const not_mcv = try self.genTypedValue(try unsigned_ty.maxInt(mod, unsigned_ty));
+ const unsigned_ty = try lhs_ty.toUnsigned(pt);
+ const not_mcv = try self.genTypedValue(try unsigned_ty.maxInt(pt, unsigned_ty));
const not_mem: Memory = if (not_mcv.isMemory())
try not_mcv.mem(self, Memory.Size.fromSize(abi_size))
else
@@ -11195,8 +11258,9 @@ fn genBinOpMir(
dst_mcv: MCValue,
src_mcv: MCValue,
) !void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
try self.spillEflagsIfOccupied();
switch (dst_mcv) {
.none,
@@ -11358,7 +11422,7 @@ fn genBinOpMir(
.load_got,
.load_tlv,
=> {
- const ptr_ty = try mod.singleConstPtrType(ty);
+ const ptr_ty = try pt.singleConstPtrType(ty);
const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address());
return self.genBinOpMir(mir_limb_tag, ty, dst_mcv, .{
.indirect = .{ .reg = addr_reg, .off = off },
@@ -11619,8 +11683,8 @@ fn genBinOpMir(
/// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv.
/// Does not support byte-size operands.
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const pt = self.pt;
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
try self.spillEflagsIfOccupied();
switch (dst_mcv) {
.none,
@@ -11746,7 +11810,8 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
}
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
// skip zero-bit arguments as they don't have a corresponding arg instruction
var arg_index = self.arg_index;
while (self.args[arg_index] == .none) arg_index += 1;
@@ -11808,7 +11873,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemset(
dst_mcv.address().offset(@intFromBool(regs_frame_addr.regs > 0)),
.{ .immediate = 0 },
- .{ .immediate = arg_ty.abiSize(mod) - @intFromBool(regs_frame_addr.regs > 0) },
+ .{ .immediate = arg_ty.abiSize(pt) - @intFromBool(regs_frame_addr.regs > 0) },
.{},
);
@@ -11865,7 +11930,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
}
fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (self.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) {
@@ -11901,7 +11967,8 @@ fn genVarDbgInfo(
mcv: MCValue,
name: [:0]const u8,
) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
@@ -12020,7 +12087,8 @@ fn genCall(self: *Self, info: union(enum) {
callee: []const u8,
},
}, arg_types: []const Type, args: []const MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const fn_ty = switch (info) {
.air => |callee| fn_info: {
@@ -12031,7 +12099,7 @@ fn genCall(self: *Self, info: union(enum) {
else => unreachable,
};
},
- .lib => |lib| try mod.funcType(.{
+ .lib => |lib| try pt.funcType(.{
.param_types = lib.param_types,
.return_type = lib.return_type,
.cc = .C,
@@ -12101,7 +12169,7 @@ fn genCall(self: *Self, info: union(enum) {
try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs));
},
.indirect => |reg_off| {
- frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, mod));
+ frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, pt));
try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg, .{});
try self.register_manager.getReg(reg_off.reg, null);
try reg_locks.append(self.register_manager.lockReg(reg_off.reg));
@@ -12173,7 +12241,7 @@ fn genCall(self: *Self, info: union(enum) {
.none, .unreach => {},
.indirect => |reg_off| {
const ret_ty = Type.fromInterned(fn_info.return_type);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt));
try self.genSetReg(reg_off.reg, Type.usize, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
}, .{});
@@ -12188,14 +12256,14 @@ fn genCall(self: *Self, info: union(enum) {
.none, .load_frame => {},
.register => |dst_reg| switch (fn_info.cc) {
else => try self.genSetReg(
- registerAlias(dst_reg, @intCast(arg_ty.abiSize(mod))),
+ registerAlias(dst_reg, @intCast(arg_ty.abiSize(pt))),
arg_ty,
src_arg,
.{},
),
.C, .SysV, .Win64 => {
const promoted_ty = self.promoteInt(arg_ty);
- const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(mod));
+ const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(pt));
const dst_alias = registerAlias(dst_reg, promoted_abi_size);
try self.genSetReg(dst_alias, promoted_ty, src_arg, .{});
if (promoted_ty.toIntern() != arg_ty.toIntern())
@@ -12246,7 +12314,7 @@ fn genCall(self: *Self, info: union(enum) {
// Due to incremental compilation, how function calls are generated depends
// on linking.
switch (info) {
- .air => |callee| if (try self.air.value(callee, mod)) |func_value| {
+ .air => |callee| if (try self.air.value(callee, pt)) |func_value| {
const func_key = mod.intern_pool.indexToKey(func_value.ip_index);
switch (switch (func_key) {
else => func_key,
@@ -12332,7 +12400,8 @@ fn genCall(self: *Self, info: union(enum) {
}
fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.fn_type.fnReturnType(mod);
@@ -12387,7 +12456,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
var ty = self.typeOf(bin_op.lhs);
var null_compare: ?Mir.Inst.Index = null;
@@ -12457,9 +12527,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
},
.Optional => if (!ty.optionalReprIsPayload(mod)) {
const opt_ty = ty;
- const opt_abi_size: u31 = @intCast(opt_ty.abiSize(mod));
+ const opt_abi_size: u31 = @intCast(opt_ty.abiSize(pt));
ty = opt_ty.optionalChild(mod);
- const payload_abi_size: u31 = @intCast(ty.abiSize(mod));
+ const payload_abi_size: u31 = @intCast(ty.abiSize(pt));
const temp_lhs_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const temp_lhs_lock = self.register_manager.lockRegAssumeUnused(temp_lhs_reg);
@@ -12518,7 +12588,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
switch (ty.zigTypeTag(mod)) {
else => {
- const abi_size: u16 = @intCast(ty.abiSize(mod));
+ const abi_size: u16 = @intCast(ty.abiSize(pt));
const may_flip: enum {
may_flip,
must_flip,
@@ -12845,7 +12915,8 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
@@ -12856,7 +12927,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
try self.spillEflagsIfOccupied();
const op_ty = self.typeOf(un_op);
- const op_abi_size: u32 = @intCast(op_ty.abiSize(mod));
+ const op_abi_size: u32 = @intCast(op_ty.abiSize(pt));
const op_mcv = try self.resolveInst(un_op);
const dst_reg = switch (op_mcv) {
.register => |reg| reg,
@@ -12987,8 +13058,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
}
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !Mir.Inst.Index {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const abi_size = ty.abiSize(pt);
switch (mcv) {
.eflags => |cc| {
// Here we map the opposites since the jump is to the false branch.
@@ -13060,7 +13131,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (opt_mcv) {
.register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
else => {},
@@ -13073,7 +13145,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
.{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
else
- .{ .off = @intCast(pl_ty.abiSize(mod)), .ty = Type.bool };
+ .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
self.eflags_inst = inst;
switch (opt_mcv) {
@@ -13098,14 +13170,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.register => |opt_reg| {
if (some_info.off == 0) {
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
const alias_reg = registerAlias(opt_reg, some_abi_size);
assert(some_abi_size * 8 == alias_reg.bitSize());
try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
return .{ .eflags = .z };
}
assert(some_info.ty.ip_index == .bool_type);
- const opt_abi_size: u32 = @intCast(opt_ty.abiSize(mod));
+ const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
try self.asmRegisterImmediate(
.{ ._, .bt },
registerAlias(opt_reg, opt_abi_size),
@@ -13125,7 +13197,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
defer self.register_manager.unlockReg(addr_reg_lock);
try self.genSetReg(addr_reg, Type.usize, opt_mcv.address(), .{});
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
try self.asmMemoryImmediate(
.{ ._, .cmp },
.{
@@ -13141,7 +13213,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
},
.indirect, .load_frame => {
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
try self.asmMemoryImmediate(
.{ ._, .cmp },
switch (opt_mcv) {
@@ -13169,7 +13241,8 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
}
fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const opt_ty = ptr_ty.childType(mod);
const pl_ty = opt_ty.optionalChild(mod);
@@ -13178,7 +13251,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
.{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
else
- .{ .off = @intCast(pl_ty.abiSize(mod)), .ty = Type.bool };
+ .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
const ptr_reg = switch (ptr_mcv) {
.register => |reg| reg,
@@ -13187,7 +13260,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
try self.asmMemoryImmediate(
.{ ._, .cmp },
.{
@@ -13205,13 +13278,14 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
}
fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const err_ty = eu_ty.errorUnionSet(mod);
if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false
try self.spillEflagsIfOccupied();
- const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod));
+ const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt));
switch (eu_mcv) {
.register => |reg| {
const eu_lock = self.register_manager.lockReg(reg);
@@ -13253,7 +13327,8 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue)
}
fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const eu_ty = ptr_ty.childType(mod);
const err_ty = eu_ty.errorUnionSet(mod);
if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false
@@ -13267,7 +13342,7 @@ fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCV
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod));
+ const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt));
try self.asmMemoryImmediate(
.{ ._, .cmp },
.{
@@ -13539,12 +13614,12 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) void {
}
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block_ty = self.typeOfIndex(br.block_inst);
const block_unused =
- !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst);
+ !block_ty.hasRuntimeBitsIgnoreComptime(pt) or self.liveness.isUnused(br.block_inst);
const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
const block_data = self.blocks.getPtr(br.block_inst).?;
const first_br = block_data.relocs.items.len == 0;
@@ -13600,7 +13675,8 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const clobbers_len: u31 = @truncate(extra.data.flags);
@@ -13664,7 +13740,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
'x' => abi.RegisterClass.sse,
else => unreachable,
}) orelse return self.fail("ran out of registers lowering inline asm", .{}),
- @intCast(ty.abiSize(mod)),
+ @intCast(ty.abiSize(pt)),
)
else if (mem.eql(u8, rest, "m"))
if (output != .none) null else return self.fail(
@@ -13734,7 +13810,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
break :arg input_mcv;
const reg = try self.register_manager.allocReg(null, rc);
try self.genSetReg(reg, ty, input_mcv, .{});
- break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(mod))) };
+ break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(pt))) };
} else if (mem.eql(u8, constraint, "i") or mem.eql(u8, constraint, "n"))
switch (input_mcv) {
.immediate => |imm| .{ .immediate = imm },
@@ -14310,18 +14386,19 @@ const MoveStrategy = union(enum) {
}
};
fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !MoveStrategy {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (class) {
.general_purpose, .segment => return .{ .move = .{ ._, .mov } },
.x87 => return .x87_load_store,
.mmx => {},
.sse => switch (ty.zigTypeTag(mod)) {
else => {
- const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none);
+ const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none);
assert(std.mem.indexOfNone(abi.Class, classes, &.{
.integer, .sse, .sseup, .memory, .float, .float_combine,
}) == null);
- const abi_size = ty.abiSize(mod);
+ const abi_size = ty.abiSize(pt);
if (abi_size < 4 or
std.mem.indexOfScalar(abi.Class, classes, .integer) != null) switch (abi_size) {
1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
@@ -14532,7 +14609,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
},
.ip => {},
}
- return self.fail("TODO moveStrategy for {}", .{ty.fmt(mod)});
+ return self.fail("TODO moveStrategy for {}", .{ty.fmt(pt)});
}
const CopyOptions = struct {
@@ -14540,7 +14617,7 @@ const CopyOptions = struct {
};
fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: CopyOptions) InnerError!void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
@@ -14601,7 +14678,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: Copy
opts,
),
else => return self.fail("TODO implement genCopy for {s} of {}", .{
- @tagName(src_mcv), ty.fmt(mod),
+ @tagName(src_mcv), ty.fmt(pt),
}),
};
defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
@@ -14617,7 +14694,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: Copy
} },
else => unreachable,
}, opts);
- part_disp += @intCast(dst_ty.abiSize(mod));
+ part_disp += @intCast(dst_ty.abiSize(pt));
}
},
.indirect => |reg_off| try self.genSetMem(
@@ -14658,9 +14735,10 @@ fn genSetReg(
src_mcv: MCValue,
opts: CopyOptions,
) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
- if (ty.bitSize(mod) > dst_reg.bitSize())
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
+ if (ty.bitSize(pt) > dst_reg.bitSize())
return self.fail("genSetReg called with a value larger than dst_reg", .{});
switch (src_mcv) {
.none,
@@ -14686,7 +14764,7 @@ fn genSetReg(
),
else => unreachable,
},
- .segment, .x87, .mmx, .sse => try self.genSetReg(dst_reg, ty, try self.genTypedValue(try mod.undefValue(ty)), opts),
+ .segment, .x87, .mmx, .sse => try self.genSetReg(dst_reg, ty, try self.genTypedValue(try pt.undefValue(ty)), opts),
.ip => unreachable,
},
.eflags => |cc| try self.asmSetccRegister(cc, dst_reg.to8()),
@@ -14797,7 +14875,7 @@ fn genSetReg(
80 => null,
else => unreachable,
},
- }) orelse return self.fail("TODO implement genSetReg for {}", .{ty.fmt(mod)}),
+ }) orelse return self.fail("TODO implement genSetReg for {}", .{ty.fmt(pt)}),
registerAlias(dst_reg, abi_size),
registerAlias(src_reg, abi_size),
),
@@ -14847,7 +14925,7 @@ fn genSetReg(
return (try self.moveStrategy(
ty,
dst_reg.class(),
- ty.abiAlignment(mod).check(@as(u32, @bitCast(small_addr))),
+ ty.abiAlignment(pt).check(@as(u32, @bitCast(small_addr))),
)).read(self, registerAlias(dst_reg, abi_size), .{
.base = .{ .reg = .ds },
.mod = .{ .rm = .{
@@ -14967,8 +15045,9 @@ fn genSetMem(
src_mcv: MCValue,
opts: CopyOptions,
) InnerError!void {
- const mod = self.bin_file.comp.module.?;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const dst_ptr_mcv: MCValue = switch (base) {
.none => .{ .immediate = @bitCast(@as(i64, disp)) },
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
@@ -15094,21 +15173,21 @@ fn genSetMem(
var part_disp: i32 = disp;
for (try self.splitType(ty), src_regs) |src_ty, src_reg| {
try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }, opts);
- part_disp += @intCast(src_ty.abiSize(mod));
+ part_disp += @intCast(src_ty.abiSize(pt));
}
},
.register_overflow => |ro| switch (ty.zigTypeTag(mod)) {
.Struct => {
try self.genSetMem(
base,
- disp + @as(i32, @intCast(ty.structFieldOffset(0, mod))),
+ disp + @as(i32, @intCast(ty.structFieldOffset(0, pt))),
ty.structFieldType(0, mod),
.{ .register = ro.reg },
opts,
);
try self.genSetMem(
base,
- disp + @as(i32, @intCast(ty.structFieldOffset(1, mod))),
+ disp + @as(i32, @intCast(ty.structFieldOffset(1, pt))),
ty.structFieldType(1, mod),
.{ .eflags = ro.eflags },
opts,
@@ -15120,14 +15199,14 @@ fn genSetMem(
try self.genSetMem(base, disp, child_ty, .{ .register = ro.reg }, opts);
try self.genSetMem(
base,
- disp + @as(i32, @intCast(child_ty.abiSize(mod))),
+ disp + @as(i32, @intCast(child_ty.abiSize(pt))),
Type.bool,
.{ .eflags = ro.eflags },
opts,
);
},
else => return self.fail("TODO implement genSetMem for {s} of {}", .{
- @tagName(src_mcv), ty.fmt(mod),
+ @tagName(src_mcv), ty.fmt(pt),
}),
},
.register_offset,
@@ -15236,8 +15315,9 @@ fn genLazySymbolRef(
reg: Register,
lazy_sym: link.File.LazySymbol,
) InnerError!void {
+ const pt = self.pt;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
+ const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym = elf_file.symbol(sym_index);
if (self.mod.pic) {
@@ -15273,7 +15353,7 @@ fn genLazySymbolRef(
}
}
} else if (self.bin_file.cast(link.File.Plan9)) |p9_file| {
- const atom_index = p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
+ const atom_index = p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
var atom = p9_file.getAtom(atom_index);
_ = atom.getOrCreateOffsetTableEntry(p9_file);
@@ -15300,7 +15380,7 @@ fn genLazySymbolRef(
else => unreachable,
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err|
+ const atom_index = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
switch (tag) {
@@ -15314,7 +15394,7 @@ fn genLazySymbolRef(
else => unreachable,
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const sym_index = macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, lazy_sym) catch |err|
+ const sym_index = macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym = macho_file.getSymbol(sym_index);
switch (tag) {
@@ -15353,7 +15433,8 @@ fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
@@ -15366,10 +15447,10 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(mod) <= src_ty.abiSize(mod) and
+ const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and
self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.genCopy(switch (math.order(dst_ty.abiSize(mod), src_ty.abiSize(mod))) {
+ try self.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) {
.lt => dst_ty,
.eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty,
.gt => src_ty,
@@ -15382,8 +15463,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
if (dst_ty.isAbiInt(mod) and src_ty.isAbiInt(mod) and
dst_ty.intInfo(mod).signedness == src_ty.intInfo(mod).signedness) break :result dst_mcv;
- const abi_size = dst_ty.abiSize(mod);
- const bit_size = dst_ty.bitSize(mod);
+ const abi_size = dst_ty.abiSize(pt);
+ const bit_size = dst_ty.bitSize(pt);
if (abi_size * 8 <= bit_size or dst_ty.isVector(mod)) break :result dst_mcv;
const dst_limbs_len = math.divCeil(i32, @intCast(bit_size), 64) catch unreachable;
@@ -15412,7 +15493,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const slice_ty = self.typeOfIndex(inst);
@@ -15421,11 +15503,11 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const array_ty = ptr_ty.childType(mod);
const array_len = array_ty.arrayLen(mod);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr, .{});
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(ptr_ty.abiSize(mod)),
+ @intCast(ptr_ty.abiSize(pt)),
Type.usize,
.{ .immediate = array_len },
.{},
@@ -15436,14 +15518,15 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
const dst_bits = dst_ty.floatBits(self.target.*);
const src_ty = self.typeOf(ty_op.operand);
- const src_bits: u32 = @intCast(src_ty.bitSize(mod));
+ const src_bits: u32 = @intCast(src_ty.bitSize(pt));
const src_signedness =
if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
const src_size = math.divCeil(u32, @max(switch (src_signedness) {
@@ -15458,7 +15541,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}) {
if (src_bits > 128) return self.fail("TODO implement airFloatFromInt from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
var callee_buf: ["__floatun?i?f".len]u8 = undefined;
@@ -15500,7 +15583,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
},
else => null,
}) orelse return self.fail("TODO implement airFloatFromInt from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
const dst_alias = dst_reg.to128();
const src_alias = registerAlias(src_reg, src_size);
@@ -15515,11 +15598,12 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_bits: u32 = @intCast(dst_ty.bitSize(mod));
+ const dst_bits: u32 = @intCast(dst_ty.bitSize(pt));
const dst_signedness =
if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
const dst_size = math.divCeil(u32, @max(switch (dst_signedness) {
@@ -15537,7 +15621,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}) {
if (dst_bits > 128) return self.fail("TODO implement airIntFromFloat from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
+ src_ty.fmt(pt), dst_ty.fmt(pt),
});
var callee_buf: ["__fixuns?f?i".len]u8 = undefined;
@@ -15586,13 +15670,13 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr_ty = self.typeOf(extra.ptr);
const val_ty = self.typeOf(extra.expected_value);
- const val_abi_size: u32 = @intCast(val_ty.abiSize(mod));
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
@@ -15682,7 +15766,8 @@ fn atomicOp(
rmw_op: ?std.builtin.AtomicRmwOp,
order: std.builtin.AtomicOrder,
) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ptr_lock = switch (ptr_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
@@ -15695,7 +15780,7 @@ fn atomicOp(
};
defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
- const val_abi_size: u32 = @intCast(val_ty.abiSize(mod));
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
const mem_size = Memory.Size.fromSize(val_abi_size);
const ptr_mem: Memory = switch (ptr_mcv) {
.immediate, .register, .register_offset, .lea_frame => try ptr_mcv.deref().mem(self, mem_size),
@@ -15809,7 +15894,7 @@ fn atomicOp(
},
else => unreachable,
}) orelse return self.fail("TODO implement atomicOp of {s} for {}", .{
- @tagName(op), val_ty.fmt(mod),
+ @tagName(op), val_ty.fmt(pt),
});
try self.genSetReg(sse_reg, val_ty, .{ .register = .rax }, .{});
switch (mir_tag[0]) {
@@ -16086,7 +16171,8 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
}
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
result: {
@@ -16112,7 +16198,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
};
defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_abi_size: u31 = @intCast(elem_ty.abiSize(mod));
+ const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt));
if (elem_abi_size == 1) {
const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
@@ -16185,7 +16271,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
self.performReloc(skip_reloc);
},
.One => {
- const elem_ptr_ty = try mod.singleMutPtrType(elem_ty);
+ const elem_ptr_ty = try pt.singleMutPtrType(elem_ty);
const len = dst_ptr_ty.childType(mod).arrayLen(mod);
@@ -16214,7 +16300,8 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
}
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
@@ -16246,13 +16333,13 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
.{ .i_, .mul },
len_reg,
try dst_ptr.address().offset(8).deref().mem(self, .qword),
- Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(mod))),
+ Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(pt))),
);
break :len .{ .register = len_reg };
},
.One => len: {
const array_ty = dst_ptr_ty.childType(mod);
- break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(mod) };
+ break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(pt) };
},
.C, .Many => unreachable,
};
@@ -16269,7 +16356,8 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const inst_ty = self.typeOfIndex(inst);
const enum_ty = self.typeOf(un_op);
@@ -16278,8 +16366,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
// We need a properly aligned and sized call frame to be able to call this function.
{
const needed_call_frame = FrameAlloc.init(.{
- .size = inst_ty.abiSize(mod),
- .alignment = inst_ty.abiAlignment(mod),
+ .size = inst_ty.abiSize(pt),
+ .alignment = inst_ty.abiAlignment(pt),
});
const frame_allocs_slice = self.frame_allocs.slice();
const stack_frame_size =
@@ -16311,7 +16399,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
}
fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const err_ty = self.typeOf(un_op);
@@ -16413,7 +16502,8 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const vector_ty = self.typeOfIndex(inst);
const vector_len = vector_ty.vectorLen(mod);
@@ -16495,15 +16585,15 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(ty_op.operand);
if (src_mcv.isMemory()) try self.asmRegisterMemory(
mir_tag,
- registerAlias(dst_reg, @intCast(vector_ty.abiSize(mod))),
+ registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))),
try src_mcv.mem(self, self.memSize(scalar_ty)),
) else {
if (mir_tag[0] == .v_i128) break :avx2;
try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
try self.asmRegisterRegister(
mir_tag,
- registerAlias(dst_reg, @intCast(vector_ty.abiSize(mod))),
- registerAlias(dst_reg, @intCast(scalar_ty.abiSize(mod))),
+ registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))),
+ registerAlias(dst_reg, @intCast(scalar_ty.abiSize(pt))),
);
}
break :result .{ .register = dst_reg };
@@ -16515,7 +16605,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetReg(dst_reg, scalar_ty, .{ .air_ref = ty_op.operand }, .{});
if (vector_len == 1) break :result .{ .register = dst_reg };
- const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(mod)));
+ const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt)));
const scalar_bits = scalar_ty.intInfo(mod).bits;
if (switch (scalar_bits) {
1...8 => true,
@@ -16745,20 +16835,21 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
},
}
- return self.fail("TODO implement airSplat for {}", .{vector_ty.fmt(mod)});
+ return self.fail("TODO implement airSplat for {}", .{vector_ty.fmt(pt)});
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ty = self.typeOfIndex(inst);
const vec_len = ty.vectorLen(mod);
const elem_ty = ty.childType(mod);
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const pred_ty = self.typeOf(pl_op.operand);
const result = result: {
@@ -16878,17 +16969,17 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}),
);
- } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)});
+ } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
const elem_bits: u16 = @intCast(elem_abi_size * 8);
- const mask_elem_ty = try mod.intType(.unsigned, elem_bits);
- const mask_ty = try mod.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() });
+ const mask_elem_ty = try pt.intType(.unsigned, elem_bits);
+ const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() });
if (!pred_fits_in_elem) if (self.hasFeature(.ssse3)) {
var mask_elems: [32]InternPool.Index = undefined;
- for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try mod.intern(.{ .int = .{
+ for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
.ty = mask_elem_ty.toIntern(),
.storage = .{ .u64 = bit / elem_bits },
} });
- const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
+ const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems[0..vec_len] },
} })));
@@ -16906,14 +16997,14 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
mask_alias,
mask_mem,
);
- } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)});
+ } else return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
{
var mask_elems: [32]InternPool.Index = undefined;
- for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try mod.intern(.{ .int = .{
+ for (mask_elems[0..vec_len], 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
.ty = mask_elem_ty.toIntern(),
.storage = .{ .u64 = @as(u32, 1) << @intCast(bit & (elem_bits - 1)) },
} });
- const mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
+ const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems[0..vec_len] },
} })));
@@ -17014,7 +17105,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
else => null,
},
},
- }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)});
+ }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
if (has_avx) {
const rhs_alias = if (rhs_mcv.isRegister())
registerAlias(rhs_mcv.getReg().?, abi_size)
@@ -17061,7 +17152,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
16, 80, 128 => null,
else => unreachable,
},
- }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(mod)});
+ }) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_alias, mask_alias);
if (rhs_mcv.isMemory()) try self.asmRegisterMemory(
.{ mir_fixes, .andn },
@@ -17083,18 +17174,19 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const dst_ty = self.typeOfIndex(inst);
const elem_ty = dst_ty.childType(mod);
- const elem_abi_size: u16 = @intCast(elem_ty.abiSize(mod));
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+ const elem_abi_size: u16 = @intCast(elem_ty.abiSize(pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
const lhs_ty = self.typeOf(extra.a);
- const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(mod));
+ const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
const rhs_ty = self.typeOf(extra.b);
- const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(mod));
+ const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(pt));
const max_abi_size = @max(dst_abi_size, lhs_abi_size, rhs_abi_size);
const ExpectedContents = [32]?i32;
@@ -17106,11 +17198,11 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
defer allocator.free(mask_elems);
for (mask_elems, 0..) |*mask_elem, elem_index| {
const mask_elem_val =
- Value.fromInterned(extra.mask).elemValue(mod, elem_index) catch unreachable;
+ Value.fromInterned(extra.mask).elemValue(pt, elem_index) catch unreachable;
mask_elem.* = if (mask_elem_val.isUndef(mod))
null
else
- @intCast(mask_elem_val.toSignedInt(mod));
+ @intCast(mask_elem_val.toSignedInt(pt));
}
const has_avx = self.hasFeature(.avx);
@@ -17626,8 +17718,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
else
self.hasFeature(.avx2)) 32 else 16)) break :blendv;
- const select_mask_elem_ty = try mod.intType(.unsigned, elem_abi_size * 8);
- const select_mask_ty = try mod.vectorType(.{
+ const select_mask_elem_ty = try pt.intType(.unsigned, elem_abi_size * 8);
+ const select_mask_ty = try pt.vectorType(.{
.len = @intCast(mask_elems.len),
.child = select_mask_elem_ty.toIntern(),
});
@@ -17643,11 +17735,11 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
if (mask_elem_index != elem_index) break :blendv;
select_mask_elem.* = (if (mask_elem < 0)
- try select_mask_elem_ty.maxIntScalar(mod, select_mask_elem_ty)
+ try select_mask_elem_ty.maxIntScalar(pt, select_mask_elem_ty)
else
- try select_mask_elem_ty.minIntScalar(mod, select_mask_elem_ty)).toIntern();
+ try select_mask_elem_ty.minIntScalar(pt, select_mask_elem_ty)).toIntern();
}
- const select_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
+ const select_mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = select_mask_ty.toIntern(),
.storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
} })));
@@ -17783,7 +17875,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
var lhs_mask_elems: [16]InternPool.Index = undefined;
for (lhs_mask_elems[0..max_abi_size], 0..) |*lhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
- lhs_mask_elem.* = try mod.intern(.{ .int = .{
+ lhs_mask_elem.* = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
@@ -17794,8 +17886,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
} },
} });
}
- const lhs_mask_ty = try mod.vectorType(.{ .len = max_abi_size, .child = .u8_type });
- const lhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
+ const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
+ const lhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = lhs_mask_ty.toIntern(),
.storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
} })));
@@ -17817,7 +17909,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
var rhs_mask_elems: [16]InternPool.Index = undefined;
for (rhs_mask_elems[0..max_abi_size], 0..) |*rhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
- rhs_mask_elem.* = try mod.intern(.{ .int = .{
+ rhs_mask_elem.* = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
@@ -17828,8 +17920,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
} },
} });
}
- const rhs_mask_ty = try mod.vectorType(.{ .len = max_abi_size, .child = .u8_type });
- const rhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try mod.intern(.{ .aggregate = .{
+ const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
+ const rhs_mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = rhs_mask_ty.toIntern(),
.storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
} })));
@@ -17881,14 +17973,15 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
break :result null;
}) orelse return self.fail("TODO implement airShuffle from {} and {} to {} with {}", .{
- lhs_ty.fmt(mod), rhs_ty.fmt(mod), dst_ty.fmt(mod),
- Value.fromInterned(extra.mask).fmtValue(mod, null),
+ lhs_ty.fmt(pt), rhs_ty.fmt(pt), dst_ty.fmt(pt),
+ Value.fromInterned(extra.mask).fmtValue(pt, null),
});
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const result: MCValue = result: {
@@ -17898,9 +17991,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
const operand_mcv = try self.resolveInst(reduce.operand);
const mask_len = (math.cast(u6, operand_ty.vectorLen(mod)) orelse
- return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}));
+ return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}));
const mask = (@as(u64, 1) << mask_len) - 1;
- const abi_size: u32 = @intCast(operand_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(operand_ty.abiSize(pt));
switch (reduce.operation) {
.Or => {
if (operand_mcv.isMemory()) try self.asmMemoryImmediate(
@@ -17936,16 +18029,17 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_reg, tmp_reg);
break :result .{ .eflags = .z };
},
- else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}),
+ else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}),
}
}
- return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)});
+ return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)});
};
return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const result_ty = self.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(mod));
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -17953,30 +18047,30 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
switch (result_ty.zigTypeTag(mod)) {
.Struct => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
if (result_ty.containerLayout(mod) == .@"packed") {
const struct_obj = mod.typeToStruct(result_ty).?;
try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 },
- .{ .immediate = result_ty.abiSize(mod) },
+ .{ .immediate = result_ty.abiSize(pt) },
.{},
);
for (elements, 0..) |elem, elem_i_usize| {
const elem_i: u32 = @intCast(elem_i_usize);
- if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, mod);
- const elem_bit_size: u32 = @intCast(elem_ty.bitSize(mod));
+ const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt));
if (elem_bit_size > 64) {
return self.fail(
"TODO airAggregateInit implement packed structs with large fields",
.{},
);
}
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = mod.structPackedFieldBitOffset(struct_obj, elem_i);
+ const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@@ -18046,10 +18140,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
}
} else for (elements, 0..) |elem, elem_i| {
- if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, mod);
- const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, mod));
+ const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt));
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
@@ -18062,7 +18156,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.Array, .Vector => {
const elem_ty = result_ty.childType(mod);
if (result_ty.isVector(mod) and elem_ty.toIntern() == .bool_type) {
- const result_size: u32 = @intCast(result_ty.abiSize(mod));
+ const result_size: u32 = @intCast(result_ty.abiSize(pt));
const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
try self.asmRegisterRegister(
.{ ._, .xor },
@@ -18093,8 +18187,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
break :result .{ .register = dst_reg };
} else {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod));
- const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
@@ -18136,18 +18230,19 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result: MCValue = result: {
const union_ty = self.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(mod);
+ const layout = union_ty.unionGetLayout(pt);
const src_ty = self.typeOf(extra.init);
const src_mcv = try self.resolveInst(extra.init);
if (layout.tag_size == 0) {
- if (layout.abi_size <= src_ty.abiSize(mod) and
+ if (layout.abi_size <= src_ty.abiSize(pt) and
self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv;
const dst_mcv = try self.allocRegOrMem(inst, true);
@@ -18161,9 +18256,9 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
- const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
- const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
- const tag_int = tag_int_val.toUnsignedInt(mod);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
+ const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
+ const tag_int = tag_int_val.toUnsignedInt(pt);
const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
@@ -18192,7 +18287,8 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
}
fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ty = self.typeOfIndex(inst);
@@ -18205,7 +18301,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}) {
if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement airMulAdd for {}", .{
- ty.fmt(mod),
+ ty.fmt(pt),
});
var callee_buf: ["__fma?".len]u8 = undefined;
@@ -18334,12 +18430,12 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}
else
- unreachable) orelse return self.fail("TODO implement airMulAdd for {}", .{ty.fmt(mod)});
+ unreachable) orelse return self.fail("TODO implement airMulAdd for {}", .{ty.fmt(pt)});
var mops: [3]MCValue = undefined;
for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;
- const abi_size: u32 = @intCast(ty.abiSize(mod));
+ const abi_size: u32 = @intCast(ty.abiSize(pt));
const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
@@ -18359,9 +18455,10 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const va_list_ty = self.air.instructions.items(.data)[@intFromEnum(inst)].ty;
- const ptr_anyopaque_ty = try mod.singleMutPtrType(Type.anyopaque);
+ const ptr_anyopaque_ty = try pt.singleMutPtrType(Type.anyopaque);
const result: MCValue = switch (abi.resolveCallingConvention(
self.fn_type.fnCallingConvention(mod),
@@ -18369,7 +18466,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
)) {
.SysV => result: {
const info = self.va_info.sysv;
- const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, mod));
+ const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, pt));
var field_off: u31 = 0;
// gp_offset: c_uint,
try self.genSetMem(
@@ -18379,7 +18476,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .immediate = info.gp_count * 8 },
.{},
);
- field_off += @intCast(Type.c_uint.abiSize(mod));
+ field_off += @intCast(Type.c_uint.abiSize(pt));
// fp_offset: c_uint,
try self.genSetMem(
.{ .frame = dst_fi },
@@ -18388,7 +18485,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .immediate = abi.SysV.c_abi_int_param_regs.len * 8 + info.fp_count * 16 },
.{},
);
- field_off += @intCast(Type.c_uint.abiSize(mod));
+ field_off += @intCast(Type.c_uint.abiSize(pt));
// overflow_arg_area: *anyopaque,
try self.genSetMem(
.{ .frame = dst_fi },
@@ -18397,7 +18494,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .lea_frame = info.overflow_arg_area },
.{},
);
- field_off += @intCast(ptr_anyopaque_ty.abiSize(mod));
+ field_off += @intCast(ptr_anyopaque_ty.abiSize(pt));
// reg_save_area: *anyopaque,
try self.genSetMem(
.{ .frame = dst_fi },
@@ -18406,7 +18503,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .lea_frame = info.reg_save_area },
.{},
);
- field_off += @intCast(ptr_anyopaque_ty.abiSize(mod));
+ field_off += @intCast(ptr_anyopaque_ty.abiSize(pt));
break :result .{ .load_frame = .{ .index = dst_fi } };
},
.Win64 => return self.fail("TODO implement c_va_start for Win64", .{}),
@@ -18416,11 +18513,12 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
}
fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = self.typeOfIndex(inst);
const promote_ty = self.promoteVarArg(ty);
- const ptr_anyopaque_ty = try mod.singleMutPtrType(Type.anyopaque);
+ const ptr_anyopaque_ty = try pt.singleMutPtrType(Type.anyopaque);
const unused = self.liveness.isUnused(inst);
const result: MCValue = switch (abi.resolveCallingConvention(
@@ -18454,7 +18552,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } };
const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } };
- const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, mod, self.target.*, .arg), .none);
+ const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, pt, self.target.*, .arg), .none);
switch (classes[0]) {
.integer => {
assert(classes.len == 1);
@@ -18489,7 +18587,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
.base = .{ .reg = addr_reg },
.mod = .{ .rm = .{
.size = .qword,
- .disp = @intCast(@max(promote_ty.abiSize(mod), 8)),
+ .disp = @intCast(@max(promote_ty.abiSize(pt), 8)),
} },
});
try self.genCopy(
@@ -18537,7 +18635,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
.base = .{ .reg = addr_reg },
.mod = .{ .rm = .{
.size = .qword,
- .disp = @intCast(@max(promote_ty.abiSize(mod), 8)),
+ .disp = @intCast(@max(promote_ty.abiSize(pt), 8)),
} },
});
try self.genCopy(
@@ -18557,7 +18655,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
unreachable;
},
else => return self.fail("TODO implement c_va_arg for {} on SysV", .{
- promote_ty.fmt(mod),
+ promote_ty.fmt(pt),
}),
}
@@ -18627,11 +18725,11 @@ fn airVaEnd(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
const ty = self.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
const mcv = if (ref.toIndex()) |inst| mcv: {
break :mcv self.inst_tracking.getPtr(inst).?.short;
@@ -18705,8 +18803,8 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
}
fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
- const mod = self.bin_file.comp.module.?;
- return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, val, self.owner.getDecl(mod))) {
+ const pt = self.pt;
+ return switch (try codegen.genTypedValue(self.bin_file, pt, self.src_loc, val, self.owner.getDecl(pt.zcu))) {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
@@ -18745,7 +18843,8 @@ fn resolveCallingConventionValues(
var_args: []const Type,
stack_frame_base: FrameIndex,
) !CallMCValues {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const cc = fn_info.cc;
const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
@@ -18788,7 +18887,7 @@ fn resolveCallingConventionValues(
.SysV => {},
.Win64 => {
// Align the stack to 16bytes before allocating shadow stack space (if any).
- result.stack_byte_count += @intCast(4 * Type.usize.abiSize(mod));
+ result.stack_byte_count += @intCast(4 * Type.usize.abiSize(pt));
},
else => unreachable,
}
@@ -18796,7 +18895,7 @@ fn resolveCallingConventionValues(
// Return values
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// TODO: is this even possible for C calling convention?
result.return_value = InstTracking.init(.none);
} else {
@@ -18804,15 +18903,15 @@ fn resolveCallingConventionValues(
var ret_tracking_i: usize = 0;
const classes = switch (resolved_cc) {
- .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, self.target.*, .ret), .none),
- .Win64 => &.{abi.classifyWindows(ret_ty, mod)},
+ .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, pt, self.target.*, .ret), .none),
+ .Win64 => &.{abi.classifyWindows(ret_ty, pt)},
else => unreachable,
};
for (classes) |class| switch (class) {
.integer => {
const ret_int_reg = registerAlias(
abi.getCAbiIntReturnRegs(resolved_cc)[ret_int_reg_i],
- @intCast(@min(ret_ty.abiSize(mod), 8)),
+ @intCast(@min(ret_ty.abiSize(pt), 8)),
);
ret_int_reg_i += 1;
@@ -18822,7 +18921,7 @@ fn resolveCallingConventionValues(
.sse, .float, .float_combine, .win_i128 => {
const ret_sse_reg = registerAlias(
abi.getCAbiSseReturnRegs(resolved_cc)[ret_sse_reg_i],
- @intCast(ret_ty.abiSize(mod)),
+ @intCast(ret_ty.abiSize(pt)),
);
ret_sse_reg_i += 1;
@@ -18865,7 +18964,7 @@ fn resolveCallingConventionValues(
// Input params
for (param_types, result.args) |ty, *arg| {
- assert(ty.hasRuntimeBitsIgnoreComptime(mod));
+ assert(ty.hasRuntimeBitsIgnoreComptime(pt));
switch (resolved_cc) {
.SysV => {},
.Win64 => {
@@ -18879,8 +18978,8 @@ fn resolveCallingConventionValues(
var arg_mcv_i: usize = 0;
const classes = switch (resolved_cc) {
- .SysV => mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .arg), .none),
- .Win64 => &.{abi.classifyWindows(ty, mod)},
+ .SysV => mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .arg), .none),
+ .Win64 => &.{abi.classifyWindows(ty, pt)},
else => unreachable,
};
for (classes) |class| switch (class) {
@@ -18890,7 +18989,7 @@ fn resolveCallingConventionValues(
const param_int_reg = registerAlias(
abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i],
- @intCast(@min(ty.abiSize(mod), 8)),
+ @intCast(@min(ty.abiSize(pt), 8)),
);
param_int_reg_i += 1;
@@ -18903,7 +19002,7 @@ fn resolveCallingConventionValues(
const param_sse_reg = registerAlias(
abi.getCAbiSseParamRegs(resolved_cc)[param_sse_reg_i],
- @intCast(ty.abiSize(mod)),
+ @intCast(ty.abiSize(pt)),
);
param_sse_reg_i += 1;
@@ -18916,7 +19015,7 @@ fn resolveCallingConventionValues(
.x87, .x87up, .complex_x87, .memory => break,
else => unreachable,
},
- .Win64 => if (ty.abiSize(mod) > 8) {
+ .Win64 => if (ty.abiSize(pt) > 8) {
const param_int_reg =
abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i].to64();
param_int_reg_i += 1;
@@ -18938,7 +19037,7 @@ fn resolveCallingConventionValues(
const frame_elems_len = ty.vectorLen(mod) - remaining_param_int_regs;
const frame_elem_size = mem.alignForward(
u64,
- ty.childType(mod).abiSize(mod),
+ ty.childType(mod).abiSize(pt),
frame_elem_align,
);
const frame_size: u31 = @intCast(frame_elems_len * frame_elem_size);
@@ -18962,9 +19061,9 @@ fn resolveCallingConventionValues(
continue;
}
- const param_size: u31 = @intCast(ty.abiSize(mod));
+ const param_size: u31 = @intCast(ty.abiSize(pt));
const param_align: u31 =
- @intCast(@max(ty.abiAlignment(mod).toByteUnits().?, 8));
+ @intCast(@max(ty.abiAlignment(pt).toByteUnits().?, 8));
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -18984,11 +19083,11 @@ fn resolveCallingConventionValues(
// Return values
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
result.return_value = InstTracking.init(.none);
} else {
const ret_reg = abi.getCAbiIntReturnRegs(resolved_cc)[0];
- const ret_ty_size: u31 = @intCast(ret_ty.abiSize(mod));
+ const ret_ty_size: u31 = @intCast(ret_ty.abiSize(pt));
if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
const aliased_reg = registerAlias(ret_reg, ret_ty_size);
result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none };
@@ -19003,12 +19102,12 @@ fn resolveCallingConventionValues(
// Input params
for (param_types, result.args) |ty, *arg| {
- if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
arg.* = .none;
continue;
}
- const param_size: u31 = @intCast(ty.abiSize(mod));
- const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnits().?);
+ const param_size: u31 = @intCast(ty.abiSize(pt));
+ const param_align: u31 = @intCast(ty.abiAlignment(pt).toByteUnits().?);
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19093,47 +19192,49 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
}
fn memSize(self: *Self, ty: Type) Memory.Size {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
return switch (ty.zigTypeTag(mod)) {
.Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
- else => Memory.Size.fromSize(@intCast(ty.abiSize(mod))),
+ else => Memory.Size.fromSize(@intCast(ty.abiSize(pt))),
};
}
fn splitType(self: *Self, ty: Type) ![2]Type {
- const mod = self.bin_file.comp.module.?;
- const classes = mem.sliceTo(&abi.classifySystemV(ty, mod, self.target.*, .other), .none);
+ const pt = self.pt;
+ const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none);
var parts: [2]Type = undefined;
if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
part.* = switch (class) {
.integer => switch (part_i) {
0 => Type.u64,
1 => part: {
- const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnits().?;
- const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8));
- break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) {
+ const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?;
+ const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8));
+ break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) {
1 => elem_ty,
- else => |len| try mod.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
+ else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
};
},
else => unreachable,
},
.float => Type.f32,
- .float_combine => try mod.arrayType(.{ .len = 2, .child = .f32_type }),
+ .float_combine => try pt.arrayType(.{ .len = 2, .child = .f32_type }),
.sse => Type.f64,
else => break,
};
- } else if (parts[0].abiSize(mod) + parts[1].abiSize(mod) == ty.abiSize(mod)) return parts;
- return self.fail("TODO implement splitType for {}", .{ty.fmt(mod)});
+ } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts;
+ return self.fail("TODO implement splitType for {}", .{ty.fmt(pt)});
}
/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(ty.bitSize(mod)),
+ .bits = @intCast(ty.bitSize(pt)),
};
const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return;
try self.spillEflagsIfOccupied();
@@ -19177,8 +19278,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
}
fn regBitSize(self: *Self, ty: Type) u64 {
- const mod = self.bin_file.comp.module.?;
- const abi_size = ty.abiSize(mod);
+ const pt = self.pt;
+ const mod = pt.zcu;
+ const abi_size = ty.abiSize(pt);
return switch (ty.zigTypeTag(mod)) {
else => switch (abi_size) {
1 => 8,
@@ -19196,8 +19298,7 @@ fn regBitSize(self: *Self, ty: Type) u64 {
}
fn regExtraBits(self: *Self, ty: Type) u64 {
- const mod = self.bin_file.comp.module.?;
- return self.regBitSize(ty) - ty.bitSize(mod);
+ return self.regBitSize(ty) - ty.bitSize(self.pt);
}
fn hasFeature(self: *Self, feature: Target.x86.Feature) bool {
@@ -19211,12 +19312,14 @@ fn hasAllFeatures(self: *Self, features: anytype) bool {
}
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
return self.air.typeOf(inst, &mod.intern_pool);
}
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
return self.air.typeOfIndex(inst, &mod.intern_pool);
}
@@ -19268,7 +19371,8 @@ fn floatLibcAbiSuffix(ty: Type) []const u8 {
}
fn promoteInt(self: *Self, ty: Type) Type {
- const mod = self.bin_file.comp.module.?;
+ const pt = self.pt;
+ const mod = pt.zcu;
const int_info: InternPool.Key.IntType = switch (ty.toIntern()) {
.bool_type => .{ .signedness = .unsigned, .bits = 1 },
else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty,
src/arch/x86_64/Lower.zig
@@ -8,7 +8,7 @@ allocator: Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
err_msg: ?*ErrorMsg = null,
-src_loc: Module.LazySrcLoc,
+src_loc: Zcu.LazySrcLoc,
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
@@ -657,7 +657,7 @@ const std = @import("std");
const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const Immediate = bits.Immediate;
const Instruction = encoder.Instruction;
const Lower = @This();
@@ -665,8 +665,6 @@ const Memory = Instruction.Memory;
const Mir = @import("Mir.zig");
const Mnemonic = Instruction.Mnemonic;
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const Operand = Instruction.Operand;
const Prefix = Instruction.Prefix;
const Register = bits.Register;
src/codegen/c/Type.zig
@@ -1339,11 +1339,11 @@ pub const Pool = struct {
allocator: std.mem.Allocator,
scratch: *std.ArrayListUnmanaged(u32),
ty: Type,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
mod: *Module,
kind: Kind,
) !CType {
- const ip = &zcu.intern_pool;
+ const ip = &pt.zcu.intern_pool;
switch (ty.toIntern()) {
.u0_type,
.i0_type,
@@ -1400,7 +1400,7 @@ pub const Pool = struct {
allocator,
scratch,
Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
- zcu,
+ pt,
mod,
kind,
),
@@ -1409,7 +1409,7 @@ pub const Pool = struct {
.adhoc_inferred_error_set_type,
=> return pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
- .bits = zcu.errorSetBits(),
+ .bits = pt.zcu.errorSetBits(),
}, mod, kind),
.manyptr_u8_type,
=> return pool.getPointer(allocator, .{
@@ -1492,13 +1492,13 @@ pub const Pool = struct {
allocator,
scratch,
Type.fromInterned(ptr_info.child),
- zcu,
+ pt,
mod,
.forward,
),
.alignas = AlignAs.fromAlignment(.{
.@"align" = ptr_info.flags.alignment,
- .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
+ .abi = Type.fromInterned(ptr_info.child).abiAlignment(pt),
}),
};
break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
@@ -1535,7 +1535,7 @@ pub const Pool = struct {
allocator,
scratch,
Type.fromInterned(ip.slicePtrType(ip_index)),
- zcu,
+ pt,
mod,
kind,
),
@@ -1560,7 +1560,7 @@ pub const Pool = struct {
allocator,
scratch,
elem_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1574,7 +1574,7 @@ pub const Pool = struct {
.{
.name = .{ .index = .array },
.ctype = array_ctype,
- .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1586,7 +1586,7 @@ pub const Pool = struct {
allocator,
scratch,
elem_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1600,7 +1600,7 @@ pub const Pool = struct {
.{
.name = .{ .index = .array },
.ctype = vector_ctype,
- .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1611,7 +1611,7 @@ pub const Pool = struct {
allocator,
scratch,
Type.fromInterned(payload_type),
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1635,7 +1635,7 @@ pub const Pool = struct {
.name = .{ .index = .payload },
.ctype = payload_ctype,
.alignas = AlignAs.fromAbiAlignment(
- Type.fromInterned(payload_type).abiAlignment(zcu),
+ Type.fromInterned(payload_type).abiAlignment(pt),
),
},
};
@@ -1643,7 +1643,7 @@ pub const Pool = struct {
},
.anyframe_type => unreachable,
.error_union_type => |error_union_info| {
- const error_set_bits = zcu.errorSetBits();
+ const error_set_bits = pt.zcu.errorSetBits();
const error_set_ctype = try pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
.bits = error_set_bits,
@@ -1654,7 +1654,7 @@ pub const Pool = struct {
allocator,
scratch,
payload_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1671,7 +1671,7 @@ pub const Pool = struct {
.{
.name = .{ .index = .payload },
.ctype = payload_ctype,
- .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1685,7 +1685,7 @@ pub const Pool = struct {
.tag = .@"struct",
.name = .{ .owner_decl = loaded_struct.decl.unwrap().? },
});
- if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
fwd_decl
else
CType.void;
@@ -1706,7 +1706,7 @@ pub const Pool = struct {
allocator,
scratch,
field_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1718,7 +1718,7 @@ pub const Pool = struct {
String.fromUnnamed(@intCast(field_index));
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_struct.fieldAlign(ip, field_index),
- .abi = field_type.abiAlignment(zcu),
+ .abi = field_type.abiAlignment(pt),
});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index,
@@ -1745,7 +1745,7 @@ pub const Pool = struct {
allocator,
scratch,
Type.fromInterned(loaded_struct.backingIntType(ip).*),
- zcu,
+ pt,
mod,
kind,
),
@@ -1766,7 +1766,7 @@ pub const Pool = struct {
allocator,
scratch,
field_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1780,7 +1780,7 @@ pub const Pool = struct {
.name = field_name.index,
.ctype = field_ctype.index,
.flags = .{ .alignas = AlignAs.fromAbiAlignment(
- field_type.abiAlignment(zcu),
+ field_type.abiAlignment(pt),
) },
});
}
@@ -1806,7 +1806,7 @@ pub const Pool = struct {
extra_index,
);
}
- const fwd_decl = try pool.fromType(allocator, scratch, ty, zcu, mod, .forward);
+ const fwd_decl = try pool.fromType(allocator, scratch, ty, pt, mod, .forward);
try pool.ensureUnusedCapacity(allocator, 1);
const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
.fwd_decl = fwd_decl.index,
@@ -1824,7 +1824,7 @@ pub const Pool = struct {
.tag = if (has_tag) .@"struct" else .@"union",
.name = .{ .owner_decl = loaded_union.decl },
});
- if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
fwd_decl
else
CType.void;
@@ -1847,7 +1847,7 @@ pub const Pool = struct {
allocator,
scratch,
field_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1858,7 +1858,7 @@ pub const Pool = struct {
);
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_union.fieldAlign(ip, field_index),
- .abi = field_type.abiAlignment(zcu),
+ .abi = field_type.abiAlignment(pt),
});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index,
@@ -1895,7 +1895,7 @@ pub const Pool = struct {
allocator,
scratch,
tag_type,
- zcu,
+ pt,
mod,
kind.noParameter(),
);
@@ -1903,7 +1903,7 @@ pub const Pool = struct {
struct_fields[struct_fields_len] = .{
.name = .{ .index = .tag },
.ctype = tag_ctype,
- .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
+ .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)),
};
struct_fields_len += 1;
}
@@ -1951,7 +1951,7 @@ pub const Pool = struct {
},
.@"packed" => return pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
- .bits = @intCast(ty.bitSize(zcu)),
+ .bits = @intCast(ty.bitSize(pt)),
}, mod, kind),
}
},
@@ -1960,7 +1960,7 @@ pub const Pool = struct {
allocator,
scratch,
Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
- zcu,
+ pt,
mod,
kind,
),
@@ -1975,7 +1975,7 @@ pub const Pool = struct {
allocator,
scratch,
return_type,
- zcu,
+ pt,
mod,
kind.asParameter(),
) else CType.void;
@@ -1987,7 +1987,7 @@ pub const Pool = struct {
allocator,
scratch,
param_type,
- zcu,
+ pt,
mod,
kind.asParameter(),
);
@@ -2011,7 +2011,7 @@ pub const Pool = struct {
.inferred_error_set_type,
=> return pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
- .bits = zcu.errorSetBits(),
+ .bits = pt.zcu.errorSetBits(),
}, mod, kind),
.undef,
src/codegen/c.zig
@@ -333,15 +333,15 @@ pub const Function = struct {
const gop = try f.value_map.getOrPut(ref);
if (gop.found_existing) return gop.value_ptr.*;
- const zcu = f.object.dg.zcu;
- const val = (try f.air.value(ref, zcu)).?;
+ const pt = f.object.dg.pt;
+ const val = (try f.air.value(ref, pt)).?;
const ty = f.typeOf(ref);
- const result: CValue = if (lowersToArray(ty, zcu)) result: {
+ const result: CValue = if (lowersToArray(ty, pt)) result: {
const writer = f.object.codeHeaderWriter();
const decl_c_value = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)),
});
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -358,7 +358,7 @@ pub const Function = struct {
}
fn wantSafety(f: *Function) bool {
- return switch (f.object.dg.zcu.optimizeMode()) {
+ return switch (f.object.dg.pt.zcu.optimizeMode()) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
};
@@ -379,7 +379,7 @@ pub const Function = struct {
fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
return f.allocAlignedLocal(inst, .{
.ctype = try f.ctypeFromType(ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)),
});
}
@@ -500,7 +500,8 @@ pub const Function = struct {
fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
const gpa = f.object.dg.gpa;
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ctype_pool = &f.object.dg.ctype_pool;
const gop = try f.lazy_fns.getOrPut(gpa, key);
@@ -539,13 +540,11 @@ pub const Function = struct {
}
fn typeOf(f: *Function, inst: Air.Inst.Ref) Type {
- const zcu = f.object.dg.zcu;
- return f.air.typeOf(inst, &zcu.intern_pool);
+ return f.air.typeOf(inst, &f.object.dg.pt.zcu.intern_pool);
}
fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type {
- const zcu = f.object.dg.zcu;
- return f.air.typeOfIndex(inst, &zcu.intern_pool);
+ return f.air.typeOfIndex(inst, &f.object.dg.pt.zcu.intern_pool);
}
fn copyCValue(f: *Function, ctype: CType, dst: CValue, src: CValue) !void {
@@ -608,7 +607,7 @@ pub const Object = struct {
/// This data is available both when outputting .c code and when outputting an .h file.
pub const DeclGen = struct {
gpa: mem.Allocator,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
mod: *Module,
pass: Pass,
is_naked_fn: bool,
@@ -634,7 +633,7 @@ pub const DeclGen = struct {
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
- const zcu = dg.zcu;
+ const zcu = dg.pt.zcu;
const decl_index = dg.pass.decl;
const decl = zcu.declPtr(decl_index);
const src_loc = decl.navSrcLoc(zcu);
@@ -648,7 +647,8 @@ pub const DeclGen = struct {
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const zcu = dg.zcu;
+ const pt = dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ctype_pool = &dg.ctype_pool;
const decl_val = Value.fromInterned(anon_decl.val);
@@ -656,7 +656,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
- if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
+ if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(pt)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
@@ -696,7 +696,7 @@ pub const DeclGen = struct {
// alignment. If there is already an entry, keep the greater alignment.
const explicit_alignment = ptr_type.flags.alignment;
if (explicit_alignment != .none) {
- const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
+ const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt);
if (explicit_alignment.order(abi_alignment).compare(.gt)) {
const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
@@ -713,15 +713,16 @@ pub const DeclGen = struct {
decl_index: InternPool.DeclIndex,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const zcu = dg.zcu;
+ const pt = dg.pt;
+ const zcu = pt.zcu;
const ctype_pool = &dg.ctype_pool;
const decl = zcu.declPtr(decl_index);
assert(decl.has_tv);
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const decl_ty = decl.typeOf(zcu);
- const ptr_ty = try decl.declPtrType(zcu);
- if (!decl_ty.isFnOrHasRuntimeBits(zcu)) {
+ const ptr_ty = try decl.declPtrType(pt);
+ if (!decl_ty.isFnOrHasRuntimeBits(pt)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
@@ -756,12 +757,13 @@ pub const DeclGen = struct {
derivation: Value.PointerDeriveStep,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const zcu = dg.zcu;
+ const pt = dg.pt;
+ const zcu = pt.zcu;
switch (derivation) {
.comptime_alloc_ptr, .comptime_field_ptr => unreachable,
.int => |int| {
const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete);
- const addr_val = try zcu.intValue(Type.usize, int.addr);
+ const addr_val = try pt.intValue(Type.usize, int.addr);
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)});
@@ -777,12 +779,12 @@ pub const DeclGen = struct {
},
.field_ptr => |field| {
- const parent_ptr_ty = try field.parent.ptrType(zcu);
+ const parent_ptr_ty = try field.parent.ptrType(pt);
// Ensure complete type definition is available before accessing fields.
_ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
- switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
+ switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, pt)) {
.begin => {
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
try writer.writeByte('(');
@@ -801,7 +803,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
- const offset_val = try zcu.intValue(Type.usize, byte_offset);
+ const offset_val = try pt.intValue(Type.usize, byte_offset);
try writer.writeAll("((char *)");
try dg.renderPointer(writer, field.parent.*, location);
try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
@@ -809,7 +811,7 @@ pub const DeclGen = struct {
}
},
- .elem_ptr => |elem| if (!(try elem.parent.ptrType(zcu)).childType(zcu).hasRuntimeBits(zcu)) {
+ .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) {
// Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
try writer.writeByte('(');
@@ -817,11 +819,11 @@ pub const DeclGen = struct {
try writer.writeByte(')');
try dg.renderPointer(writer, elem.parent.*, location);
} else {
- const index_val = try zcu.intValue(Type.usize, elem.elem_idx);
+ const index_val = try pt.intValue(Type.usize, elem.elem_idx);
// We want to do pointer arithmetic on a pointer to the element type.
// We might have a pointer-to-array. In this case, we must cast first.
const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
- const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(zcu), .complete);
+ const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(pt), .complete);
if (result_ctype.eql(parent_ctype)) {
// The pointer already has an appropriate type - just do the arithmetic.
try writer.writeByte('(');
@@ -846,7 +848,7 @@ pub const DeclGen = struct {
if (oac.byte_offset == 0) {
try dg.renderPointer(writer, oac.parent.*, location);
} else {
- const offset_val = try zcu.intValue(Type.usize, oac.byte_offset);
+ const offset_val = try pt.intValue(Type.usize, oac.byte_offset);
try writer.writeAll("((char *)");
try dg.renderPointer(writer, oac.parent.*, location);
try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
@@ -856,8 +858,7 @@ pub const DeclGen = struct {
}
fn renderErrorName(dg: *DeclGen, writer: anytype, err_name: InternPool.NullTerminatedString) !void {
- const zcu = dg.zcu;
- const ip = &zcu.intern_pool;
+ const ip = &dg.pt.zcu.intern_pool;
try writer.print("zig_error_{}", .{fmtIdent(err_name.toSlice(ip))});
}
@@ -867,7 +868,8 @@ pub const DeclGen = struct {
val: Value,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const zcu = dg.zcu;
+ const pt = dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = &dg.mod.resolved_target.result;
const ctype_pool = &dg.ctype_pool;
@@ -927,7 +929,7 @@ pub const DeclGen = struct {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
try writer.print("){x})", .{try dg.fmtIntLiteral(
- try zcu.intValue(Type.usize, val.toUnsignedInt(zcu)),
+ try pt.intValue(Type.usize, val.toUnsignedInt(pt)),
.Other,
)});
},
@@ -974,10 +976,10 @@ pub const DeclGen = struct {
.enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
.float => {
const bits = ty.floatBits(target.*);
- const f128_val = val.toFloat(f128, zcu);
+ const f128_val = val.toFloat(f128, pt);
// All unsigned ints matching float types are pre-allocated.
- const repr_ty = zcu.intType(.unsigned, bits) catch unreachable;
+ const repr_ty = pt.intType(.unsigned, bits) catch unreachable;
assert(bits <= 128);
var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
@@ -988,10 +990,10 @@ pub const DeclGen = struct {
};
switch (bits) {
- 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))),
- 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))),
- 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))),
- 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))),
+ 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))),
+ 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))),
+ 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))),
+ 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))),
128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
else => unreachable,
}
@@ -1002,10 +1004,10 @@ pub const DeclGen = struct {
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
- 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}),
- 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}),
- 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}),
- 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}),
+ 16 => try writer.print("{x}", .{val.toFloat(f16, pt)}),
+ 32 => try writer.print("{x}", .{val.toFloat(f32, pt)}),
+ 64 => try writer.print("{x}", .{val.toFloat(f64, pt)}),
+ 80 => try writer.print("{x}", .{val.toFloat(f80, pt)}),
128 => try writer.print("{x}", .{f128_val}),
else => unreachable,
}
@@ -1045,10 +1047,10 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}),
- 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}),
- 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}),
- 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}),
+ 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}),
+ 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}),
+ 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}),
+ 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}),
128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
else => unreachable,
};
@@ -1056,7 +1058,7 @@ pub const DeclGen = struct {
empty = false;
}
try writer.print("{x}", .{try dg.fmtIntLiteral(
- try zcu.intValue_big(repr_ty, repr_val_big.toConst()),
+ try pt.intValue_big(repr_ty, repr_val_big.toConst()),
location,
)});
if (!empty) try writer.writeByte(')');
@@ -1084,7 +1086,7 @@ pub const DeclGen = struct {
.ptr => {
var arena = std.heap.ArenaAllocator.init(zcu.gpa);
defer arena.deinit();
- const derivation = try val.pointerDerivation(arena.allocator(), zcu);
+ const derivation = try val.pointerDerivation(arena.allocator(), pt);
try dg.renderPointer(writer, derivation, location);
},
.opt => |opt| switch (ctype.info(ctype_pool)) {
@@ -1167,15 +1169,15 @@ pub const DeclGen = struct {
try literal.start();
var index: usize = 0;
while (index < ai.len) : (index += 1) {
- const elem_val = try val.elemValue(zcu, index);
+ const elem_val = try val.elemValue(pt, index);
const elem_val_u8: u8 = if (elem_val.isUndef(zcu))
undefPattern(u8)
else
- @intCast(elem_val.toUnsignedInt(zcu));
+ @intCast(elem_val.toUnsignedInt(pt));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
- const s_u8: u8 = @intCast(s.toUnsignedInt(zcu));
+ const s_u8: u8 = @intCast(s.toUnsignedInt(pt));
if (s_u8 != 0) try literal.writeChar(s_u8);
}
try literal.end();
@@ -1184,7 +1186,7 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
- const elem_val = try val.elemValue(zcu, index);
+ const elem_val = try val.elemValue(pt, index);
try dg.renderValue(writer, elem_val, initializer_type);
}
if (ai.sentinel) |s| {
@@ -1207,13 +1209,13 @@ pub const DeclGen = struct {
const comptime_val = tuple.values.get(ip)[field_index];
if (comptime_val != .none) continue;
const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (!empty) try writer.writeByte(',');
const field_val = Value.fromInterned(
switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
- .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
@@ -1242,12 +1244,12 @@ pub const DeclGen = struct {
var need_comma = false;
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (need_comma) try writer.writeByte(',');
need_comma = true;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
- .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
@@ -1262,14 +1264,14 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(zcu);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
- const bit_offset_ty = try zcu.intType(.unsigned, bits);
+ const bit_offset_ty = try pt.intType(.unsigned, bits);
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
eff_num_fields += 1;
}
@@ -1277,7 +1279,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderUndefValue(writer, ty, location);
try writer.writeByte(')');
- } else if (ty.bitSize(zcu) > 64) {
+ } else if (ty.bitSize(pt) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
@@ -1290,10 +1292,10 @@ pub const DeclGen = struct {
var needs_closing_paren = false;
for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
- .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
@@ -1307,7 +1309,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
try writer.writeAll(", ");
- try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
+ try dg.renderValue(writer, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
@@ -1316,7 +1318,7 @@ pub const DeclGen = struct {
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
- bit_offset += field_ty.bitSize(zcu);
+ bit_offset += field_ty.bitSize(pt);
needs_closing_paren = true;
eff_index += 1;
}
@@ -1326,7 +1328,7 @@ pub const DeclGen = struct {
var empty = true;
for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
@@ -1334,7 +1336,7 @@ pub const DeclGen = struct {
try writer.writeByte(')');
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
- .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
@@ -1345,12 +1347,12 @@ pub const DeclGen = struct {
if (bit_offset != 0) {
try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
- try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
+ try dg.renderValue(writer, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
} else {
try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
}
- bit_offset += field_ty.bitSize(zcu);
+ bit_offset += field_ty.bitSize(pt);
empty = false;
}
try writer.writeByte(')');
@@ -1363,7 +1365,7 @@ pub const DeclGen = struct {
.un => |un| {
const loaded_union = ip.loadUnionType(ty.toIntern());
if (un.tag == .none) {
- const backing_ty = try ty.unionBackingType(zcu);
+ const backing_ty = try ty.unionBackingType(pt);
switch (loaded_union.getLayout(ip)) {
.@"packed" => {
if (!location.isInitializer()) {
@@ -1378,7 +1380,7 @@ pub const DeclGen = struct {
return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
}
- const ptr_ty = try zcu.singleConstPtrType(ty);
+ const ptr_ty = try pt.singleConstPtrType(ty);
try writer.writeAll("*((");
try dg.renderType(writer, ptr_ty);
try writer.writeAll(")(");
@@ -1400,7 +1402,7 @@ pub const DeclGen = struct {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
if (loaded_union.getLayout(ip) == .@"packed") {
- if (field_ty.hasRuntimeBits(zcu)) {
+ if (field_ty.hasRuntimeBits(pt)) {
if (field_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try dg.renderCType(writer, ctype);
@@ -1431,7 +1433,7 @@ pub const DeclGen = struct {
),
.payload => {
try writer.writeByte('{');
- if (field_ty.hasRuntimeBits(zcu)) {
+ if (field_ty.hasRuntimeBits(pt)) {
try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))});
try dg.renderValue(
writer,
@@ -1443,7 +1445,7 @@ pub const DeclGen = struct {
const inner_field_ty = Type.fromInterned(
loaded_union.field_types.get(ip)[inner_field_index],
);
- if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
+ if (!inner_field_ty.hasRuntimeBits(pt)) continue;
try dg.renderUndefValue(writer, inner_field_ty, initializer_type);
break;
}
@@ -1464,7 +1466,8 @@ pub const DeclGen = struct {
ty: Type,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const zcu = dg.zcu;
+ const pt = dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = &dg.mod.resolved_target.result;
const ctype_pool = &dg.ctype_pool;
@@ -1490,7 +1493,7 @@ pub const DeclGen = struct {
=> {
const bits = ty.floatBits(target.*);
// All unsigned ints matching float types are pre-allocated.
- const repr_ty = zcu.intType(.unsigned, bits) catch unreachable;
+ const repr_ty = dg.pt.intType(.unsigned, bits) catch unreachable;
try writer.writeAll("zig_make_");
try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -1515,14 +1518,14 @@ pub const DeclGen = struct {
.error_set_type,
.inferred_error_set_type,
=> return writer.print("{x}", .{
- try dg.fmtIntLiteral(try zcu.undefValue(ty), location),
+ try dg.fmtIntLiteral(try pt.undefValue(ty), location),
}),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
return writer.print("){x})", .{
- try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other),
+ try dg.fmtIntLiteral(try pt.undefValue(Type.usize), .Other),
});
},
.Slice => {
@@ -1536,7 +1539,7 @@ pub const DeclGen = struct {
const ptr_ty = ty.slicePtrFieldType(zcu);
try dg.renderType(writer, ptr_ty);
return writer.print("){x}, {0x}}}", .{
- try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other),
+ try dg.fmtIntLiteral(try dg.pt.undefValue(Type.usize), .Other),
});
},
},
@@ -1591,7 +1594,7 @@ pub const DeclGen = struct {
var need_comma = false;
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (need_comma) try writer.writeByte(',');
need_comma = true;
@@ -1600,7 +1603,7 @@ pub const DeclGen = struct {
return writer.writeByte('}');
},
.@"packed" => return writer.print("{x}", .{
- try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+ try dg.fmtIntLiteral(try pt.undefValue(ty), .Other),
}),
}
},
@@ -1616,7 +1619,7 @@ pub const DeclGen = struct {
for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (need_comma) try writer.writeByte(',');
need_comma = true;
@@ -1654,7 +1657,7 @@ pub const DeclGen = struct {
const inner_field_ty = Type.fromInterned(
loaded_union.field_types.get(ip)[inner_field_index],
);
- if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
+ if (!inner_field_ty.hasRuntimeBits(pt)) continue;
try dg.renderUndefValue(
writer,
inner_field_ty,
@@ -1670,7 +1673,7 @@ pub const DeclGen = struct {
if (has_tag) try writer.writeByte('}');
},
.@"packed" => return writer.print("{x}", .{
- try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+ try dg.fmtIntLiteral(try pt.undefValue(ty), .Other),
}),
}
},
@@ -1775,7 +1778,7 @@ pub const DeclGen = struct {
},
},
) !void {
- const zcu = dg.zcu;
+ const zcu = dg.pt.zcu;
const ip = &zcu.intern_pool;
const fn_ty = fn_val.typeOf(zcu);
@@ -1856,7 +1859,7 @@ pub const DeclGen = struct {
fn ctypeFromType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType {
defer std.debug.assert(dg.scratch.items.len == 0);
- return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.zcu, dg.mod, kind);
+ return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.pt, dg.mod, kind);
}
fn byteSize(dg: *DeclGen, ctype: CType) u64 {
@@ -1879,8 +1882,8 @@ pub const DeclGen = struct {
}
fn renderCType(dg: *DeclGen, w: anytype, ctype: CType) error{OutOfMemory}!void {
- _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{});
- try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{});
+ _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{});
+ try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{});
}
const IntCastContext = union(enum) {
@@ -1904,18 +1907,18 @@ pub const DeclGen = struct {
}
};
fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool {
- const zcu = dg.zcu;
- const dest_bits = dest_ty.bitSize(zcu);
- const dest_int_info = dest_ty.intInfo(zcu);
+ const pt = dg.pt;
+ const dest_bits = dest_ty.bitSize(pt);
+ const dest_int_info = dest_ty.intInfo(pt.zcu);
- const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
+ const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu);
const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) {
.unsigned => Type.usize,
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(zcu);
- const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
+ const src_bits = src_eff_ty.bitSize(pt);
+ const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
(toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or
@@ -1944,8 +1947,9 @@ pub const DeclGen = struct {
src_ty: Type,
location: ValueRenderLocation,
) !void {
- const zcu = dg.zcu;
- const dest_bits = dest_ty.bitSize(zcu);
+ const pt = dg.pt;
+ const zcu = pt.zcu;
+ const dest_bits = dest_ty.bitSize(pt);
const dest_int_info = dest_ty.intInfo(zcu);
const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
@@ -1954,7 +1958,7 @@ pub const DeclGen = struct {
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(zcu);
+ const src_bits = src_eff_ty.bitSize(pt);
const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
@@ -2035,7 +2039,7 @@ pub const DeclGen = struct {
qualifiers,
CType.AlignAs.fromAlignment(.{
.@"align" = alignment,
- .abi = ty.abiAlignment(dg.zcu),
+ .abi = ty.abiAlignment(dg.pt),
}),
);
}
@@ -2048,6 +2052,7 @@ pub const DeclGen = struct {
qualifiers: CQualifiers,
alignas: CType.AlignAs,
) error{ OutOfMemory, AnalysisFail }!void {
+ const zcu = dg.pt.zcu;
switch (alignas.abiOrder()) {
.lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}),
.eq => {},
@@ -2055,10 +2060,10 @@ pub const DeclGen = struct {
}
try w.print("{}", .{
- try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, qualifiers),
+ try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, qualifiers),
});
try dg.writeName(w, name);
- try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{});
+ try renderTypeSuffix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, .{});
}
fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void {
@@ -2162,7 +2167,7 @@ pub const DeclGen = struct {
decl_index: InternPool.DeclIndex,
variable: InternPool.Key.Variable,
) !void {
- const zcu = dg.zcu;
+ const zcu = dg.pt.zcu;
const decl = zcu.declPtr(decl_index);
const fwd = dg.fwdDeclWriter();
try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static ");
@@ -2180,7 +2185,7 @@ pub const DeclGen = struct {
}
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void {
- const zcu = dg.zcu;
+ const zcu = dg.pt.zcu;
const ip = &zcu.intern_pool;
const decl = zcu.declPtr(decl_index);
@@ -2236,15 +2241,15 @@ pub const DeclGen = struct {
.bits => {},
}
- const zcu = dg.zcu;
- const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
+ const pt = dg.pt;
+ const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @as(u16, @intCast(ty.bitSize(zcu))),
+ .bits = @as(u16, @intCast(ty.bitSize(pt))),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
try writer.print(", {}", .{try dg.fmtIntLiteral(
- try zcu.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits),
+ try pt.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits),
.FunctionArgument,
)});
}
@@ -2254,7 +2259,7 @@ pub const DeclGen = struct {
val: Value,
loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
- const zcu = dg.zcu;
+ const zcu = dg.pt.zcu;
const kind = loc.toCTypeKind();
const ty = val.typeOf(zcu);
return std.fmt.Formatter(formatIntLiteral){ .data = .{
@@ -2616,7 +2621,8 @@ pub fn genGlobalAsm(zcu: *Zcu, writer: anytype) !void {
}
pub fn genErrDecls(o: *Object) !void {
- const zcu = o.dg.zcu;
+ const pt = o.dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const writer = o.writer();
@@ -2628,7 +2634,7 @@ pub fn genErrDecls(o: *Object) !void {
for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| {
const name = name_nts.toSlice(ip);
max_name_len = @max(name.len, max_name_len);
- const err_val = try zcu.intern(.{ .err = .{
+ const err_val = try pt.intern(.{ .err = .{
.ty = .anyerror_type,
.name = name_nts,
} });
@@ -2649,12 +2655,12 @@ pub fn genErrDecls(o: *Object) !void {
@memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice);
const identifier = name_buf[0 .. name_prefix.len + name_slice.len];
- const name_ty = try zcu.arrayType(.{
+ const name_ty = try pt.arrayType(.{
.len = name_slice.len,
.child = .u8_type,
.sentinel = .zero_u8,
});
- const name_val = try zcu.intern(.{ .aggregate = .{
+ const name_val = try pt.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = name.toString() },
} });
@@ -2673,7 +2679,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll(";\n");
}
- const name_array_ty = try zcu.arrayType(.{
+ const name_array_ty = try pt.arrayType(.{
.len = zcu.global_error_set.count(),
.child = .slice_const_u8_sentinel_0_type,
});
@@ -2693,14 +2699,15 @@ pub fn genErrDecls(o: *Object) !void {
if (value != 0) try writer.writeByte(',');
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
fmtIdent(name),
- try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, name.len), .StaticInitializer),
+ try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, name.len), .StaticInitializer),
});
}
try writer.writeAll("};\n");
}
pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void {
- const zcu = o.dg.zcu;
+ const pt = o.dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ctype_pool = &o.dg.ctype_pool;
const w = o.writer();
@@ -2721,20 +2728,20 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index];
const tag_name_len = tag_name.length(ip);
- const tag_val = try zcu.enumValueFieldIndex(enum_ty, @intCast(tag_index));
+ const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
- const name_ty = try zcu.arrayType(.{
+ const name_ty = try pt.arrayType(.{
.len = tag_name_len,
.child = .u8_type,
.sentinel = .zero_u8,
});
- const name_val = try zcu.intern(.{ .aggregate = .{
+ const name_val = try pt.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name.toString() },
} });
try w.print(" case {}: {{\n static ", .{
- try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, zcu), .Other),
+ try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, pt), .Other),
});
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
try w.writeAll(" = ");
@@ -2743,7 +2750,7 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
fmtIdent("name"),
- try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, tag_name_len), .Other),
+ try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, tag_name_len), .Other),
});
try w.writeAll(" }\n");
@@ -2788,7 +2795,7 @@ pub fn genFunc(f: *Function) !void {
defer tracy.end();
const o = &f.object;
- const zcu = o.dg.zcu;
+ const zcu = o.dg.pt.zcu;
const gpa = o.dg.gpa;
const decl_index = o.dg.pass.decl;
const decl = zcu.declPtr(decl_index);
@@ -2879,12 +2886,13 @@ pub fn genDecl(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
- const zcu = o.dg.zcu;
+ const pt = o.dg.pt;
+ const zcu = pt.zcu;
const decl_index = o.dg.pass.decl;
const decl = zcu.declPtr(decl_index);
const decl_ty = decl.typeOf(zcu);
- if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
+ if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return;
if (decl.val.getExternFunc(zcu)) |_| {
const fwd = o.dg.fwdDeclWriter();
try fwd.writeAll("zig_extern ");
@@ -2928,7 +2936,7 @@ pub fn genDeclValue(
alignment: Alignment,
@"linksection": InternPool.OptionalNullTerminatedString,
) !void {
- const zcu = o.dg.zcu;
+ const zcu = o.dg.pt.zcu;
const ty = val.typeOf(zcu);
const fwd = o.dg.fwdDeclWriter();
@@ -2946,7 +2954,7 @@ pub fn genDeclValue(
}
pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void {
- const zcu = dg.zcu;
+ const zcu = dg.pt.zcu;
const ip = &zcu.intern_pool;
const fwd = dg.fwdDeclWriter();
@@ -3088,7 +3096,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
}
fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
- const zcu = f.object.dg.zcu;
+ const zcu = f.object.dg.pt.zcu;
const ip = &zcu.intern_pool;
const air_tags = f.air.instructions.items(.tag);
const air_datas = f.air.instructions.items(.data);
@@ -3388,10 +3396,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
}
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3414,13 +3422,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs);
- const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
+ const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3449,10 +3458,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3475,14 +3484,15 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
const slice_ty = f.typeOf(bin_op.lhs);
const elem_ty = slice_ty.elemType2(zcu);
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt);
const slice = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3505,10 +3515,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3531,40 +3541,40 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
- .abi = elem_ty.abiAlignment(zcu),
+ .abi = elem_ty.abiAlignment(pt),
}),
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
- const gpa = f.object.dg.zcu.gpa;
- try f.allocs.put(gpa, local.new_local, true);
+ try f.allocs.put(zcu.gpa, local.new_local, true);
return .{ .local_ref = local.new_local };
}
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
- .abi = elem_ty.abiAlignment(zcu),
+ .abi = elem_ty.abiAlignment(pt),
}),
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
- const gpa = f.object.dg.zcu.gpa;
- try f.allocs.put(gpa, local.new_local, true);
+ try f.allocs.put(zcu.gpa, local.new_local, true);
return .{ .local_ref = local.new_local };
}
@@ -3593,7 +3603,8 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = f.typeOf(ty_op.operand);
@@ -3601,7 +3612,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
const src_ty = Type.fromInterned(ptr_info.child);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
@@ -3611,10 +3622,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const is_aligned = if (ptr_info.flags.alignment != .none)
- ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
+ ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
else
true;
- const is_array = lowersToArray(src_ty, zcu);
+ const is_array = lowersToArray(src_ty, pt);
const need_memcpy = !is_aligned or is_array;
const writer = f.object.writer();
@@ -3634,12 +3645,12 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("))");
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits: u16 = ptr_info.packed_offset.host_size * 8;
- const host_ty = try zcu.intType(.unsigned, host_bits);
+ const host_ty = try pt.intType(.unsigned, host_bits);
- const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
- const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
+ const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
+ const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const field_ty = try zcu.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
+ const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt))));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3650,9 +3661,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("((");
try f.renderType(writer, field_ty);
try writer.writeByte(')');
- const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
+ const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
if (cant_cast) {
- if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3680,7 +3691,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const writer = f.object.writer();
const op_inst = un_op.toIndex();
@@ -3695,11 +3707,11 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
- const is_array = lowersToArray(ret_ty, zcu);
+ const is_array = lowersToArray(ret_ty, pt);
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
- .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(f.object.dg.zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
});
try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@@ -3733,7 +3745,8 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
}
fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -3760,7 +3773,8 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -3809,13 +3823,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
try writer.print(", {x})", .{
- try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(zcu, scalar_ty)),
+ try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(pt, scalar_ty)),
});
},
.signed => {
const c_bits = toCIntBits(scalar_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- const shift_val = try zcu.intValue(Type.u8, c_bits - dest_bits);
+ const shift_val = try pt.intValue(Type.u8, c_bits - dest_bits);
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
@@ -3860,7 +3874,8 @@ fn airIntFromBool(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
// *a = b;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -3871,7 +3886,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.typeOf(bin_op.rhs);
- const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |v| v.isUndefDeep(zcu) else false;
+ const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndefDeep(zcu) else false;
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3887,10 +3902,10 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
const is_aligned = if (ptr_info.flags.alignment != .none)
- ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
+ ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
else
true;
- const is_array = lowersToArray(Type.fromInterned(ptr_info.child), zcu);
+ const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -3901,7 +3916,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
- assert(src_ty.eql(Type.fromInterned(ptr_info.child), f.object.dg.zcu));
+ assert(src_ty.eql(Type.fromInterned(ptr_info.child), zcu));
// If the source is a constant, writeCValue will emit a brace initialization
// so work around this by initializing into new local.
@@ -3932,12 +3947,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try v.end(f, inst, writer);
} else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
const host_bits = ptr_info.packed_offset.host_size * 8;
- const host_ty = try zcu.intType(.unsigned, host_bits);
+ const host_ty = try pt.intType(.unsigned, host_bits);
- const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
- const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
+ const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
+ const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const src_bits = src_ty.bitSize(zcu);
+ const src_bits = src_ty.bitSize(pt);
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) =
@@ -3950,7 +3965,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
- const mask_val = try zcu.intValue_big(host_ty, mask.toConst());
+ const mask_val = try pt.intValue_big(host_ty, mask.toConst());
const v = try Vectorize.start(f, inst, writer, ptr_ty);
const a = try Assignment.start(f, writer, src_scalar_ctype);
@@ -3967,9 +3982,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
- const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
+ const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
if (cant_cast) {
- if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(0, ");
@@ -4013,7 +4028,8 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4051,7 +4067,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = f.typeOf(ty_op.operand);
const scalar_ty = operand_ty.scalarType(zcu);
@@ -4084,11 +4101,12 @@ fn airBinOp(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
const scalar_ty = operand_ty.scalarType(zcu);
- if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat())
+ if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4122,11 +4140,12 @@ fn airCmpOp(
data: anytype,
operator: std.math.CompareOperator,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const lhs_ty = f.typeOf(data.lhs);
const scalar_ty = lhs_ty.scalarType(zcu);
- const scalar_bits = scalar_ty.bitSize(zcu);
+ const scalar_bits = scalar_ty.bitSize(pt);
if (scalar_ty.isInt(zcu) and scalar_bits > 64)
return airCmpBuiltinCall(
f,
@@ -4170,12 +4189,13 @@ fn airEquality(
inst: Air.Inst.Index,
operator: std.math.CompareOperator,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ctype_pool = &f.object.dg.ctype_pool;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
- const operand_bits = operand_ty.bitSize(zcu);
+ const operand_bits = operand_ty.bitSize(pt);
if (operand_ty.isAbiInt(zcu) and operand_bits > 64)
return airCmpBuiltinCall(
f,
@@ -4256,7 +4276,8 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4267,7 +4288,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
const elem_ty = inst_scalar_ty.elemType2(zcu);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs);
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
const local = try f.allocLocal(inst, inst_ty);
@@ -4299,13 +4320,14 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
}
fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
- if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat())
+ if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, .none);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4339,7 +4361,8 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
}
fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4374,7 +4397,8 @@ fn airCall(
inst: Air.Inst.Index,
modifier: std.builtin.CallModifier,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
// Not even allowed to call panic in a naked function.
if (f.object.dg.is_naked_fn) return .none;
@@ -4398,7 +4422,7 @@ fn airCall(
if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = arg_ctype,
- .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)),
});
try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@@ -4445,7 +4469,7 @@ fn airCall(
} else {
const local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
- .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
});
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
@@ -4456,7 +4480,7 @@ fn airCall(
callee: {
known: {
const fn_decl = fn_decl: {
- const callee_val = (try f.air.value(pl_op.operand, zcu)) orelse break :known;
+ const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known;
break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) {
.extern_func => |extern_func| extern_func.decl,
.func => |func| func.owner_decl,
@@ -4499,7 +4523,7 @@ fn airCall(
try writer.writeAll(");\n");
const result = result: {
- if (result_local == .none or !lowersToArray(ret_ty, zcu))
+ if (result_local == .none or !lowersToArray(ret_ty, pt))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@@ -4533,7 +4557,8 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func);
@@ -4545,10 +4570,11 @@ fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload);
- const operand_is_undef = if (try f.air.value(pl_op.operand, zcu)) |v| v.isUndefDeep(zcu) else false;
+ const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndefDeep(zcu) else false;
if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4564,7 +4590,8 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const liveness_block = f.liveness.getBlock(inst);
const block_id: usize = f.next_block_index;
@@ -4572,7 +4599,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst);
- const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst))
+ const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst))
try f.allocLocal(inst, inst_ty)
else
.none;
@@ -4611,7 +4638,8 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]);
@@ -4627,13 +4655,14 @@ fn lowerTry(
err_union_ty: Type,
is_ptr: bool,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const err_union = try f.resolveInst(operand);
const inst_ty = f.typeOfIndex(inst);
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
const payload_ty = err_union_ty.errorUnionPayload(zcu);
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
try writer.writeAll("if (");
@@ -4725,7 +4754,8 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const target = &f.object.dg.mod.resolved_target.result;
const ctype_pool = &f.object.dg.ctype_pool;
const writer = f.object.writer();
@@ -4771,7 +4801,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
try writer.writeAll(", sizeof(");
try f.renderType(
writer,
- if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty,
+ if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty,
);
try writer.writeAll("));\n");
@@ -4805,7 +4835,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
try writer.writeByte('(');
}
try writer.writeAll("zig_wrap_");
- const info_ty = try zcu.intType(dest_info.signedness, bits);
+ const info_ty = try pt.intType(dest_info.signedness, bits);
if (wrap_ctype) |ctype|
try f.object.dg.renderCTypeForBuiltinFnName(writer, ctype)
else
@@ -4935,7 +4965,8 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const condition = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4979,16 +5010,16 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
for (items) |item| {
try f.object.indent_writer.insertNewline();
try writer.writeAll("case ");
- const item_value = try f.air.value(item, zcu);
- if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{
- try f.fmtIntLiteral(try zcu.intValue(lowered_condition_ty, item_int)),
+ const item_value = try f.air.value(item, pt);
+ if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{
+ try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)),
}) else {
if (condition_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
}
- try f.object.dg.renderValue(writer, (try f.air.value(item, zcu)).?, .Other);
+ try f.object.dg.renderValue(writer, (try f.air.value(item, pt)).?, .Other);
}
try writer.writeByte(':');
}
@@ -5026,13 +5057,14 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool {
- const target = &f.object.dg.mod.resolved_target.result;
+ const dg = f.object.dg;
+ const target = &dg.mod.resolved_target.result;
return switch (constraint[0]) {
'{' => true,
'i', 'r' => false,
'I' => !target.cpu.arch.isArmOrThumb(),
else => switch (value) {
- .constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) {
+ .constant => |val| switch (dg.pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.decl => false,
else => true,
@@ -5045,7 +5077,8 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool
}
fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
@@ -5060,10 +5093,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const result = result: {
const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst);
- const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: {
+ const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: {
const inst_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(inst_ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)),
});
if (f.wantSafety()) {
try f.writeCValue(writer, inst_local, .Other);
@@ -5096,7 +5129,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("register ");
const output_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(output_ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)),
});
try f.allocs.put(gpa, output_local.new_local, false);
try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete);
@@ -5131,7 +5164,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (is_reg) try writer.writeAll("register ");
const input_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(input_ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)),
});
try f.allocs.put(gpa, input_local.new_local, false);
try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete);
@@ -5314,7 +5347,8 @@ fn airIsNull(
operator: std.math.CompareOperator,
is_ptr: bool,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ctype_pool = &f.object.dg.ctype_pool;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -5369,7 +5403,8 @@ fn airIsNull(
}
fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ctype_pool = &f.object.dg.ctype_pool;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -5404,7 +5439,8 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue
}
fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
@@ -5458,21 +5494,22 @@ fn fieldLocation(
container_ptr_ty: Type,
field_ptr_ty: Type,
field_index: u32,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u64,
} {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const container_ty = Type.fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child);
switch (ip.indexToKey(container_ty.toIntern())) {
.struct_type => {
const loaded_struct = ip.loadStructType(container_ty.toIntern());
return switch (loaded_struct.layout) {
- .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
.begin
- else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
+ else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
.{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
else
.{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@@ -5480,16 +5517,16 @@ fn fieldLocation(
else
.{ .field = field_index } },
.@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
- .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
+ .{ .byte_offset = @divExact(pt.structPackedFieldBitOffset(loaded_struct, field_index) +
container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }
else
.begin,
};
},
- .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
.begin
- else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
- .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
+ else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
+ .{ .byte_offset = container_ty.structFieldOffset(field_index, pt) }
else
.{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) }
@@ -5500,8 +5537,8 @@ fn fieldLocation(
switch (loaded_union.getLayout(ip)) {
.auto, .@"extern" => {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu))
- return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu))
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
+ return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt))
.{ .field = .{ .identifier = "payload" } }
else
.begin;
@@ -5546,7 +5583,8 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue
}
fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -5564,10 +5602,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, container_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) {
.begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
.field => |field| {
- const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8);
+ const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5580,14 +5618,14 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("))");
},
.byte_offset => |byte_offset| {
- const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8);
+ const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
try writer.writeByte(')');
try f.writeCValue(writer, field_ptr_val, .Other);
try writer.print(" - {})", .{
- try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)),
+ try f.fmtIntLiteral(try pt.intValue(Type.usize, byte_offset)),
});
},
}
@@ -5603,7 +5641,8 @@ fn fieldPtr(
container_ptr_val: CValue,
field_index: u32,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const container_ty = container_ptr_ty.childType(zcu);
const field_ptr_ty = f.typeOfIndex(inst);
@@ -5617,21 +5656,21 @@ fn fieldPtr(
try f.renderType(writer, field_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) {
.begin => try f.writeCValue(writer, container_ptr_val, .Initializer),
.field => |field| {
try writer.writeByte('&');
try f.writeCValueDerefMember(writer, container_ptr_val, field);
},
.byte_offset => |byte_offset| {
- const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8);
+ const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
try writer.writeByte(')');
try f.writeCValue(writer, container_ptr_val, .Other);
try writer.print(" + {})", .{
- try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)),
+ try f.fmtIntLiteral(try pt.intValue(Type.usize, byte_offset)),
});
},
}
@@ -5641,13 +5680,14 @@ fn fieldPtr(
}
fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try reap(f, inst, &.{extra.struct_operand});
return .none;
}
@@ -5671,15 +5711,15 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.@"packed" => {
const int_info = struct_ty.intInfo(zcu);
- const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
+ const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
+ const bit_offset = pt.structPackedFieldBitOffset(loaded_struct, extra.field_index);
const field_int_signedness = if (inst_ty.isAbiInt(zcu))
inst_ty.intInfo(zcu).signedness
else
.unsigned;
- const field_int_ty = try zcu.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
+ const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt))));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
@@ -5690,7 +5730,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
- if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
@@ -5702,12 +5742,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
try f.writeCValue(writer, struct_byval, .Other);
if (bit_offset > 0) try writer.print(", {})", .{
- try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)),
+ try f.fmtIntLiteral(try pt.intValue(bit_offset_ty, bit_offset)),
});
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
- if (inst_ty.eql(field_int_ty, f.object.dg.zcu)) return temp_local;
+ if (inst_ty.eql(field_int_ty, zcu)) return temp_local;
const local = try f.allocLocal(inst, inst_ty);
if (local.new_local != temp_local.new_local) {
@@ -5783,7 +5823,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
/// *(E!T) -> E
/// Note that the result is never a pointer.
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
@@ -5797,7 +5838,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const payload_ty = error_union_ty.errorUnionPayload(zcu);
const local = try f.allocLocal(inst, inst_ty);
- if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) {
+ if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) {
// The store will be 'x = x'; elide it.
return local;
}
@@ -5806,11 +5847,11 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (!payload_ty.hasRuntimeBits(zcu))
+ if (!payload_ty.hasRuntimeBits(pt))
try f.writeCValue(writer, operand, .Other)
else if (error_ty.errorSetIsEmpty(zcu))
try writer.print("{}", .{
- try f.fmtIntLiteral(try zcu.intValue(try zcu.errorIntType(), 0)),
+ try f.fmtIntLiteral(try pt.intValue(try pt.errorIntType(), 0)),
})
else if (operand_is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
@@ -5821,7 +5862,8 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
@@ -5831,7 +5873,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
const writer = f.object.writer();
- if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) {
+ if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) {
if (!is_ptr) return .none;
const local = try f.allocLocal(inst, inst_ty);
@@ -5896,12 +5938,13 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload(zcu);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
const err_ty = inst_ty.errorUnionSet(zcu);
const err = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5935,7 +5978,8 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const writer = f.object.writer();
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
@@ -5944,12 +5988,12 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const error_union_ty = operand_ty.childType(zcu);
const payload_ty = error_union_ty.errorUnionPayload(zcu);
- const err_int_ty = try zcu.errorIntType();
- const no_err = try zcu.intValue(err_int_ty, 0);
+ const err_int_ty = try pt.errorIntType();
+ const no_err = try pt.intValue(err_int_ty, 0);
try reap(f, inst, &.{ty_op.operand});
// First, set the non-error value.
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete));
try f.writeCValueDeref(writer, operand);
try a.assign(f, writer);
@@ -5994,13 +6038,14 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload(zcu);
const payload = try f.resolveInst(ty_op.operand);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
const err_ty = inst_ty.errorUnionSet(zcu);
try reap(f, inst, &.{ty_op.operand});
@@ -6020,14 +6065,15 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
else
try f.writeCValueMember(writer, local, .{ .identifier = "error" });
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, try zcu.intValue(try zcu.errorIntType(), 0), .Other);
+ try f.object.dg.renderValue(writer, try pt.intValue(try pt.errorIntType(), 0), .Other);
try a.end(f, writer);
}
return local;
}
fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const writer = f.object.writer();
@@ -6042,9 +6088,9 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
const a = try Assignment.start(f, writer, CType.bool);
try f.writeCValue(writer, local, .Other);
try a.assign(f, writer);
- const err_int_ty = try zcu.errorIntType();
+ const err_int_ty = try pt.errorIntType();
if (!error_ty.errorSetIsEmpty(zcu))
- if (payload_ty.hasRuntimeBits(zcu))
+ if (payload_ty.hasRuntimeBits(pt))
if (is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else
@@ -6052,17 +6098,18 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
else
try f.writeCValue(writer, operand, .Other)
else
- try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other);
+ try f.object.dg.renderValue(writer, try pt.intValue(err_int_ty, 0), .Other);
try writer.writeByte(' ');
try writer.writeAll(operator);
try writer.writeByte(' ');
- try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other);
+ try f.object.dg.renderValue(writer, try pt.intValue(err_int_ty, 0), .Other);
try a.end(f, writer);
return local;
}
fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ctype_pool = &f.object.dg.ctype_pool;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6096,7 +6143,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
if (operand_child_ctype.info(ctype_pool) == .array) {
try writer.writeByte('&');
try f.writeCValueDeref(writer, operand);
- try writer.print("[{}]", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))});
+ try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 0))});
} else try f.writeCValue(writer, operand, .Initializer);
}
try a.end(f, writer);
@@ -6106,7 +6153,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValueMember(writer, local, .{ .identifier = "len" });
try a.assign(f, writer);
try writer.print("{}", .{
- try f.fmtIntLiteral(try zcu.intValue(Type.usize, array_ty.arrayLen(zcu))),
+ try f.fmtIntLiteral(try pt.intValue(Type.usize, array_ty.arrayLen(zcu))),
});
try a.end(f, writer);
}
@@ -6115,7 +6162,8 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = f.typeOfIndex(inst);
@@ -6165,7 +6213,8 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try f.resolveInst(un_op);
@@ -6194,7 +6243,8 @@ fn airUnBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const operand = try f.resolveInst(operand_ref);
try reap(f, inst, &.{operand_ref});
@@ -6237,7 +6287,8 @@ fn airBinBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
@@ -6292,7 +6343,8 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
@@ -6333,7 +6385,7 @@ fn airCmpBuiltinCall(
try writer.writeByte(')');
if (!ref_ret) try writer.print("{s}{}", .{
compareOperatorC(operator),
- try f.fmtIntLiteral(try zcu.intValue(Type.i32, 0)),
+ try f.fmtIntLiteral(try pt.intValue(Type.i32, 0)),
});
try writer.writeAll(";\n");
try v.end(f, inst, writer);
@@ -6342,7 +6394,8 @@ fn airCmpBuiltinCall(
}
fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
@@ -6358,7 +6411,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
const repr_ty = if (ty.isRuntimeFloat())
- zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
+ pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
else
ty;
@@ -6448,7 +6501,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
}
fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const inst_ty = f.typeOfIndex(inst);
@@ -6461,10 +6515,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
+ const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8));
const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128;
- const repr_ty = if (is_float) zcu.intType(.unsigned, repr_bits) catch unreachable else ty;
+ const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty;
const local = try f.allocLocal(inst, inst_ty);
try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())});
@@ -6503,7 +6557,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const atomic_load = f.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const ptr = try f.resolveInst(atomic_load.ptr);
try reap(f, inst, &.{atomic_load.ptr});
@@ -6511,7 +6566,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty = ptr_ty.childType(zcu);
const repr_ty = if (ty.isRuntimeFloat())
- zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
+ pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
else
ty;
@@ -6539,7 +6594,8 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = f.typeOf(bin_op.lhs);
const ty = ptr_ty.childType(zcu);
@@ -6551,7 +6607,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const repr_ty = if (ty.isRuntimeFloat())
- zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
+ pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
else
ty;
@@ -6574,7 +6630,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
}
fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
if (ptr_ty.isSlice(zcu)) {
try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" });
} else {
@@ -6583,14 +6640,15 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
}
fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_ty = f.typeOf(bin_op.lhs);
const dest_slice = try f.resolveInst(bin_op.lhs);
const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.typeOf(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(zcu);
- const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |val| val.isUndefDeep(zcu) else false;
+ const elem_abi_size = elem_ty.abiSize(pt);
+ const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
const writer = f.object.writer();
if (val_is_undef) {
@@ -6628,7 +6686,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
// For the assignment in this loop, the array pointer needs to get
// casted to a regular pointer, otherwise an error like this occurs:
// error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
- const elem_ptr_ty = try zcu.ptrType(.{
+ const elem_ptr_ty = try pt.ptrType(.{
.child = elem_ty.toIntern(),
.flags = .{
.size = .C,
@@ -6640,7 +6698,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, 0), .Initializer);
+ try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, 0), .Initializer);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
@@ -6705,7 +6763,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_ptr = try f.resolveInst(bin_op.lhs);
const src_ptr = try f.resolveInst(bin_op.rhs);
@@ -6733,10 +6792,11 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_ty: Type) !void {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
switch (dest_ty.ptrSize(zcu)) {
.One => try writer.print("{}", .{
- try f.fmtIntLiteral(try zcu.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))),
+ try f.fmtIntLiteral(try pt.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))),
}),
.Many, .C => unreachable,
.Slice => try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }),
@@ -6744,14 +6804,15 @@ fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_t
}
fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const union_ptr = try f.resolveInst(bin_op.lhs);
const new_tag = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const union_ty = f.typeOf(bin_op.lhs).childType(zcu);
- const layout = union_ty.unionGetLayout(zcu);
+ const layout = union_ty.unionGetLayout(pt);
if (layout.tag_size == 0) return .none;
const tag_ty = union_ty.unionTagTypeSafety(zcu).?;
@@ -6765,14 +6826,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const union_ty = f.typeOf(ty_op.operand);
- const layout = union_ty.unionGetLayout(zcu);
+ const layout = union_ty.unionGetLayout(pt);
if (layout.tag_size == 0) return .none;
const inst_ty = f.typeOfIndex(inst);
@@ -6787,7 +6848,8 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const inst_ty = f.typeOfIndex(inst);
@@ -6824,7 +6886,8 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -6879,7 +6942,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -6895,11 +6958,11 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
for (0..extra.mask_len) |index| {
try f.writeCValue(writer, local, .Other);
try writer.writeByte('[');
- try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, index), .Other);
+ try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other);
try writer.writeAll("] = ");
- const mask_elem = (try mask.elemValue(zcu, index)).toSignedInt(zcu);
- const src_val = try zcu.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
+ const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
+ const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
try writer.writeByte('[');
@@ -6911,7 +6974,8 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const reduce = f.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const scalar_ty = f.typeOfIndex(inst);
@@ -6920,7 +6984,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(reduce.operand);
const writer = f.object.writer();
- const use_operator = scalar_ty.bitSize(zcu) <= 64;
+ const use_operator = scalar_ty.bitSize(pt) <= 64;
const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
builtin: Func,
@@ -6971,37 +7035,37 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, switch (reduce.operation) {
.Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.false,
- .Int => try zcu.intValue(scalar_ty, 0),
+ .Int => try pt.intValue(scalar_ty, 0),
else => unreachable,
},
.And => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.true,
.Int => switch (scalar_ty.intInfo(zcu).signedness) {
- .unsigned => try scalar_ty.maxIntScalar(zcu, scalar_ty),
- .signed => try zcu.intValue(scalar_ty, -1),
+ .unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty),
+ .signed => try pt.intValue(scalar_ty, -1),
},
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => try zcu.intValue(scalar_ty, 0),
- .Float => try zcu.floatValue(scalar_ty, 0.0),
+ .Int => try pt.intValue(scalar_ty, 0),
+ .Float => try pt.floatValue(scalar_ty, 0.0),
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => try zcu.intValue(scalar_ty, 1),
- .Float => try zcu.floatValue(scalar_ty, 1.0),
+ .Int => try pt.intValue(scalar_ty, 1),
+ .Float => try pt.floatValue(scalar_ty, 1.0),
else => unreachable,
},
.Min => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.true,
- .Int => try scalar_ty.maxIntScalar(zcu, scalar_ty),
- .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)),
+ .Int => try scalar_ty.maxIntScalar(pt, scalar_ty),
+ .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => Value.false,
- .Int => try scalar_ty.minIntScalar(zcu, scalar_ty),
- .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)),
+ .Int => try scalar_ty.minIntScalar(pt, scalar_ty),
+ .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
}, .Initializer);
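
The renderValue table above seeds the reduction accumulator with each operation's identity element: 0 for Or/Xor/Add, 1 for Mul, all ones (or -1 for signed) for And, and NaN for float Min/Max, presumably because fmin/fmax-style helpers return the non-NaN operand, so the first real element always replaces the seed. A small plain-Zig illustration of why identity seeds are safe (values are arbitrary; this is not the backend's generated C):

    const std = @import("std");

    pub fn main() void {
        const v = [_]u8{ 3, 7, 1 };

        var acc_add: u32 = 0; // Add identity
        var acc_mul: u32 = 1; // Mul identity
        var acc_and: u8 = 0xff; // And identity for an unsigned integer: all ones
        var acc_or: u8 = 0; // Or/Xor identity

        for (v) |x| {
            acc_add += x;
            acc_mul *= x;
            acc_and &= x;
            acc_or |= x;
        }
        // Folding real elements into the identity cannot change the result spuriously:
        // 0 + 3 + 7 + 1, 1 * 3 * 7 * 1, 0xff & 3 & 7 & 1, 0 | 3 | 7 | 1.
        std.debug.print("{} {} {} {}\n", .{ acc_add, acc_mul, acc_and, acc_or });
    }
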
@@ -7046,7 +7110,8 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const inst_ty = f.typeOfIndex(inst);
@@ -7096,7 +7161,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@@ -7113,7 +7178,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(" = ");
const int_info = inst_ty.intInfo(zcu);
- const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
+ const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
var bit_offset: u64 = 0;
@@ -7121,7 +7186,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (0..elements.len) |field_index| {
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
const field_ty = inst_ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (!empty) {
try writer.writeAll("zig_or_");
@@ -7134,7 +7199,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (resolved_elements, 0..) |element, field_index| {
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
const field_ty = inst_ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
if (!empty) try writer.writeAll(", ");
// TODO: Skip this entire shift if val is 0?
@@ -7160,13 +7225,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
try writer.print(", {}", .{
- try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)),
+ try f.fmtIntLiteral(try pt.intValue(bit_offset_ty, bit_offset)),
});
try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
- bit_offset += field_ty.bitSize(zcu);
+ bit_offset += field_ty.bitSize(pt);
empty = false;
}
try writer.writeAll(";\n");
@@ -7176,7 +7241,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
.anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
@@ -7194,7 +7259,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
@@ -7211,15 +7277,15 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (loaded_union.getLayout(ip) == .@"packed") return f.moveCValue(inst, union_ty, payload);
const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
- const layout = union_ty.unionGetLayout(zcu);
+ const layout = union_ty.unionGetLayout(pt);
if (layout.tag_size != 0) {
const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
- const tag_val = try zcu.enumValueFieldIndex(tag_ty, field_index);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const a = try Assignment.start(f, writer, try f.ctypeFromType(tag_ty, .complete));
try f.writeCValueMember(writer, local, .{ .identifier = "tag" });
try a.assign(f, writer);
- try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, zcu))});
+ try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, pt))});
try a.end(f, writer);
}
break :field .{ .payload_identifier = field_name.toSlice(ip) };
@@ -7234,7 +7300,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const prefetch = f.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
const ptr_ty = f.typeOf(prefetch.ptr);
@@ -7291,7 +7358,8 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7326,7 +7394,8 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const decl_index = f.object.dg.pass.decl;
const decl = zcu.declPtr(decl_index);
@@ -7699,7 +7768,8 @@ fn formatIntLiteral(
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const zcu = data.dg.zcu;
+ const pt = data.dg.pt;
+ const zcu = pt.zcu;
const target = &data.dg.mod.resolved_target.result;
const ctype_pool = &data.dg.ctype_pool;
@@ -7732,7 +7802,7 @@ fn formatIntLiteral(
};
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
- } else data.val.toBigInt(&int_buf, zcu);
+ } else data.val.toBigInt(&int_buf, pt);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);
@@ -7866,7 +7936,7 @@ fn formatIntLiteral(
.int_info = c_limb_int_info,
.kind = data.kind,
.ctype = c_limb_ctype,
- .val = try zcu.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
+ .val = try pt.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
}, fmt, options, writer);
}
}
@@ -7940,17 +8010,18 @@ const Vectorize = struct {
index: CValue = .none,
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
- const zcu = f.object.dg.zcu;
+ const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
return if (ty.zigTypeTag(zcu) == .Vector) index: {
const local = try f.allocLocal(inst, Type.usize);
try writer.writeAll("for (");
try f.writeCValue(writer, local, .Other);
- try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))});
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 0))});
try f.writeCValue(writer, local, .Other);
- try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, ty.vectorLen(zcu)))});
+ try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, ty.vectorLen(zcu)))});
try f.writeCValue(writer, local, .Other);
- try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 1))});
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 1))});
f.object.indent_writer.pushIndent();
break :index .{ .index = local };
@@ -7974,10 +8045,10 @@ const Vectorize = struct {
}
};
-fn lowersToArray(ty: Type, zcu: *Zcu) bool {
- return switch (ty.zigTypeTag(zcu)) {
+fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
+ return switch (ty.zigTypeTag(pt.zcu)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
+ else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null,
};
}
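
Taken together, the src/codegen/c.zig hunks apply one mechanical transformation: functions stop caching a bare *Zcu and instead pull the per-thread handle f.object.dg.pt, re-deriving zcu from it, and queries that may resolve or intern anything (intValue, intType, ptrType, abiAlignment, bitSize, and friends) move from the *Zcu receiver to Zcu.PerThread, while purely structural lookups keep taking zcu. A self-contained sketch of that shape, using stand-in types rather than the compiler's real Zcu API:

    const std = @import("std");

    // Stand-ins only; the real Zcu and Zcu.PerThread live in src/Zcu.zig.
    const Zcu = struct {
        interned_count: usize = 0,

        pub const PerThread = struct {
            zcu: *Zcu,
            tid: enum { main, worker },

            // Anything that may add to shared compiler state hangs off the
            // per-thread handle after this refactor.
            pub fn intValue(pt: PerThread, value: i64) i64 {
                pt.zcu.interned_count += 1;
                return value;
            }
        };
    };

    pub fn main() void {
        var zcu: Zcu = .{};
        // Mirrors the `.zcu = comp.module.?, .tid = .main` init in llvm.zig below.
        const pt: Zcu.PerThread = .{ .zcu = &zcu, .tid = .main };
        _ = pt.intValue(42); // interning-style queries now go through pt
        std.debug.print("interned: {}\n", .{pt.zcu.interned_count}); // reads still reach zcu via pt.zcu
    }
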
src/codegen/llvm.zig
@@ -15,8 +15,6 @@ const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
const build_options = @import("build_options");
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
const Air = @import("../Air.zig");
@@ -810,7 +808,7 @@ pub const Object = struct {
gpa: Allocator,
builder: Builder,
- module: *Module,
+ pt: Zcu.PerThread,
debug_compile_unit: Builder.Metadata,
@@ -820,7 +818,7 @@ pub const Object = struct {
debug_enums: std.ArrayListUnmanaged(Builder.Metadata),
debug_globals: std.ArrayListUnmanaged(Builder.Metadata),
- debug_file_map: std.AutoHashMapUnmanaged(*const Module.File, Builder.Metadata),
+ debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata),
debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata),
debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata),
@@ -992,7 +990,10 @@ pub const Object = struct {
obj.* = .{
.gpa = gpa,
.builder = builder,
- .module = comp.module.?,
+ .pt = .{
+ .zcu = comp.module.?,
+ .tid = .main,
+ },
.debug_compile_unit = debug_compile_unit,
.debug_enums_fwd_ref = debug_enums_fwd_ref,
.debug_globals_fwd_ref = debug_globals_fwd_ref,
@@ -1033,7 +1034,8 @@ pub const Object = struct {
// If o.error_name_table is null, then it was not referenced by any instructions.
if (o.error_name_table == .none) return;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const error_name_list = mod.global_error_set.keys();
const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len);
@@ -1072,7 +1074,7 @@ pub const Object = struct {
table_variable_index.setMutability(.constant, &o.builder);
table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
table_variable_index.setAlignment(
- slice_ty.abiAlignment(mod).toLlvm(),
+ slice_ty.abiAlignment(pt).toLlvm(),
&o.builder,
);
@@ -1083,8 +1085,7 @@ pub const Object = struct {
// If there is no such function in the module, it means the source code does not need it.
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return;
const llvm_fn = o.builder.getGlobal(name) orelse return;
- const mod = o.module;
- const errors_len = mod.global_error_set.count();
+ const errors_len = o.pt.zcu.global_error_set.count();
var wip = try Builder.WipFunction.init(&o.builder, .{
.function = llvm_fn.ptrConst(&o.builder).kind.function,
@@ -1106,10 +1107,8 @@ pub const Object = struct {
}
fn genModuleLevelAssembly(object: *Object) !void {
- const mod = object.module;
-
const writer = object.builder.setModuleAsm();
- for (mod.global_assembly.values()) |assembly| {
+ for (object.pt.zcu.global_assembly.values()) |assembly| {
try writer.print("{s}\n", .{assembly});
}
try object.builder.finishModuleAsm();
@@ -1131,6 +1130,9 @@ pub const Object = struct {
};
pub fn emit(self: *Object, options: EmitOptions) !void {
+ const zcu = self.pt.zcu;
+ const comp = zcu.comp;
+
{
try self.genErrorNameTable();
try self.genCmpLtErrorsLenFunction();
@@ -1143,8 +1145,8 @@ pub const Object = struct {
const namespace_index = self.debug_unresolved_namespace_scopes.keys()[i];
const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i];
- const namespace = self.module.namespacePtr(namespace_index);
- const debug_type = try self.lowerDebugType(namespace.getType(self.module));
+ const namespace = zcu.namespacePtr(namespace_index);
+ const debug_type = try self.lowerDebugType(namespace.getType(zcu));
self.builder.debugForwardReferenceSetType(fwd_ref, debug_type);
}
@@ -1206,12 +1208,12 @@ pub const Object = struct {
try file.writeAll(ptr[0..(bitcode.len * 4)]);
}
- if (!build_options.have_llvm or !self.module.comp.config.use_lib_llvm) {
+ if (!build_options.have_llvm or !comp.config.use_lib_llvm) {
log.err("emitting without libllvm not implemented", .{});
return error.FailedToEmit;
}
- initializeLLVMTarget(self.module.comp.root_mod.resolved_target.result.cpu.arch);
+ initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch);
const context: *llvm.Context = llvm.Context.create();
errdefer context.dispose();
@@ -1247,8 +1249,8 @@ pub const Object = struct {
@panic("Invalid LLVM triple");
}
- const optimize_mode = self.module.comp.root_mod.optimize_mode;
- const pic = self.module.comp.root_mod.pic;
+ const optimize_mode = comp.root_mod.optimize_mode;
+ const pic = comp.root_mod.pic;
const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug)
.None
@@ -1257,12 +1259,12 @@ pub const Object = struct {
const reloc_mode: llvm.RelocMode = if (pic)
.PIC
- else if (self.module.comp.config.link_mode == .dynamic)
+ else if (comp.config.link_mode == .dynamic)
llvm.RelocMode.DynamicNoPIC
else
.Static;
- const code_model: llvm.CodeModel = switch (self.module.comp.root_mod.code_model) {
+ const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) {
.default => .Default,
.tiny => .Tiny,
.small => .Small,
@@ -1277,24 +1279,24 @@ pub const Object = struct {
var target_machine = llvm.TargetMachine.create(
target,
target_triple_sentinel,
- if (self.module.comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null,
- self.module.comp.root_mod.resolved_target.llvm_cpu_features.?,
+ if (comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null,
+ comp.root_mod.resolved_target.llvm_cpu_features.?,
opt_level,
reloc_mode,
code_model,
- self.module.comp.function_sections,
- self.module.comp.data_sections,
+ comp.function_sections,
+ comp.data_sections,
float_abi,
- if (target_util.llvmMachineAbi(self.module.comp.root_mod.resolved_target.result)) |s| s.ptr else null,
+ if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |s| s.ptr else null,
);
errdefer target_machine.dispose();
if (pic) module.setModulePICLevel();
- if (self.module.comp.config.pie) module.setModulePIELevel();
+ if (comp.config.pie) module.setModulePIELevel();
if (code_model != .Default) module.setModuleCodeModel(code_model);
- if (self.module.comp.llvm_opt_bisect_limit >= 0) {
- context.setOptBisectLimit(self.module.comp.llvm_opt_bisect_limit);
+ if (comp.llvm_opt_bisect_limit >= 0) {
+ context.setOptBisectLimit(comp.llvm_opt_bisect_limit);
}
// Unfortunately, LLVM shits the bed when we ask for both binary and assembly.
@@ -1352,11 +1354,13 @@ pub const Object = struct {
pub fn updateFunc(
o: *Object,
- zcu: *Module,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
+ assert(std.meta.eql(pt, o.pt));
+ const zcu = pt.zcu;
const comp = zcu.comp;
const func = zcu.funcInfo(func_index);
const decl_index = func.owner_decl;
@@ -1437,7 +1441,7 @@ pub const Object = struct {
var llvm_arg_i: u32 = 0;
// This gets the LLVM values from the function and stores them in `dg.args`.
- const sret = firstParamSRet(fn_info, zcu, target);
+ const sret = firstParamSRet(fn_info, pt, target);
const ret_ptr: Builder.Value = if (sret) param: {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1478,8 +1482,8 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
const param = wip.arg(llvm_arg_i);
- if (isByRef(param_ty, zcu)) {
- const alignment = param_ty.abiAlignment(zcu).toLlvm();
+ if (isByRef(param_ty, pt)) {
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const param_llvm_ty = param.typeOfWip(&wip);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1495,12 +1499,12 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
- const alignment = param_ty.abiAlignment(zcu).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
llvm_arg_i += 1;
- if (isByRef(param_ty, zcu)) {
+ if (isByRef(param_ty, pt)) {
args.appendAssumeCapacity(param);
} else {
args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
@@ -1510,12 +1514,12 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
- const alignment = param_ty.abiAlignment(zcu).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder);
llvm_arg_i += 1;
- if (isByRef(param_ty, zcu)) {
+ if (isByRef(param_ty, pt)) {
args.appendAssumeCapacity(param);
} else {
args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
@@ -1528,11 +1532,11 @@ pub const Object = struct {
llvm_arg_i += 1;
const param_llvm_ty = try o.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(zcu).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
- args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
+ args.appendAssumeCapacity(if (isByRef(param_ty, pt))
arg_ptr
else
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1556,7 +1560,7 @@ pub const Object = struct {
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
- Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm();
+ Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
const ptr_param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1573,7 +1577,7 @@ pub const Object = struct {
const field_types = it.types_buffer[0..it.types_len];
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
- const param_alignment = param_ty.abiAlignment(zcu).toLlvm();
+ const param_alignment = param_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target);
const llvm_ty = try o.builder.structType(.normal, field_types);
for (0..field_types.len) |field_i| {
@@ -1585,7 +1589,7 @@ pub const Object = struct {
_ = try wip.store(.normal, param, field_ptr, alignment);
}
- const is_by_ref = isByRef(param_ty, zcu);
+ const is_by_ref = isByRef(param_ty, pt);
args.appendAssumeCapacity(if (is_by_ref)
arg_ptr
else
@@ -1603,11 +1607,11 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(zcu).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
- args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
+ args.appendAssumeCapacity(if (isByRef(param_ty, pt))
arg_ptr
else
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1618,11 +1622,11 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(zcu).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
- args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
+ args.appendAssumeCapacity(if (isByRef(param_ty, pt))
arg_ptr
else
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1700,8 +1704,9 @@ pub const Object = struct {
try fg.wip.finish();
}
- pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void {
- const decl = module.declPtr(decl_index);
+ pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
+ assert(std.meta.eql(pt, self.pt));
+ const decl = pt.zcu.declPtr(decl_index);
var dg: DeclGen = .{
.object = self,
.decl = decl,
@@ -1711,7 +1716,7 @@ pub const Object = struct {
dg.genDecl() catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
- try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?);
+ try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?);
dg.err_msg = null;
return;
},
@@ -1721,10 +1726,12 @@ pub const Object = struct {
pub fn updateExports(
self: *Object,
- zcu: *Zcu,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
+ assert(std.meta.eql(pt, self.pt));
+ const zcu = pt.zcu;
const decl_index = switch (exported) {
.decl_index => |i| i,
.value => |val| return updateExportedValue(self, zcu, val, export_indices),
@@ -1748,7 +1755,7 @@ pub const Object = struct {
fn updateExportedValue(
o: *Object,
- mod: *Module,
+ mod: *Zcu,
exported_value: InternPool.Index,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
@@ -1783,7 +1790,7 @@ pub const Object = struct {
fn updateExportedGlobal(
o: *Object,
- mod: *Module,
+ mod: *Zcu,
global_index: Builder.Global.Index,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
@@ -1879,7 +1886,7 @@ pub const Object = struct {
global.delete(&self.builder);
}
- fn getDebugFile(o: *Object, file: *const Module.File) Allocator.Error!Builder.Metadata {
+ fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata {
const gpa = o.gpa;
const gop = try o.debug_file_map.getOrPut(gpa, file);
errdefer assert(o.debug_file_map.remove(file));
@@ -1909,7 +1916,8 @@ pub const Object = struct {
const gpa = o.gpa;
const target = o.target;
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
if (o.debug_type_map.get(ty)) |debug_type| return debug_type;
@@ -1931,7 +1939,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const builder_name = try o.builder.metadataString(name);
- const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types
+ const debug_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types
const debug_int_type = switch (info.signedness) {
.signed => try o.builder.debugSignedType(builder_name, debug_bits),
.unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits),
@@ -1941,9 +1949,9 @@ pub const Object = struct {
},
.Enum => {
const owner_decl_index = ty.getOwnerDecl(zcu);
- const owner_decl = o.module.declPtr(owner_decl_index);
+ const owner_decl = zcu.declPtr(owner_decl_index);
- if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_enum_type);
return debug_enum_type;
@@ -1961,7 +1969,7 @@ pub const Object = struct {
for (enum_type.names.get(ip), 0..) |field_name_ip, i| {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = if (enum_type.values.len != 0)
- Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu)
+ Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, pt)
else
std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
@@ -1986,8 +1994,8 @@ pub const Object = struct {
scope,
owner_decl.typeSrcLine(zcu) + 1, // Line
try o.lowerDebugType(int_ty),
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(enumerators),
);
@@ -2027,10 +2035,10 @@ pub const Object = struct {
ptr_info.flags.is_const or
ptr_info.flags.is_volatile or
ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
- !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu))
+ !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt))
{
- const bland_ptr_ty = try zcu.ptrType(.{
- .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu))
+ const bland_ptr_ty = try pt.ptrType(.{
+ .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt))
.anyopaque_type
else
ptr_info.child,
@@ -2060,10 +2068,10 @@ pub const Object = struct {
defer gpa.free(name);
const line = 0;
- const ptr_size = ptr_ty.abiSize(zcu);
- const ptr_align = ptr_ty.abiAlignment(zcu);
- const len_size = len_ty.abiSize(zcu);
- const len_align = len_ty.abiAlignment(zcu);
+ const ptr_size = ptr_ty.abiSize(pt);
+ const ptr_align = ptr_ty.abiAlignment(pt);
+ const len_size = len_ty.abiSize(pt);
+ const len_align = len_ty.abiAlignment(pt);
const len_offset = len_align.forward(ptr_size);
@@ -2095,8 +2103,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
line,
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_ptr_type,
debug_len_type,
@@ -2124,7 +2132,7 @@ pub const Object = struct {
0, // Line
debug_elem_ty,
target.ptrBitWidth(),
- (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8,
+ (ty.ptrAlignment(pt).toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2149,7 +2157,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const owner_decl_index = ty.getOwnerDecl(zcu);
- const owner_decl = o.module.declPtr(owner_decl_index);
+ const owner_decl = zcu.declPtr(owner_decl_index);
const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu);
const debug_opaque_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
@@ -2171,8 +2179,8 @@ pub const Object = struct {
.none, // Scope
0, // Line
try o.lowerDebugType(ty.childType(zcu)),
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2214,8 +2222,8 @@ pub const Object = struct {
.none, // Scope
0, // Line
debug_elem_type,
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2231,7 +2239,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const child_ty = ty.optionalChild(zcu);
- if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!child_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const debug_bool_type = try o.builder.debugBoolType(
try o.builder.metadataString(name),
8,
@@ -2258,10 +2266,10 @@ pub const Object = struct {
}
const non_null_ty = Type.u8;
- const payload_size = child_ty.abiSize(zcu);
- const payload_align = child_ty.abiAlignment(zcu);
- const non_null_size = non_null_ty.abiSize(zcu);
- const non_null_align = non_null_ty.abiAlignment(zcu);
+ const payload_size = child_ty.abiSize(pt);
+ const payload_align = child_ty.abiAlignment(pt);
+ const non_null_size = non_null_ty.abiSize(pt);
+ const non_null_align = non_null_ty.abiAlignment(pt);
const non_null_offset = non_null_align.forward(payload_size);
const debug_data_type = try o.builder.debugMemberType(
@@ -2292,8 +2300,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_data_type,
debug_some_type,
@@ -2310,7 +2318,7 @@ pub const Object = struct {
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(zcu);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// TODO: Maybe remove?
const debug_error_union_type = try o.lowerDebugType(Type.anyerror);
try o.debug_type_map.put(gpa, ty, debug_error_union_type);
@@ -2320,10 +2328,10 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
- const error_size = Type.anyerror.abiSize(zcu);
- const error_align = Type.anyerror.abiAlignment(zcu);
- const payload_size = payload_ty.abiSize(zcu);
- const payload_align = payload_ty.abiAlignment(zcu);
+ const error_size = Type.anyerror.abiSize(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ const payload_size = payload_ty.abiSize(pt);
+ const payload_align = payload_ty.abiAlignment(pt);
var error_index: u32 = undefined;
var payload_index: u32 = undefined;
@@ -2371,8 +2379,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&fields),
);
@@ -2399,8 +2407,8 @@ pub const Object = struct {
const info = Type.fromInterned(backing_int_ty).intInfo(zcu);
const builder_name = try o.builder.metadataString(name);
const debug_int_type = switch (info.signedness) {
- .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8),
- .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8),
+ .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(pt) * 8),
+ .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(pt) * 8),
};
try o.debug_type_map.put(gpa, ty, debug_int_type);
return debug_int_type;
@@ -2420,10 +2428,10 @@ pub const Object = struct {
const debug_fwd_ref = try o.builder.debugForwardReference();
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
- const field_size = Type.fromInterned(field_ty).abiSize(zcu);
- const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
+ const field_size = Type.fromInterned(field_ty).abiSize(pt);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
const field_offset = field_align.forward(offset);
offset = field_offset + field_size;
@@ -2451,8 +2459,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2479,7 +2487,7 @@ pub const Object = struct {
else => {},
}
- if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
const owner_decl_index = ty.getOwnerDecl(zcu);
const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_struct_type);
@@ -2502,14 +2510,14 @@ pub const Object = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
- const field_size = field_ty.abiSize(zcu);
- const field_align = zcu.structFieldAlignment(
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ const field_size = field_ty.abiSize(pt);
+ const field_align = pt.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
);
- const field_offset = ty.structFieldOffset(field_index, zcu);
+ const field_offset = ty.structFieldOffset(field_index, pt);
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls);
@@ -2532,8 +2540,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2553,7 +2561,7 @@ pub const Object = struct {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.haveFieldTypes(ip) or
- !ty.hasRuntimeBitsIgnoreComptime(zcu) or
+ !ty.hasRuntimeBitsIgnoreComptime(pt) or
!union_type.haveLayout(ip))
{
const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
@@ -2561,7 +2569,7 @@ pub const Object = struct {
return debug_union_type;
}
- const layout = zcu.getUnionLayout(union_type);
+ const layout = pt.getUnionLayout(union_type);
const debug_fwd_ref = try o.builder.debugForwardReference();
@@ -2575,8 +2583,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
@@ -2603,12 +2611,12 @@ pub const Object = struct {
for (0..tag_type.names.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
- const field_size = Type.fromInterned(field_ty).abiSize(zcu);
+ const field_size = Type.fromInterned(field_ty).abiSize(pt);
const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
.@"packed" => .none,
- .auto, .@"extern" => zcu.unionFieldNormalAlignment(union_type, @intCast(field_index)),
+ .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)),
};
const field_name = tag_type.names.get(ip)[field_index];
@@ -2637,8 +2645,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2696,8 +2704,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(zcu) * 8,
- (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
+ ty.abiSize(pt) * 8,
+ (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&full_fields),
);
@@ -2718,13 +2726,13 @@ pub const Object = struct {
try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len);
// Return type goes first.
- if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) {
- const sret = firstParamSRet(fn_info, zcu, target);
+ if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(pt)) {
+ const sret = firstParamSRet(fn_info, pt, target);
const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty));
if (sret) {
- const ptr_ty = try zcu.singleMutPtrType(Type.fromInterned(fn_info.return_type));
+ const ptr_ty = try pt.singleMutPtrType(Type.fromInterned(fn_info.return_type));
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
}
} else {
@@ -2732,18 +2740,18 @@ pub const Object = struct {
}
if (Type.fromInterned(fn_info.return_type).isError(zcu) and
- o.module.comp.config.any_error_tracing)
+ zcu.comp.config.any_error_tracing)
{
- const ptr_ty = try zcu.singleMutPtrType(try o.getStackTraceType());
+ const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType());
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
}
for (0..fn_info.param_types.len) |i| {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]);
- if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
- if (isByRef(param_ty, zcu)) {
- const ptr_ty = try zcu.singleMutPtrType(param_ty);
+ if (isByRef(param_ty, pt)) {
+ const ptr_ty = try pt.singleMutPtrType(param_ty);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
} else {
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(param_ty));
@@ -2770,7 +2778,7 @@ pub const Object = struct {
}
fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata {
- const zcu = o.module;
+ const zcu = o.pt.zcu;
const namespace = zcu.namespacePtr(namespace_index);
const file_scope = namespace.fileScope(zcu);
if (namespace.parent == .none) return try o.getDebugFile(file_scope);
@@ -2783,7 +2791,7 @@ pub const Object = struct {
}
fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata {
- const zcu = o.module;
+ const zcu = o.pt.zcu;
const decl = zcu.declPtr(decl_index);
const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu);
return o.builder.debugStructType(
@@ -2799,7 +2807,7 @@ pub const Object = struct {
}
fn getStackTraceType(o: *Object) Allocator.Error!Type {
- const zcu = o.module;
+ const zcu = o.pt.zcu;
const std_mod = zcu.std_mod;
const std_file_imported = zcu.importPkg(std_mod) catch unreachable;
@@ -2807,13 +2815,13 @@ pub const Object = struct {
const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls);
const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index);
const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace);
- const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = zcu }).?;
+ const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?;
const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls);
// buffer is only used for int_type, `builtin` is a struct.
const builtin_ty = zcu.declPtr(builtin_decl).val.toType();
const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?;
- const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .zcu = zcu }).?;
+ const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?;
const stack_trace_decl = zcu.declPtr(stack_trace_decl_index);
// Sema should have ensured that StackTrace was analyzed.
@@ -2824,7 +2832,7 @@ pub const Object = struct {
fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 {
var buffer = std.ArrayList(u8).init(o.gpa);
errdefer buffer.deinit();
- try ty.print(buffer.writer(), o.module);
+ try ty.print(buffer.writer(), o.pt);
return buffer.toOwnedSliceSentinel(0);
}
@@ -2835,7 +2843,8 @@ pub const Object = struct {
o: *Object,
decl_index: InternPool.DeclIndex,
) Allocator.Error!Builder.Function.Index {
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = o.gpa;
const decl = zcu.declPtr(decl_index);
@@ -2848,7 +2857,7 @@ pub const Object = struct {
assert(decl.has_tv);
const fn_info = zcu.typeToFunc(zig_fn_type).?;
const target = owner_mod.resolved_target.result;
- const sret = firstParamSRet(fn_info, zcu, target);
+ const sret = firstParamSRet(fn_info, pt, target);
const is_extern = decl.isExtern(zcu);
const function_index = try o.builder.addFunction(
@@ -2929,14 +2938,14 @@ pub const Object = struct {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
- if (!isByRef(param_ty, zcu)) {
+ if (!isByRef(param_ty, pt)) {
try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(zcu);
+ const alignment = param_ty.abiAlignment(pt);
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -2964,7 +2973,7 @@ pub const Object = struct {
attributes: *Builder.FunctionAttributes.Wip,
owner_mod: *Package.Module,
) Allocator.Error!void {
- const comp = o.module.comp;
+ const comp = o.pt.zcu.comp;
if (!owner_mod.red_zone) {
try attributes.addFnAttr(.noredzone, &o.builder);
@@ -3039,7 +3048,7 @@ pub const Object = struct {
}
errdefer assert(o.anon_decl_map.remove(decl_val));
- const mod = o.module;
+ const mod = o.pt.zcu;
const decl_ty = mod.intern_pool.typeOf(decl_val);
const variable_index = try o.builder.addVariable(
@@ -3065,7 +3074,7 @@ pub const Object = struct {
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable;
errdefer assert(o.decl_map.remove(decl_index));
- const zcu = o.module;
+ const zcu = o.pt.zcu;
const decl = zcu.declPtr(decl_index);
const is_extern = decl.isExtern(zcu);
@@ -3100,11 +3109,12 @@ pub const Object = struct {
}
fn errorIntType(o: *Object) Allocator.Error!Builder.Type {
- return o.builder.intType(o.module.errorSetBits());
+ return o.builder.intType(o.pt.zcu.errorSetBits());
}
fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const target = mod.getTarget();
const ip = &mod.intern_pool;
return switch (t.toIntern()) {
@@ -3230,7 +3240,7 @@ pub const Object = struct {
),
.opt_type => |child_ty| {
// Must stay in sync with `opt_payload` logic in `lowerPtr`.
- if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8;
+ if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(pt)) return .i8;
const payload_ty = try o.lowerType(Type.fromInterned(child_ty));
if (t.optionalReprIsPayload(mod)) return payload_ty;
@@ -3238,8 +3248,8 @@ pub const Object = struct {
comptime assert(optional_layout_version == 3);
var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined };
var fields_len: usize = 2;
- const offset = Type.fromInterned(child_ty).abiSize(mod) + 1;
- const abi_size = t.abiSize(mod);
+ const offset = Type.fromInterned(child_ty).abiSize(pt) + 1;
+ const abi_size = t.abiSize(pt);
const padding_len = abi_size - offset;
if (padding_len > 0) {
fields[2] = try o.builder.arrayType(padding_len, .i8);
@@ -3252,16 +3262,16 @@ pub const Object = struct {
// Must stay in sync with `codegen.errUnionPayloadOffset`.
// See logic in `lowerPtr`.
const error_type = try o.errorIntType();
- if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod))
+ if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(pt))
return error_type;
const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type));
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try o.pt.errorIntType();
- const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(mod);
- const error_align = err_int_ty.abiAlignment(mod);
+ const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(pt);
+ const error_align = err_int_ty.abiAlignment(pt);
- const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(mod);
- const error_size = err_int_ty.abiSize(mod);
+ const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(pt);
+ const error_size = err_int_ty.abiSize(pt);
var fields: [3]Builder.Type = undefined;
var fields_len: usize = 2;
@@ -3317,12 +3327,12 @@ pub const Object = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- const field_align = mod.structFieldAlignment(
+ const field_align = pt.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
);
- const field_ty_align = field_ty.abiAlignment(mod);
+ const field_ty_align = field_ty.abiAlignment(pt);
if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
big_align = big_align.max(field_align);
const prev_offset = offset;
@@ -3334,7 +3344,7 @@ pub const Object = struct {
try o.builder.arrayType(padding_len, .i8),
);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
@@ -3353,7 +3363,7 @@ pub const Object = struct {
}, @intCast(llvm_field_types.items.len));
try llvm_field_types.append(o.gpa, try o.lowerType(field_ty));
- offset += field_ty.abiSize(mod);
+ offset += field_ty.abiSize(pt);
}
{
const prev_offset = offset;
@@ -3386,7 +3396,7 @@ pub const Object = struct {
var offset: u64 = 0;
var big_align: InternPool.Alignment = .none;
- const struct_size = t.abiSize(mod);
+ const struct_size = t.abiSize(pt);
for (
anon_struct_type.types.get(ip),
@@ -3395,7 +3405,7 @@ pub const Object = struct {
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
- const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = field_align.forward(offset);
@@ -3405,7 +3415,7 @@ pub const Object = struct {
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
@@ -3423,7 +3433,7 @@ pub const Object = struct {
}, @intCast(llvm_field_types.items.len));
try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty)));
- offset += Type.fromInterned(field_ty).abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(pt);
}
{
const prev_offset = offset;
@@ -3440,10 +3450,10 @@ pub const Object = struct {
if (o.type_map.get(t.toIntern())) |value| return value;
const union_obj = ip.loadUnionType(t.toIntern());
- const layout = mod.getUnionLayout(union_obj);
+ const layout = pt.getUnionLayout(union_obj);
if (union_obj.flagsPtr(ip).layout == .@"packed") {
- const int_ty = try o.builder.intType(@intCast(t.bitSize(mod)));
+ const int_ty = try o.builder.intType(@intCast(t.bitSize(pt)));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
}
@@ -3552,18 +3562,20 @@ pub const Object = struct {
/// being a zero bit type, but it should still be lowered as an i8 in such case.
/// There are other similar cases handled here as well.
fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
.Opaque => true,
.Fn => !mod.typeToFunc(elem_ty).?.is_generic,
- .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
- else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
+ .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(pt),
+ else => elem_ty.hasRuntimeBitsIgnoreComptime(pt),
};
return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8;
}
fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
const ret_ty = try lowerFnRetTy(o, fn_info);
@@ -3571,14 +3583,14 @@ pub const Object = struct {
var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_params.deinit(o.gpa);
- if (firstParamSRet(fn_info, mod, target)) {
+ if (firstParamSRet(fn_info, pt, target)) {
try llvm_params.append(o.gpa, .ptr);
}
if (Type.fromInterned(fn_info.return_type).isError(mod) and
mod.comp.config.any_error_tracing)
{
- const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
+ const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType());
try llvm_params.append(o.gpa, try o.lowerType(ptr_ty));
}
@@ -3595,7 +3607,7 @@ pub const Object = struct {
.abi_sized_int => {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
try llvm_params.append(o.gpa, try o.builder.intType(
- @intCast(param_ty.abiSize(mod) * 8),
+ @intCast(param_ty.abiSize(pt) * 8),
));
},
.slice => {
@@ -3633,7 +3645,8 @@ pub const Object = struct {
}
fn lowerValueToInt(o: *Object, llvm_int_ty: Builder.Type, arg_val: InternPool.Index) Error!Builder.Constant {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
@@ -3666,15 +3679,15 @@ pub const Object = struct {
var running_int = try o.builder.intConst(llvm_int_ty, 0);
var running_bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits);
- const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(mod, field_index)).toIntern());
+ const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern());
const shifted = try o.builder.binConst(.shl, field_val, shift_rhs);
running_int = try o.builder.binConst(.xor, running_int, shifted);
- const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod));
+ const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt));
running_bits += ty_bit_size;
}
return running_int;
@@ -3683,7 +3696,7 @@ pub const Object = struct {
else => unreachable,
},
.un => |un| {
- const layout = ty.unionGetLayout(mod);
+ const layout = ty.unionGetLayout(pt);
if (layout.payload_size == 0) return o.lowerValue(un.tag);
const union_obj = mod.typeToUnion(ty).?;
@@ -3701,7 +3714,7 @@ pub const Object = struct {
}
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(llvm_int_ty, 0);
+ if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(llvm_int_ty, 0);
return o.lowerValueToInt(llvm_int_ty, un.val);
},
.simple_value => |simple_value| switch (simple_value) {
@@ -3715,7 +3728,7 @@ pub const Object = struct {
.opt => {}, // pointer like optional expected
else => unreachable,
}
- const bits = ty.bitSize(mod);
+ const bits = ty.bitSize(pt);
const bytes: usize = @intCast(std.mem.alignForward(u64, bits, 8) / 8);
var stack = std.heap.stackFallback(32, o.gpa);
@@ -3729,12 +3742,7 @@ pub const Object = struct {
defer allocator.free(limbs);
@memset(limbs, 0);
- val.writeToPackedMemory(
- ty,
- mod,
- std.mem.sliceAsBytes(limbs)[0..bytes],
- 0,
- ) catch unreachable;
+ val.writeToPackedMemory(ty, pt, std.mem.sliceAsBytes(limbs)[0..bytes], 0) catch unreachable;
if (builtin.target.cpu.arch.endian() == .little) {
if (target.cpu.arch.endian() == .big)
@@ -3752,7 +3760,8 @@ pub const Object = struct {
}
fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
@@ -3811,7 +3820,7 @@ pub const Object = struct {
},
.int => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_space, mod);
+ const bigint = val.toBigInt(&bigint_space, pt);
return lowerBigInt(o, ty, bigint);
},
.err => |err| {
@@ -3821,24 +3830,24 @@ pub const Object = struct {
},
.error_union => |error_union| {
const err_val = switch (error_union.val) {
- .err_name => |err_name| try mod.intern(.{ .err = .{
+ .err_name => |err_name| try pt.intern(.{ .err = .{
.ty = ty.errorUnionSet(mod).toIntern(),
.name = err_name,
} }),
- .payload => (try mod.intValue(try mod.errorIntType(), 0)).toIntern(),
+ .payload => (try pt.intValue(try pt.errorIntType(), 0)).toIntern(),
};
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try pt.errorIntType();
const payload_type = ty.errorUnionPayload(mod);
- if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
// We use the error type directly as the type.
return o.lowerValue(err_val);
}
- const payload_align = payload_type.abiAlignment(mod);
- const error_align = err_int_ty.abiAlignment(mod);
+ const payload_align = payload_type.abiAlignment(pt);
+ const error_align = err_int_ty.abiAlignment(pt);
const llvm_error_value = try o.lowerValue(err_val);
const llvm_payload_value = try o.lowerValue(switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }),
+ .err_name => try pt.intern(.{ .undef = payload_type.toIntern() }),
.payload => |payload| payload,
});
@@ -3869,16 +3878,16 @@ pub const Object = struct {
.enum_tag => |enum_tag| o.lowerValue(enum_tag.int),
.float => switch (ty.floatBits(target)) {
16 => if (backendSupportsF16(target))
- try o.builder.halfConst(val.toFloat(f16, mod))
+ try o.builder.halfConst(val.toFloat(f16, pt))
else
- try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))),
- 32 => try o.builder.floatConst(val.toFloat(f32, mod)),
- 64 => try o.builder.doubleConst(val.toFloat(f64, mod)),
+ try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, pt)))),
+ 32 => try o.builder.floatConst(val.toFloat(f32, pt)),
+ 64 => try o.builder.doubleConst(val.toFloat(f64, pt)),
80 => if (backendSupportsF80(target))
- try o.builder.x86_fp80Const(val.toFloat(f80, mod))
+ try o.builder.x86_fp80Const(val.toFloat(f80, pt))
else
- try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))),
- 128 => try o.builder.fp128Const(val.toFloat(f128, mod)),
+ try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, pt)))),
+ 128 => try o.builder.fp128Const(val.toFloat(f128, pt)),
else => unreachable,
},
.ptr => try o.lowerPtr(arg_val, 0),
@@ -3891,7 +3900,7 @@ pub const Object = struct {
const payload_ty = ty.optionalChild(mod);
const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none));
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return non_null_bit;
}
const llvm_ty = try o.lowerType(ty);
@@ -3909,7 +3918,7 @@ pub const Object = struct {
var fields: [3]Builder.Type = undefined;
var vals: [3]Builder.Constant = undefined;
vals[0] = try o.lowerValue(switch (opt.val) {
- .none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+ .none => try pt.intern(.{ .undef = payload_ty.toIntern() }),
else => |payload| payload,
});
vals[1] = non_null_bit;
@@ -4058,9 +4067,9 @@ pub const Object = struct {
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
- const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = field_align.forward(offset);
@@ -4076,13 +4085,13 @@ pub const Object = struct {
}
vals[llvm_index] =
- try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern());
+ try o.lowerValue((try val.fieldValue(pt, field_index)).toIntern());
fields[llvm_index] = vals[llvm_index].typeOf(&o.builder);
if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index])
need_unnamed = true;
llvm_index += 1;
- offset += Type.fromInterned(field_ty).abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(pt);
}
{
const prev_offset = offset;
@@ -4109,7 +4118,7 @@ pub const Object = struct {
if (struct_type.layout == .@"packed") {
comptime assert(Type.packed_struct_layout_version == 2);
- const bits = ty.bitSize(mod);
+ const bits = ty.bitSize(pt);
const llvm_int_ty = try o.builder.intType(@intCast(bits));
return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4138,7 +4147,7 @@ pub const Object = struct {
var field_it = struct_type.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- const field_align = mod.structFieldAlignment(
+ const field_align = pt.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
@@ -4158,20 +4167,20 @@ pub const Object = struct {
llvm_index += 1;
}
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
vals[llvm_index] = try o.lowerValue(
- (try val.fieldValue(mod, field_index)).toIntern(),
+ (try val.fieldValue(pt, field_index)).toIntern(),
);
fields[llvm_index] = vals[llvm_index].typeOf(&o.builder);
if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index])
need_unnamed = true;
llvm_index += 1;
- offset += field_ty.abiSize(mod);
+ offset += field_ty.abiSize(pt);
}
{
const prev_offset = offset;
@@ -4195,7 +4204,7 @@ pub const Object = struct {
},
.un => |un| {
const union_ty = try o.lowerType(ty);
- const layout = ty.unionGetLayout(mod);
+ const layout = ty.unionGetLayout(pt);
if (layout.payload_size == 0) return o.lowerValue(un.tag);
const union_obj = mod.typeToUnion(ty).?;
@@ -4206,8 +4215,8 @@ pub const Object = struct {
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (container_layout == .@"packed") {
- if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0);
- const bits = ty.bitSize(mod);
+ if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(union_ty, 0);
+ const bits = ty.bitSize(pt);
const llvm_int_ty = try o.builder.intType(@intCast(bits));
return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4219,7 +4228,7 @@ pub const Object = struct {
// must pointer cast to the expected type before accessing the union.
need_unnamed = layout.most_aligned_field != field_index;
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const padding_len = layout.payload_size;
break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8));
}
@@ -4228,7 +4237,7 @@ pub const Object = struct {
if (payload_ty != union_ty.structFields(&o.builder)[
@intFromBool(layout.tag_align.compare(.gte, layout.payload_align))
]) need_unnamed = true;
- const field_size = field_ty.abiSize(mod);
+ const field_size = field_ty.abiSize(pt);
if (field_size == layout.payload_size) break :p payload;
const padding_len = layout.payload_size - field_size;
const padding_ty = try o.builder.arrayType(padding_len, .i8);
@@ -4239,7 +4248,7 @@ pub const Object = struct {
} else p: {
assert(layout.tag_size == 0);
if (container_layout == .@"packed") {
- const bits = ty.bitSize(mod);
+ const bits = ty.bitSize(pt);
const llvm_int_ty = try o.builder.intType(@intCast(bits));
return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4286,7 +4295,7 @@ pub const Object = struct {
ty: Type,
bigint: std.math.big.int.Const,
) Allocator.Error!Builder.Constant {
- const mod = o.module;
+ const mod = o.pt.zcu;
return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
}
@@ -4295,7 +4304,8 @@ pub const Object = struct {
ptr_val: InternPool.Index,
prev_offset: u64,
) Error!Builder.Constant {
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
@@ -4320,7 +4330,7 @@ pub const Object = struct {
eu_ptr,
offset + @import("../codegen.zig").errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
- zcu,
+ pt,
),
),
.opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
@@ -4336,7 +4346,7 @@ pub const Object = struct {
};
},
.Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
- .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
+ .auto => agg_ty.structFieldOffset(@intCast(field.index), pt),
.@"extern", .@"packed" => unreachable,
},
else => unreachable,
@@ -4353,7 +4363,8 @@ pub const Object = struct {
o: *Object,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
) Error!Builder.Constant {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const decl_val = anon_decl.val;
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
@@ -4370,14 +4381,14 @@ pub const Object = struct {
const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
+ if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
if (is_fn_body)
@panic("TODO");
const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
- const alignment = ptr_ty.ptrAlignment(mod);
+ const alignment = ptr_ty.ptrAlignment(pt);
const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
const llvm_val = try o.builder.convConst(
@@ -4389,7 +4400,8 @@ pub const Object = struct {
}
fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
// In the case of something like:
// fn foo() void {}
@@ -4408,10 +4420,10 @@ pub const Object = struct {
}
const decl_ty = decl.typeOf(mod);
- const ptr_ty = try decl.declPtrType(mod);
+ const ptr_ty = try decl.declPtrType(pt);
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
+ if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or
(is_fn_body and mod.typeToFunc(decl_ty).?.is_generic))
{
return o.lowerPtrToVoid(ptr_ty);
@@ -4431,7 +4443,7 @@ pub const Object = struct {
}
fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
- const mod = o.module;
+ const mod = o.pt.zcu;
// Even though we are pointing at something which has zero bits (e.g. `void`),
// Pointers are defined to have bits. So we must return something here.
// The value cannot be undefined, because we use the `nonnull` annotation
@@ -4459,20 +4471,21 @@ pub const Object = struct {
/// RMW exchange of floating-point values is bitcasted to same-sized integer
/// types to work around a LLVM deficiency when targeting ARM/AArch64.
fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const int_ty = switch (ty.zigTypeTag(mod)) {
.Int => ty,
.Enum => ty.intTagType(mod),
.Float => {
if (!is_rmw_xchg) return .none;
- return o.builder.intType(@intCast(ty.abiSize(mod) * 8));
+ return o.builder.intType(@intCast(ty.abiSize(pt) * 8));
},
.Bool => return .i8,
else => return .none,
};
const bit_count = int_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
- return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8));
+ return o.builder.intType(@intCast(int_ty.abiSize(pt) * 8));
} else {
return .none;
}
@@ -4486,7 +4499,8 @@ pub const Object = struct {
fn_info: InternPool.Key.FuncType,
llvm_arg_i: u32,
) Allocator.Error!void {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
if (param_ty.isPtrAtRuntime(mod)) {
const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, param_index)) |i| {
@@ -4507,7 +4521,7 @@ pub const Object = struct {
const elem_align = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
- Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1");
+ Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1");
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder);
} else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
.signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder),
@@ -4540,7 +4554,7 @@ pub const Object = struct {
const name = try o.builder.strtabString(lt_errors_fn_name);
if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function;
- const zcu = o.module;
+ const zcu = o.pt.zcu;
const target = zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.errorIntType()}, .normal),
@@ -4559,7 +4573,8 @@ pub const Object = struct {
}
fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index {
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
@@ -4618,7 +4633,7 @@ pub const Object = struct {
const return_block = try wip.block(1, "Name");
const this_tag_int_value = try o.lowerValue(
- (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
+ (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
);
try wip_switch.addCase(this_tag_int_value, return_block, &wip);
@@ -4636,13 +4651,13 @@ pub const Object = struct {
pub const DeclGen = struct {
object: *Object,
- decl: *Module.Decl,
+ decl: *Zcu.Decl,
decl_index: InternPool.DeclIndex,
- err_msg: ?*Module.ErrorMsg,
+ err_msg: ?*Zcu.ErrorMsg,
fn ownerModule(dg: DeclGen) *Package.Module {
const o = dg.object;
- const zcu = o.module;
+ const zcu = o.pt.zcu;
const namespace = zcu.namespacePtr(dg.decl.src_namespace);
const file_scope = namespace.fileScope(zcu);
return file_scope.mod;
@@ -4653,15 +4668,15 @@ pub const DeclGen = struct {
assert(dg.err_msg == null);
const o = dg.object;
const gpa = o.gpa;
- const mod = o.module;
- const src_loc = dg.decl.navSrcLoc(mod);
- dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args);
+ const src_loc = dg.decl.navSrcLoc(o.pt.zcu);
+ dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args);
return error.CodegenFail;
}
fn genDecl(dg: *DeclGen) !void {
const o = dg.object;
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const decl = dg.decl;
const decl_index = dg.decl_index;
@@ -4672,7 +4687,7 @@ pub const DeclGen = struct {
} else {
const variable_index = try o.resolveGlobalDecl(decl_index);
variable_index.setAlignment(
- decl.getAlignment(zcu).toLlvm(),
+ decl.getAlignment(pt).toLlvm(),
&o.builder,
);
if (decl.@"linksection".toSlice(ip)) |section|
@@ -4833,23 +4848,21 @@ pub const FuncGen = struct {
const gop = try self.func_inst_table.getOrPut(gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
- const o = self.dg.object;
- const mod = o.module;
- const llvm_val = try self.resolveValue((try self.air.value(inst, mod)).?);
+ const llvm_val = try self.resolveValue((try self.air.value(inst, self.dg.object.pt)).?);
gop.value_ptr.* = llvm_val.toValue();
return llvm_val.toValue();
}
fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant {
const o = self.dg.object;
- const mod = o.module;
- const ty = val.typeOf(mod);
+ const pt = o.pt;
+ const ty = val.typeOf(pt.zcu);
const llvm_val = try o.lowerValue(val.toIntern());
- if (!isByRef(ty, mod)) return llvm_val;
+ if (!isByRef(ty, pt)) return llvm_val;
// We have an LLVM value but we need to create a global constant and
// set the value as its initializer, and then return a pointer to the global.
- const target = mod.getTarget();
+ const target = pt.zcu.getTarget();
const variable_index = try o.builder.addVariable(
.empty,
llvm_val.typeOf(&o.builder),
@@ -4859,7 +4872,7 @@ pub const FuncGen = struct {
variable_index.setLinkage(.private, &o.builder);
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
- variable_index.setAlignment(ty.abiAlignment(mod).toLlvm(), &o.builder);
+ variable_index.setAlignment(ty.abiAlignment(pt).toLlvm(), &o.builder);
return o.builder.convConst(
variable_index.toConst(&o.builder),
try o.builder.ptrType(toLlvmAddressSpace(.generic, target)),
@@ -4868,10 +4881,10 @@ pub const FuncGen = struct {
fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
if (o.null_opt_usize == .no_init) {
- o.null_opt_usize = try self.resolveValue(Value.fromInterned(try mod.intern(.{ .opt = .{
- .ty = try mod.intern(.{ .opt_type = .usize_type }),
+ o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{
+ .ty = try pt.intern(.{ .opt_type = .usize_type }),
.val = .none,
} })));
}
@@ -4880,7 +4893,7 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
for (body, 0..) |inst, i| {
@@ -5145,7 +5158,8 @@ pub const FuncGen = struct {
if (maybe_inline_func) |inline_func| {
const o = self.dg.object;
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const func = zcu.funcInfo(inline_func);
const decl_index = func.owner_decl;
@@ -5161,7 +5175,7 @@ pub const FuncGen = struct {
const fqn = try decl.fullyQualifiedName(zcu);
- const fn_ty = try zcu.funcType(.{
+ const fn_ty = try pt.funcType(.{
.param_types = &.{},
.return_type = .void_type,
});
@@ -5228,7 +5242,8 @@ pub const FuncGen = struct {
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
@@ -5240,7 +5255,7 @@ pub const FuncGen = struct {
const return_type = Type.fromInterned(fn_info.return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = mod.getTarget();
- const sret = firstParamSRet(fn_info, mod, target);
+ const sret = firstParamSRet(fn_info, pt, target);
var llvm_args = std.ArrayList(Builder.Value).init(self.gpa);
defer llvm_args.deinit();
@@ -5258,14 +5273,13 @@ pub const FuncGen = struct {
const llvm_ret_ty = try o.lowerType(return_type);
try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);
- const alignment = return_type.abiAlignment(mod).toLlvm();
+ const alignment = return_type.abiAlignment(pt).toLlvm();
const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment);
try llvm_args.append(ret_ptr);
break :blk ret_ptr;
};
- const err_return_tracing = return_type.isError(mod) and
- o.module.comp.config.any_error_tracing;
+ const err_return_tracing = return_type.isError(mod) and mod.comp.config.any_error_tracing;
if (err_return_tracing) {
assert(self.err_ret_trace != .none);
try llvm_args.append(self.err_ret_trace);
@@ -5279,8 +5293,8 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
const llvm_param_ty = try o.lowerType(param_ty);
- if (isByRef(param_ty, mod)) {
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ if (isByRef(param_ty, pt)) {
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, "");
try llvm_args.append(loaded);
} else {
@@ -5291,10 +5305,10 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- if (isByRef(param_ty, mod)) {
+ if (isByRef(param_ty, pt)) {
try llvm_args.append(llvm_arg);
} else {
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const param_llvm_ty = llvm_arg.typeOfWip(&self.wip);
const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
_ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
@@ -5306,10 +5320,10 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const param_llvm_ty = try o.lowerType(param_ty);
const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
- if (isByRef(param_ty, mod)) {
+ if (isByRef(param_ty, pt)) {
const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, "");
_ = try self.wip.store(.normal, loaded, arg_ptr, alignment);
} else {
@@ -5321,16 +5335,16 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8));
+ const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(pt) * 8));
- if (isByRef(param_ty, mod)) {
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ if (isByRef(param_ty, pt)) {
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, "");
try llvm_args.append(loaded);
} else {
// LLVM does not allow bitcasting structs so we must allocate
// a local, store as one type, and then load as another type.
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
_ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
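
A rough user-level analogue of the "store as one type, load as another" trick mentioned in the hunk above (an assumed illustration, not code from this commit): copy the struct into a local slot, then reload its bytes as a same-sized integer, mirroring what the backend does because LLVM will not bitcast a struct directly.

const std = @import("std");

const Abi = extern struct { x: u16, y: u16 };

// Reinterpret a 4-byte struct as a u32 by going through memory.
fn toWord(v: Abi) u32 {
    const slot: Abi = v; // the "alloca" + store
    return std.mem.bytesToValue(u32, std.mem.asBytes(&slot)); // load as int
}

test toWord {
    const v: Abi = .{ .x = 0x1234, .y = 0x5678 };
    const w = toWord(v);
    const back = std.mem.bytesToValue(Abi, std.mem.asBytes(&w));
    try std.testing.expectEqual(v.x, back.x);
    try std.testing.expectEqual(v.y, back.y);
}
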
@@ -5349,9 +5363,9 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_types = it.types_buffer[0..it.types_len];
const llvm_arg = try self.resolveInst(arg);
- const is_by_ref = isByRef(param_ty, mod);
+ const is_by_ref = isByRef(param_ty, pt);
const arg_ptr = if (is_by_ref) llvm_arg else ptr: {
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
break :ptr ptr;
@@ -5377,8 +5391,8 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- const alignment = arg_ty.abiAlignment(mod).toLlvm();
- if (!isByRef(arg_ty, mod)) {
+ const alignment = arg_ty.abiAlignment(pt).toLlvm();
+ if (!isByRef(arg_ty, pt)) {
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
llvm_arg = ptr;
@@ -5395,8 +5409,8 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- const alignment = arg_ty.abiAlignment(mod).toLlvm();
- if (!isByRef(arg_ty, mod)) {
+ const alignment = arg_ty.abiAlignment(pt).toLlvm();
+ if (!isByRef(arg_ty, pt)) {
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
llvm_arg = ptr;
@@ -5418,7 +5432,7 @@ pub const FuncGen = struct {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
- if (!isByRef(param_ty, mod)) {
+ if (!isByRef(param_ty, pt)) {
try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
@@ -5426,7 +5440,7 @@ pub const FuncGen = struct {
const param_index = it.zig_index - 1;
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
const param_llvm_ty = try o.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(mod).toLlvm();
+ const alignment = param_ty.abiAlignment(pt).toLlvm();
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -5460,7 +5474,7 @@ pub const FuncGen = struct {
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
- Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1")).toLlvm();
+ Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
},
};
@@ -5485,17 +5499,17 @@ pub const FuncGen = struct {
return .none;
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(pt)) {
return .none;
}
const llvm_ret_ty = try o.lowerType(return_type);
if (ret_ptr) |rp| {
- if (isByRef(return_type, mod)) {
+ if (isByRef(return_type, pt)) {
return rp;
} else {
// our by-ref status disagrees with sret so we must load.
- const return_alignment = return_type.abiAlignment(mod).toLlvm();
+ const return_alignment = return_type.abiAlignment(pt).toLlvm();
return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, "");
}
}
@@ -5506,19 +5520,19 @@ pub const FuncGen = struct {
// In this case the function return type is honoring the calling convention by having
// a different LLVM type than the usual one. We solve this here at the callsite
// by using our canonical type, then loading it if necessary.
- const alignment = return_type.abiAlignment(mod).toLlvm();
+ const alignment = return_type.abiAlignment(pt).toLlvm();
const rp = try self.buildAlloca(abi_ret_ty, alignment);
_ = try self.wip.store(.normal, call, rp, alignment);
- return if (isByRef(return_type, mod))
+ return if (isByRef(return_type, pt))
rp
else
try self.wip.load(.normal, llvm_ret_ty, rp, alignment, "");
}
- if (isByRef(return_type, mod)) {
+ if (isByRef(return_type, pt)) {
// our by-ref status disagrees with sret so we must allocate, store,
// and return the allocation pointer.
- const alignment = return_type.abiAlignment(mod).toLlvm();
+ const alignment = return_type.abiAlignment(pt).toLlvm();
const rp = try self.buildAlloca(llvm_ret_ty, alignment);
_ = try self.wip.store(.normal, call, rp, alignment);
return rp;
@@ -5527,9 +5541,9 @@ pub const FuncGen = struct {
}
}
- fn buildSimplePanic(fg: *FuncGen, panic_id: Module.PanicId) !void {
+ fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void {
const o = fg.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?;
const msg_decl = mod.declPtr(msg_decl_index);
const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod);
@@ -5567,15 +5581,16 @@ pub const FuncGen = struct {
fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.typeOf(un_op);
if (self.ret_ptr != .none) {
- const ptr_ty = try mod.singleMutPtrType(ret_ty);
+ const ptr_ty = try pt.singleMutPtrType(ret_ty);
const operand = try self.resolveInst(un_op);
- const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false;
+ const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef and safety) undef: {
const ptr_info = ptr_ty.ptrInfo(mod);
const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
@@ -5585,10 +5600,10 @@ pub const FuncGen = struct {
// https://github.com/ziglang/zig/issues/15337
break :undef;
}
- const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod));
+ const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt));
_ = try self.wip.callMemSet(
self.ret_ptr,
- ptr_ty.ptrAlignment(mod).toLlvm(),
+ ptr_ty.ptrAlignment(pt).toLlvm(),
try o.builder.intValue(.i8, 0xaa),
len,
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
@@ -5615,7 +5630,7 @@ pub const FuncGen = struct {
return .none;
}
const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
@@ -5629,13 +5644,13 @@ pub const FuncGen = struct {
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
const operand = try self.resolveInst(un_op);
- const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false;
- const alignment = ret_ty.abiAlignment(mod).toLlvm();
+ const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false;
+ const alignment = ret_ty.abiAlignment(pt).toLlvm();
if (val_is_undef and safety) {
const llvm_ret_ty = operand.typeOfWip(&self.wip);
const rp = try self.buildAlloca(llvm_ret_ty, alignment);
- const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod));
+ const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt));
_ = try self.wip.callMemSet(
rp,
alignment,
@@ -5651,7 +5666,7 @@ pub const FuncGen = struct {
return .none;
}
- if (isByRef(ret_ty, mod)) {
+ if (isByRef(ret_ty, pt)) {
// operand is a pointer however self.ret_ptr is null so that means
// we need to return a value.
_ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, ""));
@@ -5672,12 +5687,13 @@ pub const FuncGen = struct {
fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr_ty = self.typeOf(un_op);
const ret_ty = ptr_ty.childType(mod);
const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
@@ -5694,7 +5710,7 @@ pub const FuncGen = struct {
}
const ptr = try self.resolveInst(un_op);
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
- const alignment = ret_ty.abiAlignment(mod).toLlvm();
+ const alignment = ret_ty.abiAlignment(pt).toLlvm();
_ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, ""));
return .none;
}
@@ -5711,17 +5727,17 @@ pub const FuncGen = struct {
fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
+ const pt = o.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_list = try self.resolveInst(ty_op.operand);
const va_list_ty = ty_op.ty.toType();
const llvm_va_list_ty = try o.lowerType(va_list_ty);
- const mod = o.module;
- const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
+ const result_alignment = va_list_ty.abiAlignment(pt).toLlvm();
const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
_ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, "");
- return if (isByRef(va_list_ty, mod))
+ return if (isByRef(va_list_ty, pt))
dest_list
else
try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
@@ -5737,15 +5753,15 @@ pub const FuncGen = struct {
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
const va_list_ty = self.typeOfIndex(inst);
const llvm_va_list_ty = try o.lowerType(va_list_ty);
- const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
+ const result_alignment = va_list_ty.abiAlignment(pt).toLlvm();
const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
_ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, "");
- return if (isByRef(va_list_ty, mod))
+ return if (isByRef(va_list_ty, pt))
dest_list
else
try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
@@ -5802,21 +5818,22 @@ pub const FuncGen = struct {
rhs: Builder.Value,
) Allocator.Error!Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const scalar_ty = operand_ty.scalarType(mod);
const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
.Enum => scalar_ty.intTagType(mod),
.Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
.Optional => blk: {
const payload_ty = operand_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or
operand_ty.optionalReprIsPayload(mod))
{
break :blk operand_ty;
}
// We need to emit instructions to check for equality/inequality
// of optionals that are not pointers.
- const is_by_ref = isByRef(scalar_ty, mod);
+ const is_by_ref = isByRef(scalar_ty, pt);
const opt_llvm_ty = try o.lowerType(scalar_ty);
const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref);
const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref);
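
For readers following the optional-comparison comment above, a sketch of the same strategy in user-level Zig (an assumed illustration, not code from this commit): compare the non-null bits first and only compare payloads when both sides are non-null.

const std = @import("std");

fn optEql(a: ?u32, b: ?u32) bool {
    const a_set = a != null;
    const b_set = b != null;
    if (a_set != b_set) return false;
    if (!a_set) return true; // both null
    return a.? == b.?;
}

test optEql {
    try std.testing.expect(optEql(null, null));
    try std.testing.expect(!optEql(null, 5));
    try std.testing.expect(optEql(7, 7));
    try std.testing.expect(!optEql(7, 8));
}
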
@@ -5908,7 +5925,8 @@ pub const FuncGen = struct {
body: []const Air.Inst.Index,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst_ty = self.typeOfIndex(inst);
if (inst_ty.isNoReturn(mod)) {
@@ -5916,7 +5934,7 @@ pub const FuncGen = struct {
return .none;
}
- const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod);
+ const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt);
var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 };
defer if (have_block_result) breaks.list.deinit(self.gpa);
@@ -5940,7 +5958,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
- if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) {
+ if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)) {
break :ty .ptr;
}
break :ty raw_llvm_ty;
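
The comment above concerns phi nodes forcing a pointer type; the user-level counterpart (an assumed illustration, not code from this commit) is that a function chosen at runtime must be held as a function pointer rather than a function body.

const std = @import("std");

fn one() u32 {
    return 1;
}

fn two() u32 {
    return 2;
}

// A runtime-selected function is a *const fn pointer.
fn pick(flag: bool) *const fn () u32 {
    return if (flag) &one else &two;
}

test pick {
    try std.testing.expectEqual(@as(u32, 1), pick(true)());
    try std.testing.expectEqual(@as(u32, 2), pick(false)());
}
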
@@ -5958,13 +5976,13 @@ pub const FuncGen = struct {
fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
+ const pt = o.pt;
const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block = self.blocks.get(branch.block_inst).?;
// Add the values to the lists only if the break provides a value.
const operand_ty = self.typeOf(branch.operand);
- const mod = o.module;
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@@ -5998,7 +6016,7 @@ pub const FuncGen = struct {
fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
const inst = body_tail[0];
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
@@ -6006,14 +6024,14 @@ pub const FuncGen = struct {
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
- const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false;
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
@@ -6033,9 +6051,10 @@ pub const FuncGen = struct {
is_unused: bool,
) !Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const payload_ty = err_union_ty.errorUnionPayload(mod);
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
const err_union_llvm_ty = try o.lowerType(err_union_ty);
const error_type = try o.errorIntType();
@@ -6048,8 +6067,8 @@ pub const FuncGen = struct {
else
err_union;
}
- const err_field_index = try errUnionErrorOffset(payload_ty, mod);
- if (operand_is_ptr or isByRef(err_union_ty, mod)) {
+ const err_field_index = try errUnionErrorOffset(payload_ty, pt);
+ if (operand_is_ptr or isByRef(err_union_ty, pt)) {
const err_field_ptr =
try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, "");
// TODO add alignment to this load
@@ -6077,13 +6096,13 @@ pub const FuncGen = struct {
}
if (is_unused) return .none;
if (!payload_has_bits) return if (operand_is_ptr) err_union else .none;
- const offset = try errUnionPayloadOffset(payload_ty, mod);
+ const offset = try errUnionPayloadOffset(payload_ty, pt);
if (operand_is_ptr) {
return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
- } else if (isByRef(err_union_ty, mod)) {
+ } else if (isByRef(err_union_ty, pt)) {
const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
- const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
- if (isByRef(payload_ty, mod)) {
+ const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(payload_ty, pt)) {
if (can_elide_load)
return payload_ptr;
@@ -6161,7 +6180,7 @@ pub const FuncGen = struct {
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
@@ -6185,7 +6204,8 @@ pub const FuncGen = struct {
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const array_ty = operand_ty.childType(mod);
@@ -6193,7 +6213,7 @@ pub const FuncGen = struct {
const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod));
const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
const operand = try self.resolveInst(ty_op.operand);
- if (!array_ty.hasRuntimeBitsIgnoreComptime(mod))
+ if (!array_ty.hasRuntimeBitsIgnoreComptime(pt))
return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, "");
const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{
try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0),
@@ -6203,7 +6223,8 @@ pub const FuncGen = struct {
fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const workaround_operand = try self.resolveInst(ty_op.operand);
@@ -6213,7 +6234,7 @@ pub const FuncGen = struct {
const operand = o: {
// Work around LLVM bug. See https://github.com/ziglang/zig/issues/17381.
- const bit_size = operand_scalar_ty.bitSize(mod);
+ const bit_size = operand_scalar_ty.bitSize(pt);
for ([_]u8{ 8, 16, 32, 64, 128 }) |b| {
if (bit_size < b) {
break :o try self.wip.cast(
@@ -6241,7 +6262,7 @@ pub const FuncGen = struct {
"",
);
- const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod)));
+ const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(pt)));
const rt_int_ty = try o.builder.intType(rt_int_bits);
var extended = try self.wip.conv(
if (is_signed_int) .signed else .unsigned,
@@ -6287,7 +6308,8 @@ pub const FuncGen = struct {
_ = fast;
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const target = mod.getTarget();
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6309,7 +6331,7 @@ pub const FuncGen = struct {
);
}
- const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod)));
+ const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(pt)));
const ret_ty = try o.builder.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -6348,19 +6370,20 @@ pub const FuncGen = struct {
fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
}
fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const llvm_usize = try o.lowerType(Type.usize);
switch (ty.ptrSize(mod)) {
.Slice => {
const len = try fg.wip.extractValue(ptr, &.{1}, "");
const elem_ty = ty.childType(mod);
- const abi_size = elem_ty.abiSize(mod);
+ const abi_size = elem_ty.abiSize(pt);
if (abi_size == 1) return len;
const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size);
return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, "");
@@ -6368,7 +6391,7 @@ pub const FuncGen = struct {
.One => {
const array_ty = ty.childType(mod);
const elem_ty = array_ty.childType(mod);
- const abi_size = elem_ty.abiSize(mod);
+ const abi_size = elem_ty.abiSize(pt);
return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size);
},
.Many, .C => unreachable,
@@ -6383,7 +6406,7 @@ pub const FuncGen = struct {
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
const slice_ptr_ty = self.typeOf(ty_op.operand);
@@ -6394,7 +6417,8 @@ pub const FuncGen = struct {
fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
@@ -6404,11 +6428,11 @@ pub const FuncGen = struct {
const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
if (self.canElideLoad(body_tail))
return ptr;
- const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
+ const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
}
@@ -6417,7 +6441,7 @@ pub const FuncGen = struct {
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOf(bin_op.lhs);
@@ -6431,7 +6455,8 @@ pub const FuncGen = struct {
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -6440,15 +6465,15 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const array_llvm_ty = try o.lowerType(array_ty);
const elem_ty = array_ty.childType(mod);
- if (isByRef(array_ty, mod)) {
+ if (isByRef(array_ty, pt)) {
const indices: [2]Builder.Value = .{
try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs,
};
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
const elem_ptr =
try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
if (canElideLoad(self, body_tail)) return elem_ptr;
- const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
+ const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
} else {
const elem_ptr =
@@ -6463,7 +6488,8 @@ pub const FuncGen = struct {
fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
@@ -6477,9 +6503,9 @@ pub const FuncGen = struct {
&.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
else
&.{rhs}, "");
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
if (self.canElideLoad(body_tail)) return ptr;
- const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
+ const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
}
@@ -6488,12 +6514,13 @@ pub const FuncGen = struct {
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType(mod);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.resolveInst(bin_op.lhs);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.resolveInst(bin_op.lhs);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -6530,7 +6557,8 @@ pub const FuncGen = struct {
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -6538,27 +6566,27 @@ pub const FuncGen = struct {
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
- if (!isByRef(struct_ty, mod)) {
- assert(!isByRef(field_ty, mod));
+ if (!isByRef(struct_ty, pt)) {
+ assert(!isByRef(field_ty, pt));
switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout(mod)) {
.@"packed" => {
const struct_type = mod.typeToStruct(struct_ty).?;
- const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index);
+ const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
const shift_amt =
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(mod)) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
@@ -6575,12 +6603,12 @@ pub const FuncGen = struct {
const containing_int = struct_llvm_val;
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(mod)) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
@@ -6599,12 +6627,12 @@ pub const FuncGen = struct {
const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
const field_ptr =
try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
- const alignment = struct_ty.structFieldAlign(field_index, mod);
- const field_ptr_ty = try mod.ptrType(.{
+ const alignment = struct_ty.structFieldAlign(field_index, pt);
+ const field_ptr_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{ .alignment = alignment },
});
- if (isByRef(field_ty, mod)) {
+ if (isByRef(field_ty, pt)) {
if (canElideLoad(self, body_tail))
return field_ptr;
@@ -6617,12 +6645,12 @@ pub const FuncGen = struct {
},
.Union => {
const union_llvm_ty = try o.lowerType(struct_ty);
- const layout = struct_ty.unionGetLayout(mod);
+ const layout = struct_ty.unionGetLayout(pt);
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const field_ptr =
try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
const payload_alignment = layout.payload_align.toLlvm();
- if (isByRef(field_ty, mod)) {
+ if (isByRef(field_ty, pt)) {
if (canElideLoad(self, body_tail)) return field_ptr;
return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
} else {
@@ -6635,14 +6663,15 @@ pub const FuncGen = struct {
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try self.resolveInst(extra.field_ptr);
const parent_ty = ty_pl.ty.toType().childType(mod);
- const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
if (field_offset == 0) return field_ptr;
const res_ty = try o.lowerType(ty_pl.ty.toType());
@@ -6696,7 +6725,7 @@ pub const FuncGen = struct {
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
@@ -6743,9 +6772,9 @@ pub const FuncGen = struct {
try o.lowerDebugType(operand_ty),
);
- const zcu = o.module;
+ const pt = o.pt;
const owner_mod = self.dg.ownerModule();
- if (isByRef(operand_ty, zcu)) {
+ if (isByRef(operand_ty, pt)) {
_ = try self.wip.callIntrinsic(
.normal,
.none,
@@ -6759,7 +6788,7 @@ pub const FuncGen = struct {
"",
);
} else if (owner_mod.optimize_mode == .Debug) {
- const alignment = operand_ty.abiAlignment(zcu).toLlvm();
+ const alignment = operand_ty.abiAlignment(pt).toLlvm();
const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, operand, alloca, alignment);
_ = try self.wip.callIntrinsic(
@@ -6830,7 +6859,8 @@ pub const FuncGen = struct {
// This stores whether we need to add an elementtype attribute and
// if so, the element type itself.
const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count);
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const target = mod.getTarget();
var llvm_ret_i: usize = 0;
@@ -6930,13 +6960,13 @@ pub const FuncGen = struct {
const arg_llvm_value = try self.resolveInst(input);
const arg_ty = self.typeOf(input);
- const is_by_ref = isByRef(arg_ty, mod);
+ const is_by_ref = isByRef(arg_ty, pt);
if (is_by_ref) {
if (constraintAllowsMemory(constraint)) {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
- const alignment = arg_ty.abiAlignment(mod).toLlvm();
+ const alignment = arg_ty.abiAlignment(pt).toLlvm();
const arg_llvm_ty = try o.lowerType(arg_ty);
const load_inst =
try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, "");
@@ -6948,7 +6978,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
- const alignment = arg_ty.abiAlignment(mod).toLlvm();
+ const alignment = arg_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment);
llvm_param_values[llvm_param_i] = arg_ptr;
@@ -7000,7 +7030,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = llvm_rw_val;
llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip);
} else {
- const alignment = rw_ty.abiAlignment(mod).toLlvm();
+ const alignment = rw_ty.abiAlignment(pt).toLlvm();
const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, "");
llvm_param_values[llvm_param_i] = loaded;
llvm_param_types[llvm_param_i] = llvm_elem_ty;
@@ -7161,7 +7191,7 @@ pub const FuncGen = struct {
const output_ptr = try self.resolveInst(output);
const output_ptr_ty = self.typeOf(output);
- const alignment = output_ptr_ty.ptrAlignment(mod).toLlvm();
+ const alignment = output_ptr_ty.ptrAlignment(pt).toLlvm();
_ = try self.wip.store(.normal, output_value, output_ptr, alignment);
} else {
ret_val = output_value;
@@ -7179,7 +7209,8 @@ pub const FuncGen = struct {
cond: Builder.IntegerCondition,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
@@ -7204,7 +7235,7 @@ pub const FuncGen = struct {
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const loaded = if (operand_is_ptr)
try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
else
@@ -7212,7 +7243,7 @@ pub const FuncGen = struct {
return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), "");
}
- const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
+ const is_by_ref = operand_is_ptr or isByRef(optional_ty, pt);
return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref);
}
@@ -7223,7 +7254,8 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
@@ -7241,7 +7273,7 @@ pub const FuncGen = struct {
return val.toValue();
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const loaded = if (operand_is_ptr)
try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "")
else
@@ -7249,9 +7281,9 @@ pub const FuncGen = struct {
return self.wip.icmp(cond, loaded, zero, "");
}
- const err_field_index = try errUnionErrorOffset(payload_ty, mod);
+ const err_field_index = try errUnionErrorOffset(payload_ty, pt);
- const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: {
+ const loaded = if (operand_is_ptr or isByRef(err_union_ty, pt)) loaded: {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
const err_field_ptr =
try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, "");
@@ -7262,12 +7294,13 @@ pub const FuncGen = struct {
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand).childType(mod);
const payload_ty = optional_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
return operand;
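
A small sketch of the zero-bit case handled just above (an assumed illustration, not code from this commit): a payload such as void has no runtime bits, so the pointer itself already carries all the information and can be returned unchanged.

const std = @import("std");

test "zero-bit optional payload" {
    try std.testing.expect(@sizeOf(void) == 0);
    var opt: ?void = null;
    opt = {};
    try std.testing.expect(opt != null);
}
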
@@ -7283,13 +7316,14 @@ pub const FuncGen = struct {
comptime assert(optional_layout_version == 3);
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand).childType(mod);
const payload_ty = optional_ty.optionalChild(mod);
const non_null_bit = try o.builder.intValue(.i8, 1);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// We have a pointer to an i8. We need to set it to 1 and then return the same pointer.
_ = try self.wip.store(.normal, non_null_bit, operand, .default);
return operand;
@@ -7314,13 +7348,14 @@ pub const FuncGen = struct {
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand);
const payload_ty = self.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
if (optional_ty.optionalReprIsPayload(mod)) {
// Payload value is the same as the optional value.
@@ -7328,7 +7363,7 @@ pub const FuncGen = struct {
}
const opt_llvm_ty = try o.lowerType(optional_ty);
- const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false;
return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
}
@@ -7338,7 +7373,8 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -7347,17 +7383,17 @@ pub const FuncGen = struct {
const result_ty = self.typeOfIndex(inst);
const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty;
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return if (operand_is_ptr) operand else .none;
}
- const offset = try errUnionPayloadOffset(payload_ty, mod);
+ const offset = try errUnionPayloadOffset(payload_ty, pt);
const err_union_llvm_ty = try o.lowerType(err_union_ty);
if (operand_is_ptr) {
return self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
- } else if (isByRef(err_union_ty, mod)) {
- const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
+ } else if (isByRef(err_union_ty, pt)) {
+ const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
- if (isByRef(payload_ty, mod)) {
+ if (isByRef(payload_ty, pt)) {
if (self.canElideLoad(body_tail)) return payload_ptr;
return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
}
@@ -7373,7 +7409,8 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -7388,14 +7425,14 @@ pub const FuncGen = struct {
}
const payload_ty = err_union_ty.errorUnionPayload(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (!operand_is_ptr) return operand;
return self.wip.load(.normal, error_type, operand, .default, "");
}
- const offset = try errUnionErrorOffset(payload_ty, mod);
+ const offset = try errUnionErrorOffset(payload_ty, pt);
- if (operand_is_ptr or isByRef(err_union_ty, mod)) {
+ if (operand_is_ptr or isByRef(err_union_ty, pt)) {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
return self.wip.load(.normal, error_type, err_field_ptr, .default, "");
@@ -7406,22 +7443,23 @@ pub const FuncGen = struct {
fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
const payload_ty = err_union_ty.errorUnionPayload(mod);
const non_error_val = try o.builder.intValue(try o.errorIntType(), 0);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
_ = try self.wip.store(.normal, non_error_val, operand, .default);
return operand;
}
const err_union_llvm_ty = try o.lowerType(err_union_ty);
{
- const err_int_ty = try mod.errorIntType();
- const error_alignment = err_int_ty.abiAlignment(mod).toLlvm();
- const error_offset = try errUnionErrorOffset(payload_ty, mod);
+ const err_int_ty = try pt.errorIntType();
+ const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+ const error_offset = try errUnionErrorOffset(payload_ty, pt);
// First set the non-error value.
const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
_ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment);
@@ -7429,7 +7467,7 @@ pub const FuncGen = struct {
// Then return the payload pointer (only if it is used).
if (self.liveness.isUnused(inst)) return .none;
- const payload_offset = try errUnionPayloadOffset(payload_ty, mod);
+ const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, "");
}
@@ -7446,19 +7484,21 @@ pub const FuncGen = struct {
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
+ const pt = o.pt;
+ const mod = pt.zcu;
+
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_ty = ty_pl.ty.toType();
const field_index = ty_pl.payload;
- const mod = o.module;
const struct_llvm_ty = try o.lowerType(struct_ty);
const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
assert(self.err_ret_trace != .none);
const field_ptr =
try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, "");
- const field_alignment = struct_ty.structFieldAlign(field_index, mod);
+ const field_alignment = struct_ty.structFieldAlign(field_index, pt);
const field_ty = struct_ty.structFieldType(field_index, mod);
- const field_ptr_ty = try mod.ptrType(.{
+ const field_ptr_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{ .alignment = field_alignment },
});
@@ -7490,29 +7530,30 @@ pub const FuncGen = struct {
fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const payload_ty = self.typeOf(ty_op.operand);
const non_null_bit = try o.builder.intValue(.i8, 1);
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOfIndex(inst);
if (optional_ty.optionalReprIsPayload(mod)) return operand;
const llvm_optional_ty = try o.lowerType(optional_ty);
- if (isByRef(optional_ty, mod)) {
+ if (isByRef(optional_ty, pt)) {
const directReturn = self.isNextRet(body_tail);
const optional_ptr = if (directReturn)
self.ret_ptr
else brk: {
- const alignment = optional_ty.abiAlignment(mod).toLlvm();
+ const alignment = optional_ty.abiAlignment(pt).toLlvm();
const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment);
break :brk optional_ptr;
};
const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, "");
- const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
+ const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
try self.store(payload_ptr, payload_ptr_ty, operand, .none);
const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, "");
_ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default);
@@ -7523,36 +7564,36 @@ pub const FuncGen = struct {
fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_un_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const payload_ty = self.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return operand;
}
const ok_err_code = try o.builder.intValue(try o.errorIntType(), 0);
const err_un_llvm_ty = try o.lowerType(err_un_ty);
- const payload_offset = try errUnionPayloadOffset(payload_ty, mod);
- const error_offset = try errUnionErrorOffset(payload_ty, mod);
- if (isByRef(err_un_ty, mod)) {
+ const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
+ const error_offset = try errUnionErrorOffset(payload_ty, pt);
+ if (isByRef(err_un_ty, pt)) {
const directReturn = self.isNextRet(body_tail);
const result_ptr = if (directReturn)
self.ret_ptr
else brk: {
- const alignment = err_un_ty.abiAlignment(mod).toLlvm();
+ const alignment = err_un_ty.abiAlignment(pt).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
break :brk result_ptr;
};
const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
- const err_int_ty = try mod.errorIntType();
- const error_alignment = err_int_ty.abiAlignment(mod).toLlvm();
+ const err_int_ty = try pt.errorIntType();
+ const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
_ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment);
const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
- const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
+ const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
try self.store(payload_ptr, payload_ptr_ty, operand, .none);
return result_ptr;
}
@@ -7564,33 +7605,34 @@ pub const FuncGen = struct {
fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_un_ty = self.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return operand;
const err_un_llvm_ty = try o.lowerType(err_un_ty);
- const payload_offset = try errUnionPayloadOffset(payload_ty, mod);
- const error_offset = try errUnionErrorOffset(payload_ty, mod);
- if (isByRef(err_un_ty, mod)) {
+ const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
+ const error_offset = try errUnionErrorOffset(payload_ty, pt);
+ if (isByRef(err_un_ty, pt)) {
const directReturn = self.isNextRet(body_tail);
const result_ptr = if (directReturn)
self.ret_ptr
else brk: {
- const alignment = err_un_ty.abiAlignment(mod).toLlvm();
+ const alignment = err_un_ty.abiAlignment(pt).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
break :brk result_ptr;
};
const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
- const err_int_ty = try mod.errorIntType();
- const error_alignment = err_int_ty.abiAlignment(mod).toLlvm();
+ const err_int_ty = try pt.errorIntType();
+ const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
_ = try self.wip.store(.normal, operand, err_ptr, error_alignment);
const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
- const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
+ const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
// TODO store undef to payload_ptr
_ = payload_ptr;
_ = payload_ptr_ty;
@@ -7624,7 +7666,8 @@ pub const FuncGen = struct {
fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
@@ -7636,7 +7679,7 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
- const alignment = vector_ptr_ty.ptrAlignment(mod).toLlvm();
+ const alignment = vector_ptr_ty.ptrAlignment(pt).toLlvm();
const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");
const new_vector = try self.wip.insertElement(loaded, operand, index, "");
@@ -7646,7 +7689,7 @@ pub const FuncGen = struct {
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7666,7 +7709,7 @@ pub const FuncGen = struct {
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7696,7 +7739,7 @@ pub const FuncGen = struct {
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7714,7 +7757,7 @@ pub const FuncGen = struct {
unsigned_intrinsic: Builder.Intrinsic,
) !Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try fg.resolveInst(bin_op.lhs);
@@ -7762,7 +7805,7 @@ pub const FuncGen = struct {
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7782,7 +7825,7 @@ pub const FuncGen = struct {
fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7803,7 +7846,7 @@ pub const FuncGen = struct {
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7823,7 +7866,7 @@ pub const FuncGen = struct {
fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7844,7 +7887,7 @@ pub const FuncGen = struct {
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7873,7 +7916,7 @@ pub const FuncGen = struct {
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7889,7 +7932,7 @@ pub const FuncGen = struct {
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7921,7 +7964,7 @@ pub const FuncGen = struct {
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7939,7 +7982,7 @@ pub const FuncGen = struct {
fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7956,7 +7999,7 @@ pub const FuncGen = struct {
fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7992,7 +8035,7 @@ pub const FuncGen = struct {
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
@@ -8014,7 +8057,7 @@ pub const FuncGen = struct {
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
@@ -8042,7 +8085,8 @@ pub const FuncGen = struct {
unsigned_intrinsic: Builder.Intrinsic,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -8065,8 +8109,8 @@ pub const FuncGen = struct {
const result_index = o.llvmFieldIndex(inst_ty, 0).?;
const overflow_index = o.llvmFieldIndex(inst_ty, 1).?;
- if (isByRef(inst_ty, mod)) {
- const result_alignment = inst_ty.abiAlignment(mod).toLlvm();
+ if (isByRef(inst_ty, pt)) {
+ const result_alignment = inst_ty.abiAlignment(pt).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment);
{
const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
@@ -8135,7 +8179,7 @@ pub const FuncGen = struct {
return o.builder.addFunction(
try o.builder.fnType(return_type, param_types, .normal),
fn_name,
- toLlvmAddressSpace(.generic, o.module.getTarget()),
+ toLlvmAddressSpace(.generic, o.pt.zcu.getTarget()),
);
}
@@ -8149,8 +8193,8 @@ pub const FuncGen = struct {
params: [2]Builder.Value,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
- const target = o.module.getTarget();
+ const mod = o.pt.zcu;
+ const target = mod.getTarget();
const scalar_ty = ty.scalarType(mod);
const scalar_llvm_ty = try o.lowerType(scalar_ty);
@@ -8255,7 +8299,7 @@ pub const FuncGen = struct {
params: [params_len]Builder.Value,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const target = mod.getTarget();
const scalar_ty = ty.scalarType(mod);
const llvm_ty = try o.lowerType(ty);
@@ -8396,7 +8440,8 @@ pub const FuncGen = struct {
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -8422,8 +8467,8 @@ pub const FuncGen = struct {
const result_index = o.llvmFieldIndex(dest_ty, 0).?;
const overflow_index = o.llvmFieldIndex(dest_ty, 1).?;
- if (isByRef(dest_ty, mod)) {
- const result_alignment = dest_ty.abiAlignment(mod).toLlvm();
+ if (isByRef(dest_ty, pt)) {
+ const result_alignment = dest_ty.abiAlignment(pt).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment);
{
const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
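The overflow hunks above write the lowered result into the two struct fields located via result_index and overflow_index. That {wrapped result, overflow bit} pair is exactly what the overflow builtins expose at the language level; a small sketch in plain Zig:

const std = @import("std");

test "overflow builtins return a value/overflow pair" {
    const add = @addWithOverflow(@as(u8, 250), 10);
    try std.testing.expectEqual(@as(u8, 4), add[0]); // wrapped result
    try std.testing.expectEqual(@as(u1, 1), add[1]); // overflow bit

    const shl = @shlWithOverflow(@as(u8, 0b1100_0000), 1);
    try std.testing.expectEqual(@as(u8, 0b1000_0000), shl[0]);
    try std.testing.expectEqual(@as(u1, 1), shl[1]);
}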
@@ -8466,7 +8511,7 @@ pub const FuncGen = struct {
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -8497,7 +8542,8 @@ pub const FuncGen = struct {
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -8505,7 +8551,7 @@ pub const FuncGen = struct {
const lhs_ty = self.typeOf(bin_op.lhs);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
- const lhs_bits = lhs_scalar_ty.bitSize(mod);
+ const lhs_bits = lhs_scalar_ty.bitSize(pt);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
@@ -8539,7 +8585,7 @@ pub const FuncGen = struct {
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -8558,7 +8604,7 @@ pub const FuncGen = struct {
fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -8580,7 +8626,7 @@ pub const FuncGen = struct {
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dest_ty = self.typeOfIndex(inst);
const dest_llvm_ty = try o.lowerType(dest_ty);
@@ -8604,7 +8650,7 @@ pub const FuncGen = struct {
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -8638,7 +8684,7 @@ pub const FuncGen = struct {
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -8696,9 +8742,10 @@ pub const FuncGen = struct {
fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
- const operand_is_ref = isByRef(operand_ty, mod);
- const result_is_ref = isByRef(inst_ty, mod);
+ const pt = o.pt;
+ const mod = pt.zcu;
+ const operand_is_ref = isByRef(operand_ty, pt);
+ const result_is_ref = isByRef(inst_ty, pt);
const llvm_dest_ty = try o.lowerType(inst_ty);
if (operand_is_ref and result_is_ref) {
@@ -8721,9 +8768,9 @@ pub const FuncGen = struct {
if (!result_is_ref) {
return self.dg.todo("implement bitcast vector to non-ref array", .{});
}
- const alignment = inst_ty.abiAlignment(mod).toLlvm();
+ const alignment = inst_ty.abiAlignment(pt).toLlvm();
const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
- const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
+ const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
if (bitcast_ok) {
_ = try self.wip.store(.normal, operand, array_ptr, alignment);
} else {
@@ -8748,11 +8795,11 @@ pub const FuncGen = struct {
const llvm_vector_ty = try o.lowerType(inst_ty);
if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{});
- const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
+ const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
if (bitcast_ok) {
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
- const alignment = elem_ty.abiAlignment(mod).toLlvm();
+ const alignment = elem_ty.abiAlignment(pt).toLlvm();
return self.wip.load(.normal, llvm_vector_ty, operand, alignment, "");
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8777,24 +8824,25 @@ pub const FuncGen = struct {
}
if (operand_is_ref) {
- const alignment = operand_ty.abiAlignment(mod).toLlvm();
+ const alignment = operand_ty.abiAlignment(pt).toLlvm();
return self.wip.load(.normal, llvm_dest_ty, operand, alignment, "");
}
if (result_is_ref) {
- const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
+ const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
_ = try self.wip.store(.normal, operand, result_ptr, alignment);
return result_ptr;
}
if (llvm_dest_ty.isStruct(&o.builder) or
- ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and operand_ty.bitSize(mod) != inst_ty.bitSize(mod)))
+ ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and
+ operand_ty.bitSize(pt) != inst_ty.bitSize(pt)))
{
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values or vectors with padding bits.
// Therefore, we store operand to alloca, then load for result.
- const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
+ const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
_ = try self.wip.store(.normal, operand, result_ptr, alignment);
return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
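When LLVM refuses a direct bitcast (struct values, vectors with padding bits, mismatched bit sizes), the code above spills the operand to an alloca and reloads it as the destination type. A minimal sketch of the same spill-and-reload idea in plain Zig, assuming only that the copied bytes mean the same thing in both types (the backend uses its Builder API and ABI alignments rather than @alignOf):

const std = @import("std");

fn bitcastViaMemory(comptime Dest: type, src: anytype) Dest {
    const Src = @TypeOf(src);
    // buffer sized and aligned for both types, mirroring the max() of the two
    // alignments used above
    var buf: [@max(@sizeOf(Src), @sizeOf(Dest))]u8 align(@max(@alignOf(Src), @alignOf(Dest))) = undefined;
    @memcpy(buf[0..@sizeOf(Src)], std.mem.asBytes(&src));
    return std.mem.bytesToValue(Dest, buf[0..@sizeOf(Dest)]);
}

test "bitcast via memory sketch" {
    const v: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    try std.testing.expectEqual(@as(u32, @bitCast(v)), bitcastViaMemory(u32, v));
}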
@@ -8811,7 +8859,8 @@ pub const FuncGen = struct {
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
@@ -8847,7 +8896,7 @@ pub const FuncGen = struct {
};
const owner_mod = self.dg.ownerModule();
- if (isByRef(inst_ty, mod)) {
+ if (isByRef(inst_ty, pt)) {
_ = try self.wip.callIntrinsic(
.normal,
.none,
@@ -8861,7 +8910,7 @@ pub const FuncGen = struct {
"",
);
} else if (owner_mod.optimize_mode == .Debug) {
- const alignment = inst_ty.abiAlignment(mod).toLlvm();
+ const alignment = inst_ty.abiAlignment(pt).toLlvm();
const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_val, alloca, alignment);
_ = try self.wip.callIntrinsic(
@@ -8897,27 +8946,29 @@ pub const FuncGen = struct {
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
const pointee_type = ptr_ty.childType(mod);
- if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod))
+ if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(pt))
return (try o.lowerPtrToVoid(ptr_ty)).toValue();
//const pointee_llvm_ty = try o.lowerType(pointee_type);
- const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
+ const alignment = ptr_ty.ptrAlignment(pt).toLlvm();
return self.buildAllocaWorkaround(pointee_type, alignment);
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
const ret_ty = ptr_ty.childType(mod);
- if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt))
return (try o.lowerPtrToVoid(ptr_ty)).toValue();
if (self.ret_ptr != .none) return self.ret_ptr;
//const ret_llvm_ty = try o.lowerType(ret_ty);
- const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
+ const alignment = ptr_ty.ptrAlignment(pt).toLlvm();
return self.buildAllocaWorkaround(ret_ty, alignment);
}
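Both allocation paths above skip the alloca entirely when the pointee has no runtime bits and return lowerPtrToVoid's placeholder pointer instead. A quick illustration of the kinds of types that branch covers:

const std = @import("std");

test "zero-bit pointees need no storage" {
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(void));
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(u0));
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(struct {}));
}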
@@ -8928,7 +8979,7 @@ pub const FuncGen = struct {
llvm_ty: Builder.Type,
alignment: Builder.Alignment,
) Allocator.Error!Builder.Value {
- const target = self.dg.object.module.getTarget();
+ const target = self.dg.object.pt.zcu.getTarget();
return buildAllocaInner(&self.wip, llvm_ty, alignment, target);
}
@@ -8939,18 +8990,19 @@ pub const FuncGen = struct {
alignment: Builder.Alignment,
) Allocator.Error!Builder.Value {
const o = self.dg.object;
- return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.module), .i8), alignment);
+ return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment);
}
fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType(mod);
- const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
+ const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
const ptr_info = ptr_ty.ptrInfo(mod);
const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
@@ -8964,10 +9016,10 @@ pub const FuncGen = struct {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
- const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod));
+ const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(pt));
_ = try self.wip.callMemSet(
dest_ptr,
- ptr_ty.ptrAlignment(mod).toLlvm(),
+ ptr_ty.ptrAlignment(pt).toLlvm(),
if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8),
len,
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
@@ -8992,7 +9044,7 @@ pub const FuncGen = struct {
/// The first instruction of `body_tail` is the one whose copy we want to elide.
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
const o = fg.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ip = &mod.intern_pool;
for (body_tail[1..]) |body_inst| {
switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
@@ -9008,7 +9060,8 @@ pub const FuncGen = struct {
fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const inst = body_tail[0];
const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = fg.typeOf(ty_op.operand);
@@ -9016,7 +9069,7 @@ pub const FuncGen = struct {
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
- if (!isByRef(Type.fromInterned(ptr_info.child), mod)) break :elide;
+ if (!isByRef(Type.fromInterned(ptr_info.child), pt)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
@@ -9040,7 +9093,7 @@ pub const FuncGen = struct {
_ = inst;
const o = self.dg.object;
const llvm_usize = try o.lowerType(Type.usize);
- if (!target_util.supportsReturnAddress(o.module.getTarget())) {
+ if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) {
// https://github.com/ziglang/zig/issues/11946
return o.builder.intValue(llvm_usize, 0);
}
@@ -9068,7 +9121,8 @@ pub const FuncGen = struct {
kind: Builder.Function.Instruction.CmpXchg.Kind,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr = try self.resolveInst(extra.ptr);
@@ -9095,7 +9149,7 @@ pub const FuncGen = struct {
self.sync_scope,
toLlvmAtomicOrdering(extra.successOrder()),
toLlvmAtomicOrdering(extra.failureOrder()),
- ptr_ty.ptrAlignment(mod).toLlvm(),
+ ptr_ty.ptrAlignment(pt).toLlvm(),
"",
);
@@ -9118,7 +9172,8 @@ pub const FuncGen = struct {
fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
@@ -9134,7 +9189,7 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
- const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm();
+ const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm();
if (llvm_abi_ty != .none) {
// operand needs widening and truncating or bitcasting.
@@ -9181,19 +9236,20 @@ pub const FuncGen = struct {
fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.typeOf(atomic_load.ptr);
const info = ptr_ty.ptrInfo(mod);
const elem_ty = Type.fromInterned(info.child);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
- Type.fromInterned(info.child).abiAlignment(mod)).toLlvm();
+ Type.fromInterned(info.child).abiAlignment(pt)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9228,11 +9284,12 @@ pub const FuncGen = struct {
ordering: Builder.AtomicOrdering,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType(mod);
- if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .none;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
@@ -9252,12 +9309,13 @@ pub const FuncGen = struct {
fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOf(bin_op.rhs);
- const dest_ptr_align = ptr_ty.ptrAlignment(mod).toLlvm();
+ const dest_ptr_align = ptr_ty.ptrAlignment(pt).toLlvm();
const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty);
const access_kind: Builder.MemoryAccessKind =
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
@@ -9270,7 +9328,7 @@ pub const FuncGen = struct {
ptr_ty.isSlice(mod) and
std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory);
- if (try self.air.value(bin_op.rhs, mod)) |elem_val| {
+ if (try self.air.value(bin_op.rhs, pt)) |elem_val| {
if (elem_val.isUndefDeep(mod)) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
@@ -9296,7 +9354,7 @@ pub const FuncGen = struct {
// repeating byte pattern, for example, `@as(u64, 0)` has a
// repeating byte pattern of 0 bytes. In such case, the memset
// intrinsic can be used.
- if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
+ if (try elem_val.hasRepeatedByteRepr(elem_ty, pt)) |byte_val| {
const fill_byte = try o.builder.intValue(.i8, byte_val);
const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
if (intrinsic_len0_traps) {
@@ -9309,7 +9367,7 @@ pub const FuncGen = struct {
}
const value = try self.resolveInst(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(mod);
+ const elem_abi_size = elem_ty.abiSize(pt);
if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
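The memset lowering above only reaches for the LLVM intrinsic when the element value reduces to a single repeating byte. hasRepeatedByteRepr performs that check on comptime Values; the sketch below makes the equivalent check on runtime bytes, purely as an illustration:

const std = @import("std");

// Returns the repeated byte if every byte of `value` is identical, else null.
// Assumes a non-zero-size T.
fn repeatedByte(comptime T: type, value: T) ?u8 {
    const bytes = std.mem.asBytes(&value);
    const first = bytes[0];
    for (bytes[1..]) |b| if (b != first) return null;
    return first;
}

test "repeated byte pattern sketch" {
    try std.testing.expectEqual(@as(?u8, 0), repeatedByte(u64, 0));
    try std.testing.expectEqual(@as(?u8, null), repeatedByte(u32, 0x01020304));
}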
@@ -9361,9 +9419,9 @@ pub const FuncGen = struct {
_ = try self.wip.brCond(end, body_block, end_block);
self.wip.cursor = .{ .block = body_block };
- const elem_abi_align = elem_ty.abiAlignment(mod);
+ const elem_abi_align = elem_ty.abiAlignment(pt);
const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm();
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
_ = try self.wip.callMemCpy(
it_ptr.toValue(),
it_ptr_align,
@@ -9405,7 +9463,8 @@ pub const FuncGen = struct {
fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const dest_ptr_ty = self.typeOf(bin_op.lhs);
@@ -9434,9 +9493,9 @@ pub const FuncGen = struct {
self.wip.cursor = .{ .block = memcpy_block };
_ = try self.wip.callMemCpy(
dest_ptr,
- dest_ptr_ty.ptrAlignment(mod).toLlvm(),
+ dest_ptr_ty.ptrAlignment(pt).toLlvm(),
src_ptr,
- src_ptr_ty.ptrAlignment(mod).toLlvm(),
+ src_ptr_ty.ptrAlignment(pt).toLlvm(),
len,
access_kind,
);
@@ -9447,9 +9506,9 @@ pub const FuncGen = struct {
_ = try self.wip.callMemCpy(
dest_ptr,
- dest_ptr_ty.ptrAlignment(mod).toLlvm(),
+ dest_ptr_ty.ptrAlignment(pt).toLlvm(),
src_ptr,
- src_ptr_ty.ptrAlignment(mod).toLlvm(),
+ src_ptr_ty.ptrAlignment(pt).toLlvm(),
len,
access_kind,
);
@@ -9458,10 +9517,11 @@ pub const FuncGen = struct {
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const un_ty = self.typeOf(bin_op.lhs).childType(mod);
- const layout = un_ty.unionGetLayout(mod);
+ const layout = un_ty.unionGetLayout(pt);
if (layout.tag_size == 0) return .none;
const union_ptr = try self.resolveInst(bin_op.lhs);
const new_tag = try self.resolveInst(bin_op.rhs);
@@ -9479,13 +9539,13 @@ pub const FuncGen = struct {
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = self.typeOf(ty_op.operand);
- const layout = un_ty.unionGetLayout(mod);
+ const layout = un_ty.unionGetLayout(pt);
if (layout.tag_size == 0) return .none;
const union_handle = try self.resolveInst(ty_op.operand);
- if (isByRef(un_ty, mod)) {
+ if (isByRef(un_ty, pt)) {
const llvm_un_ty = try o.lowerType(un_ty);
if (layout.payload_size == 0)
return self.wip.load(.normal, llvm_un_ty, union_handle, .default, "");
@@ -9554,7 +9614,7 @@ pub const FuncGen = struct {
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
var bits = operand_ty.intInfo(mod).bits;
@@ -9588,7 +9648,7 @@ pub const FuncGen = struct {
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const ip = &mod.intern_pool;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -9638,7 +9698,8 @@ pub const FuncGen = struct {
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
const o = self.dg.object;
- const zcu = o.module;
+ const pt = o.pt;
+ const zcu = pt.zcu;
const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
@@ -9678,7 +9739,7 @@ pub const FuncGen = struct {
for (0..enum_type.names.len) |field_index| {
const this_tag_int_value = try o.lowerValue(
- (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
+ (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
);
try wip_switch.addCase(this_tag_int_value, named_block, &wip);
}
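getIsNamedEnumValueFunction emits one switch case per declared tag and falls through to false for everything else. A user-level analogue for a non-exhaustive enum (the enum E here is hypothetical, for illustration only):

const std = @import("std");

const E = enum(u8) { a = 1, b = 2, _ };

fn isNamed(value: u8) bool {
    return switch (@as(E, @enumFromInt(value))) {
        .a, .b => true,
        _ => false, // unnamed values of the non-exhaustive enum
    };
}

test "is named enum value sketch" {
    try std.testing.expect(isNamed(1));
    try std.testing.expect(!isNamed(3));
}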
@@ -9745,7 +9806,8 @@ pub const FuncGen = struct {
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
@@ -9763,11 +9825,11 @@ pub const FuncGen = struct {
defer self.gpa.free(values);
for (values, 0..) |*val, i| {
- const elem = try mask.elemValue(mod, i);
+ const elem = try mask.elemValue(pt, i);
if (elem.isUndef(mod)) {
val.* = try o.builder.undefConst(.i32);
} else {
- const int = elem.toSignedInt(mod);
+ const int = elem.toSignedInt(pt);
const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len);
val.* = try o.builder.intConst(.i32, unsigned);
}
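The mask handling above follows @shuffle's convention: a non-negative mask element selects from `a`, while a negative element m selects element ~m of `b`, so the flattened LLVM index becomes ~m + a_len. A small sketch of that arithmetic:

const std = @import("std");

fn llvmMaskIndex(m: i32, a_len: u32) u32 {
    return @intCast(if (m >= 0) m else ~m + @as(i32, @intCast(a_len)));
}

test "shuffle mask index sketch" {
    try std.testing.expectEqual(@as(u32, 1), llvmMaskIndex(1, 4)); // element 1 of a
    try std.testing.expectEqual(@as(u32, 4), llvmMaskIndex(-1, 4)); // ~-1 == 0, element 0 of b
}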
@@ -9854,7 +9916,7 @@ pub const FuncGen = struct {
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
const target = mod.getTarget();
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
@@ -9964,7 +10026,8 @@ pub const FuncGen = struct {
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = self.typeOfIndex(inst);
@@ -9986,16 +10049,16 @@ pub const FuncGen = struct {
if (mod.typeToPackedStruct(result_ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
assert(backing_int_ty != .none);
- const big_bits = Type.fromInterned(backing_int_ty).bitSize(mod);
+ const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt);
const int_ty = try o.builder.intType(@intCast(big_bits));
comptime assert(Type.packed_struct_layout_version == 2);
var running_int = try o.builder.intValue(int_ty, 0);
var running_bits: u16 = 0;
for (elements, struct_type.field_types.get(ip)) |elem, field_ty| {
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod));
+ const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt));
const small_int_ty = try o.builder.intType(ty_bit_size);
const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
@@ -10013,23 +10076,23 @@ pub const FuncGen = struct {
assert(result_ty.containerLayout(mod) != .@"packed");
- if (isByRef(result_ty, mod)) {
+ if (isByRef(result_ty, pt)) {
// TODO in debug builds init to undef so that the padding will be 0xaa
// even if we fully populate the fields.
- const alignment = result_ty.abiAlignment(mod).toLlvm();
+ const alignment = result_ty.abiAlignment(pt).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
for (elements, 0..) |elem, i| {
- if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
const llvm_i = o.llvmFieldIndex(result_ty, i).?;
const field_ptr =
try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, "");
- const field_ptr_ty = try mod.ptrType(.{
+ const field_ptr_ty = try pt.ptrType(.{
.child = self.typeOf(elem).toIntern(),
.flags = .{
- .alignment = result_ty.structFieldAlign(i, mod),
+ .alignment = result_ty.structFieldAlign(i, pt),
},
});
try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
@@ -10039,7 +10102,7 @@ pub const FuncGen = struct {
} else {
var result = try o.builder.poisonValue(llvm_result_ty);
for (elements, 0..) |elem, i| {
- if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
const llvm_i = o.llvmFieldIndex(result_ty, i).?;
@@ -10049,15 +10112,15 @@ pub const FuncGen = struct {
}
},
.Array => {
- assert(isByRef(result_ty, mod));
+ assert(isByRef(result_ty, pt));
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
- const alignment = result_ty.abiAlignment(mod).toLlvm();
+ const alignment = result_ty.abiAlignment(pt).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
const array_info = result_ty.arrayInfo(mod);
- const elem_ptr_ty = try mod.ptrType(.{
+ const elem_ptr_ty = try pt.ptrType(.{
.child = array_info.elem_type.toIntern(),
});
@@ -10084,21 +10147,22 @@ pub const FuncGen = struct {
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = self.typeOfIndex(inst);
const union_llvm_ty = try o.lowerType(union_ty);
- const layout = union_ty.unionGetLayout(mod);
+ const layout = union_ty.unionGetLayout(pt);
const union_obj = mod.typeToUnion(union_ty).?;
if (union_obj.getLayout(ip) == .@"packed") {
- const big_bits = union_ty.bitSize(mod);
+ const big_bits = union_ty.bitSize(pt);
const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const non_int_val = try self.resolveInst(extra.init);
- const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
const small_int_val = if (field_ty.isPtrAtRuntime(mod))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
@@ -10110,19 +10174,19 @@ pub const FuncGen = struct {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
- const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
- break :blk try tag_val.intFromEnum(tag_ty, mod);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
+ break :blk try tag_val.intFromEnum(tag_ty, pt);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
return .none;
}
- assert(!isByRef(union_ty, mod));
+ assert(!isByRef(union_ty, pt));
var big_int_space: Value.BigIntSpace = undefined;
- const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod);
+ const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt);
return try o.builder.bigIntValue(union_llvm_ty, tag_big_int);
}
- assert(isByRef(union_ty, mod));
+ assert(isByRef(union_ty, pt));
// The llvm type of the alloca will be the named LLVM union type, and will not
// necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set
@@ -10132,14 +10196,14 @@ pub const FuncGen = struct {
const llvm_payload = try self.resolveInst(extra.init);
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_llvm_ty = try o.lowerType(field_ty);
- const field_size = field_ty.abiSize(mod);
- const field_align = mod.unionFieldNormalAlignment(union_obj, extra.field_index);
+ const field_size = field_ty.abiSize(pt);
+ const field_align = pt.unionFieldNormalAlignment(union_obj, extra.field_index);
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
const llvm_union_ty = t: {
const payload_ty = p: {
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const padding_len = layout.payload_size;
break :p try o.builder.arrayType(padding_len, .i8);
}
@@ -10169,7 +10233,7 @@ pub const FuncGen = struct {
// Now we follow the layout as expressed above with GEP instructions to set the
// tag and the payload.
- const field_ptr_ty = try mod.ptrType(.{
+ const field_ptr_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{ .alignment = field_align },
});
@@ -10195,9 +10259,9 @@ pub const FuncGen = struct {
const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
var big_int_space: Value.BigIntSpace = undefined;
- const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod);
+ const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt);
const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int);
- const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(mod).toLlvm();
+ const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(pt).toLlvm();
_ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
}
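airUnionInit stores both the payload bytes and, when the union carries a tag, the tag value at its own alignment. At the language level those are the two observable effects of initializing a tagged union; a minimal sketch with a hypothetical union U:

const std = @import("std");

const U = union(enum) { none: void, word: u32 };

test "tagged union init sketch" {
    const value: U = .{ .word = 42 };
    try std.testing.expect(value == .word); // tag was set
    try std.testing.expectEqual(@as(u32, 42), value.word); // payload was set
}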
@@ -10223,7 +10287,7 @@ pub const FuncGen = struct {
// by the target.
// To work around this, don't emit llvm.prefetch in this case.
// See https://bugs.llvm.org/show_bug.cgi?id=21037
- const mod = o.module;
+ const mod = o.pt.zcu;
const target = mod.getTarget();
switch (prefetch.cache) {
.instruction => switch (target.cpu.arch) {
@@ -10279,7 +10343,7 @@ pub const FuncGen = struct {
fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const target = o.module.getTarget();
+ const target = o.pt.zcu.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -10289,7 +10353,7 @@ pub const FuncGen = struct {
fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const target = o.module.getTarget();
+ const target = o.pt.zcu.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -10312,7 +10376,7 @@ pub const FuncGen = struct {
fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const target = o.module.getTarget();
+ const target = o.pt.zcu.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -10322,7 +10386,7 @@ pub const FuncGen = struct {
fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
const table = o.error_name_table;
if (table != .none) return table;
@@ -10334,7 +10398,7 @@ pub const FuncGen = struct {
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
variable_index.setAlignment(
- Type.slice_const_u8_sentinel_0.abiAlignment(mod).toLlvm(),
+ Type.slice_const_u8_sentinel_0.abiAlignment(pt).toLlvm(),
&o.builder,
);
@@ -10372,15 +10436,16 @@ pub const FuncGen = struct {
can_elide_load: bool,
) !Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const payload_ty = opt_ty.optionalChild(mod);
- if (isByRef(opt_ty, mod)) {
+ if (isByRef(opt_ty, pt)) {
// We have a pointer and we need to return a pointer to the first field.
const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, "");
- const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
- if (isByRef(payload_ty, mod)) {
+ const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(payload_ty, pt)) {
if (can_elide_load)
return payload_ptr;
@@ -10389,7 +10454,7 @@ pub const FuncGen = struct {
return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment);
}
- assert(!isByRef(payload_ty, mod));
+ assert(!isByRef(payload_ty, pt));
return fg.wip.extractValue(opt_handle, &.{0}, "");
}
@@ -10400,12 +10465,12 @@ pub const FuncGen = struct {
non_null_bit: Builder.Value,
) !Builder.Value {
const o = self.dg.object;
+ const pt = o.pt;
const optional_llvm_ty = try o.lowerType(optional_ty);
const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, "");
- const mod = o.module;
- if (isByRef(optional_ty, mod)) {
- const payload_alignment = optional_ty.abiAlignment(mod).toLlvm();
+ if (isByRef(optional_ty, pt)) {
+ const payload_alignment = optional_ty.abiAlignment(pt).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment);
{
@@ -10432,7 +10497,8 @@ pub const FuncGen = struct {
field_index: u32,
) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const struct_ty = struct_ptr_ty.childType(mod);
switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout(mod)) {
@@ -10452,7 +10518,7 @@ pub const FuncGen = struct {
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
- const byte_offset = @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
+ const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
if (byte_offset == 0) return struct_ptr;
const usize_ty = try o.lowerType(Type.usize);
const llvm_index = try o.builder.intValue(usize_ty, byte_offset);
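The byte-aligned packed-field case above turns the accumulated bit offset into a plain byte offset with @divExact(bit_offset, 8). The same arithmetic with a hypothetical packed struct, assuming @bitOffsetOf reports the layout the backend computes:

const std = @import("std");

const P = packed struct { a: u8, b: u8, c: u16 };

test "packed field byte offset sketch" {
    const bit_off = @bitOffsetOf(P, "c"); // 16: c starts after two u8 fields
    try std.testing.expectEqual(@as(usize, 2), @divExact(bit_off, 8));
}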
@@ -10470,14 +10536,14 @@ pub const FuncGen = struct {
// the struct.
const llvm_index = try o.builder.intValue(
try o.lowerType(Type.usize),
- @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)),
+ @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(pt)),
);
return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, "");
}
},
},
.Union => {
- const layout = struct_ty.unionGetLayout(mod);
+ const layout = struct_ty.unionGetLayout(pt);
if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr;
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const union_llvm_ty = try o.lowerType(struct_ty);
@@ -10500,9 +10566,10 @@ pub const FuncGen = struct {
// => so load the byte aligned value and trunc the unwanted bits.
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const payload_llvm_ty = try o.lowerType(payload_ty);
- const abi_size = payload_ty.abiSize(mod);
+ const abi_size = payload_ty.abiSize(pt);
// llvm bug workarounds:
const workaround_explicit_mask = o.target.cpu.arch == .powerpc and abi_size >= 4;
@@ -10522,7 +10589,7 @@ pub const FuncGen = struct {
const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .big)
try fg.wip.bin(.lshr, loaded, try o.builder.intValue(
load_llvm_ty,
- (payload_ty.abiSize(mod) - (std.math.divCeil(u64, payload_ty.bitSize(mod), 8) catch unreachable)) * 8,
+ (payload_ty.abiSize(pt) - (std.math.divCeil(u64, payload_ty.bitSize(pt), 8) catch unreachable)) * 8,
), "")
else
loaded;
@@ -10546,11 +10613,11 @@ pub const FuncGen = struct {
access_kind: Builder.MemoryAccessKind,
) !Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
//const pointee_llvm_ty = try o.lowerType(pointee_type);
- const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm();
+ const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm();
const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align);
- const size_bytes = pointee_type.abiSize(mod);
+ const size_bytes = pointee_type.abiSize(pt);
_ = try fg.wip.callMemCpy(
result_ptr,
result_align,
@@ -10567,15 +10634,16 @@ pub const FuncGen = struct {
/// For isByRef=false types, it creates a load instruction and returns it.
fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const info = ptr_ty.ptrInfo(mod);
const elem_ty = Type.fromInterned(info.child);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
- elem_ty.abiAlignment(mod)).toLlvm();
+ elem_ty.abiAlignment(pt)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10591,7 +10659,7 @@ pub const FuncGen = struct {
}
if (info.packed_offset.host_size == 0) {
- if (isByRef(elem_ty, mod)) {
+ if (isByRef(elem_ty, pt)) {
return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind);
}
return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment);
@@ -10601,13 +10669,13 @@ pub const FuncGen = struct {
const containing_int =
try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, "");
- const elem_bits = ptr_ty.childType(mod).bitSize(mod);
+ const elem_bits = ptr_ty.childType(mod).bitSize(pt);
const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset);
const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(elem_ty);
- if (isByRef(elem_ty, mod)) {
- const result_align = elem_ty.abiAlignment(mod).toLlvm();
+ if (isByRef(elem_ty, pt)) {
+ const result_align = elem_ty.abiAlignment(pt).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align);
const same_size_int = try o.builder.intType(@intCast(elem_bits));
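The packed load path above reads the containing ("host") integer, shifts right by the field's bit offset, and truncates down to the field's width. The same extraction written in plain Zig, as a sketch:

const std = @import("std");

fn extractField(comptime Field: type, comptime Host: type, host: Host, bit_offset: std.math.Log2Int(Host)) Field {
    return @truncate(host >> bit_offset);
}

test "packed field extraction sketch" {
    const host: u32 = 0xAABBCCDD;
    try std.testing.expectEqual(@as(u8, 0xBB), extractField(u8, u32, host, 16));
}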
@@ -10639,13 +10707,14 @@ pub const FuncGen = struct {
ordering: Builder.AtomicOrdering,
) !void {
const o = self.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const info = ptr_ty.ptrInfo(mod);
const elem_ty = Type.fromInterned(info.child);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
return;
}
- const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm();
+ const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10669,7 +10738,7 @@ pub const FuncGen = struct {
assert(ordering == .none);
const containing_int =
try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, "");
- const elem_bits = ptr_ty.childType(mod).bitSize(mod);
+ const elem_bits = ptr_ty.childType(mod).bitSize(pt);
const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset);
// Convert to equally-sized integer type in order to perform the bit
// operations on the value to store
@@ -10704,7 +10773,7 @@ pub const FuncGen = struct {
_ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment);
return;
}
- if (!isByRef(elem_ty, mod)) {
+ if (!isByRef(elem_ty, pt)) {
_ = try self.wip.storeAtomic(
access_kind,
elem,
@@ -10720,8 +10789,8 @@ pub const FuncGen = struct {
ptr,
ptr_alignment,
elem,
- elem_ty.abiAlignment(mod).toLlvm(),
- try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)),
+ elem_ty.abiAlignment(pt).toLlvm(),
+ try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(pt)),
access_kind,
);
}
@@ -10747,12 +10816,13 @@ pub const FuncGen = struct {
a5: Builder.Value,
) Allocator.Error!Builder.Value {
const o = fg.dg.object;
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const target = mod.getTarget();
if (!target_util.hasValgrindSupport(target)) return default_value;
const llvm_usize = try o.lowerType(Type.usize);
- const usize_alignment = Type.usize.abiAlignment(mod).toLlvm();
+ const usize_alignment = Type.usize.abiAlignment(pt).toLlvm();
const array_llvm_ty = try o.builder.arrayType(6, llvm_usize);
const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
@@ -10813,13 +10883,13 @@ pub const FuncGen = struct {
fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
const o = fg.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
return fg.air.typeOf(inst, &mod.intern_pool);
}
fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
const o = fg.dg.object;
- const mod = o.module;
+ const mod = o.pt.zcu;
return fg.air.typeOfIndex(inst, &mod.intern_pool);
}
};
@@ -10990,12 +11060,12 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
-fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
- if (isByRef(ty, zcu)) {
+fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool {
+ if (isByRef(ty, pt)) {
return true;
} else if (target.cpu.arch.isX86() and
!std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
- ty.totalVectorBits(zcu) >= 512)
+ ty.totalVectorBits(pt) >= 512)
{
// As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns
// "512-bit vector arguments require 'evex512' for AVX512"
@@ -11005,38 +11075,38 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
}
}
-fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, pt: Zcu.PerThread, target: std.Target) bool {
const return_type = Type.fromInterned(fn_info.return_type);
- if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
+ if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) return false;
return switch (fn_info.cc) {
- .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type),
+ .Unspecified, .Inline => returnTypeByRef(pt, target, return_type),
.C => switch (target.cpu.arch) {
.mips, .mipsel => false,
- .x86 => isByRef(return_type, zcu),
+ .x86 => isByRef(return_type, pt),
.x86_64 => switch (target.os.tag) {
- .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
- else => firstParamSRetSystemV(return_type, zcu, target),
+ .windows => x86_64_abi.classifyWindows(return_type, pt) == .memory,
+ else => firstParamSRetSystemV(return_type, pt, target),
},
- .wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
- .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
- .arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
+ .wasm32 => wasm_c_abi.classifyType(return_type, pt)[0] == .indirect,
+ .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, pt) == .memory,
+ .arm, .armeb => switch (arm_c_abi.classifyType(return_type, pt, .ret)) {
.memory, .i64_array => true,
.i32_array => |size| size != 1,
.byval => false,
},
- .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
+ .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, pt) == .memory,
else => false, // TODO investigate C ABI for other architectures
},
- .SysV => firstParamSRetSystemV(return_type, zcu, target),
- .Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
- .Stdcall => !isScalar(zcu, return_type),
+ .SysV => firstParamSRetSystemV(return_type, pt, target),
+ .Win64 => x86_64_abi.classifyWindows(return_type, pt) == .memory,
+ .Stdcall => !isScalar(pt.zcu, return_type),
else => false,
};
}
-fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
- const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
+fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
+ const class = x86_64_abi.classifySystemV(ty, pt, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
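returnTypeByRef above adds an x86-specific escape hatch: without the evex512 feature, vectors of 512 bits or more have to be returned indirectly to avoid the LLVM 18 error quoted in the comment. A tiny sketch of just that predicate (names are illustrative, not the compiler's):

const std = @import("std");

fn needsIndirectReturn(total_vector_bits: u64, has_evex512: bool) bool {
    return !has_evex512 and total_vector_bits >= 512;
}

test "evex512 indirect return sketch" {
    try std.testing.expect(needsIndirectReturn(512, false));
    try std.testing.expect(!needsIndirectReturn(512, true));
    try std.testing.expect(!needsIndirectReturn(256, false));
}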
@@ -11046,9 +11116,10 @@ fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const return_type = Type.fromInterned(fn_info.return_type);
- if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
@@ -11058,12 +11129,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
switch (fn_info.cc) {
.Unspecified,
.Inline,
- => return if (returnTypeByRef(mod, target, return_type)) .void else o.lowerType(return_type),
+ => return if (returnTypeByRef(pt, target, return_type)) .void else o.lowerType(return_type),
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => return o.lowerType(return_type),
- .x86 => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type),
+ .x86 => return if (isByRef(return_type, pt)) .void else o.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(o, fn_info),
else => return lowerSystemVFnRetTy(o, fn_info),
@@ -11072,36 +11143,36 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
}
- const classes = wasm_c_abi.classifyType(return_type, mod);
+ const classes = wasm_c_abi.classifyType(return_type, pt);
if (classes[0] == .indirect or classes[0] == .none) {
return .void;
}
assert(classes[0] == .direct and classes[1] == .none);
- const scalar_type = wasm_c_abi.scalarType(return_type, mod);
- return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8));
+ const scalar_type = wasm_c_abi.scalarType(return_type, pt);
+ return o.builder.intType(@intCast(scalar_type.abiSize(pt) * 8));
},
.aarch64, .aarch64_be => {
- switch (aarch64_c_abi.classifyType(return_type, mod)) {
+ switch (aarch64_c_abi.classifyType(return_type, pt)) {
.memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
- .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))),
+ .integer => return o.builder.intType(@intCast(return_type.bitSize(pt))),
.double_integer => return o.builder.arrayType(2, .i64),
}
},
.arm, .armeb => {
- switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
+ switch (arm_c_abi.classifyType(return_type, pt, .ret)) {
.memory, .i64_array => return .void,
.i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
- switch (riscv_c_abi.classifyType(return_type, mod)) {
+ switch (riscv_c_abi.classifyType(return_type, pt)) {
.memory => return .void,
.integer => {
- return o.builder.intType(@intCast(return_type.bitSize(mod)));
+ return o.builder.intType(@intCast(return_type.bitSize(pt)));
},
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
@@ -11112,7 +11183,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
var types: [8]Builder.Type = undefined;
for (0..return_type.structFieldCount(mod)) |field_index| {
const field_ty = return_type.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
types[types_len] = try o.lowerType(field_ty);
types_len += 1;
}
@@ -11132,14 +11203,14 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
}
fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
const return_type = Type.fromInterned(fn_info.return_type);
- switch (x86_64_abi.classifyWindows(return_type, mod)) {
+ switch (x86_64_abi.classifyWindows(return_type, pt)) {
.integer => {
- if (isScalar(mod, return_type)) {
+ if (isScalar(pt.zcu, return_type)) {
return o.lowerType(return_type);
} else {
- return o.builder.intType(@intCast(return_type.abiSize(mod) * 8));
+ return o.builder.intType(@intCast(return_type.abiSize(pt) * 8));
}
},
.win_i128 => return o.builder.vectorType(.normal, 2, .i64),
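For illustration, a simplified mapping of the Win64 return classification shown above (tag names assumed; the real code consults x86_64_abi.classifyWindows and the type's ABI size):

const Win64Ret = union(enum) {
    same_as_zig_type, // scalars keep their own LLVM type
    int_bits: u64, // other "integer"-class values widen to an iN covering their ABI size
    two_i64_vector, // 128-bit integers come back as <2 x i64>
};

fn classifyWin64RetSketch(is_scalar: bool, is_win_i128: bool, abi_size_bytes: u64) Win64Ret {
    if (is_win_i128) return .two_i64_vector;
    if (is_scalar) return .same_as_zig_type;
    return .{ .int_bits = abi_size_bytes * 8 };
}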
@@ -11150,14 +11221,15 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err
}
fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
- const mod = o.module;
+ const pt = o.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const return_type = Type.fromInterned(fn_info.return_type);
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
}
const target = mod.getTarget();
- const classes = x86_64_abi.classifySystemV(return_type, mod, target, .ret);
+ const classes = x86_64_abi.classifySystemV(return_type, pt, target, .ret);
if (classes[0] == .memory) return .void;
var types_index: u32 = 0;
var types_buffer: [8]Builder.Type = undefined;
@@ -11249,8 +11321,7 @@ const ParamTypeIterator = struct {
pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
- const zcu = it.object.module;
- const ip = &zcu.intern_pool;
+ const ip = &it.object.pt.zcu.intern_pool;
const ty = it.fn_info.param_types.get(ip)[it.zig_index];
it.byval_attr = false;
return nextInner(it, Type.fromInterned(ty));
@@ -11258,8 +11329,7 @@ const ParamTypeIterator = struct {
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering {
- const zcu = it.object.module;
- const ip = &zcu.intern_pool;
+ const ip = &it.object.pt.zcu.intern_pool;
if (it.zig_index >= it.fn_info.param_types.len) {
if (it.zig_index >= args.len) {
return null;
@@ -11272,10 +11342,11 @@ const ParamTypeIterator = struct {
}
fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
- const zcu = it.object.module;
+ const pt = it.object.pt;
+ const zcu = pt.zcu;
const target = zcu.getTarget();
- if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
it.zig_index += 1;
return .no_bits;
}
@@ -11288,11 +11359,11 @@ const ParamTypeIterator = struct {
{
it.llvm_index += 1;
return .slice;
- } else if (isByRef(ty, zcu)) {
+ } else if (isByRef(ty, pt)) {
return .byref;
} else if (target.cpu.arch.isX86() and
!std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
- ty.totalVectorBits(zcu) >= 512)
+ ty.totalVectorBits(pt) >= 512)
{
// As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns
// "512-bit vector arguments require 'evex512' for AVX512"
@@ -11320,7 +11391,7 @@ const ParamTypeIterator = struct {
if (isScalar(zcu, ty)) {
return .byval;
}
- const classes = wasm_c_abi.classifyType(ty, zcu);
+ const classes = wasm_c_abi.classifyType(ty, pt);
if (classes[0] == .indirect) {
return .byref;
}
@@ -11329,7 +11400,7 @@ const ParamTypeIterator = struct {
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (aarch64_c_abi.classifyType(ty, zcu)) {
+ switch (aarch64_c_abi.classifyType(ty, pt)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
@@ -11344,7 +11415,7 @@ const ParamTypeIterator = struct {
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
+ switch (arm_c_abi.classifyType(ty, pt, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
@@ -11359,7 +11430,7 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
if (ty.toIntern() == .f16_type and
!std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
- switch (riscv_c_abi.classifyType(ty, zcu)) {
+ switch (riscv_c_abi.classifyType(ty, pt)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
@@ -11368,7 +11439,7 @@ const ParamTypeIterator = struct {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
@@ -11406,10 +11477,10 @@ const ParamTypeIterator = struct {
}
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
- const zcu = it.object.module;
- switch (x86_64_abi.classifyWindows(ty, zcu)) {
+ const pt = it.object.pt;
+ switch (x86_64_abi.classifyWindows(ty, pt)) {
.integer => {
- if (isScalar(zcu, ty)) {
+ if (isScalar(pt.zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -11439,17 +11510,17 @@ const ParamTypeIterator = struct {
}
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
- const zcu = it.object.module;
- const ip = &zcu.intern_pool;
- const target = zcu.getTarget();
- const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg);
+ const pt = it.object.pt;
+ const ip = &pt.zcu.intern_pool;
+ const target = pt.zcu.getTarget();
+ const classes = x86_64_abi.classifySystemV(ty, pt, target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
- if (isScalar(zcu, ty)) {
+ if (isScalar(pt.zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -11550,7 +11621,7 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp
fn ccAbiPromoteInt(
cc: std.builtin.CallingConvention,
- mod: *Module,
+ mod: *Zcu,
ty: Type,
) ?std.builtin.Signedness {
const target = mod.getTarget();
@@ -11598,13 +11669,13 @@ fn ccAbiPromoteInt(
/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
-fn isByRef(ty: Type, mod: *Module) bool {
+fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 0;
- const ip = &mod.intern_pool;
+ const ip = &pt.zcu.intern_pool;
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -11627,17 +11698,17 @@ fn isByRef(ty: Type, mod: *Module) bool {
.AnyFrame,
=> return false,
- .Array, .Frame => return ty.hasRuntimeBits(mod),
+ .Array, .Frame => return ty.hasRuntimeBits(pt),
.Struct => {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var count: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
count += 1;
if (count > max_fields_byval) return true;
- if (isByRef(Type.fromInterned(field_ty), mod)) return true;
+ if (isByRef(Type.fromInterned(field_ty), pt)) return true;
}
return false;
},
@@ -11655,27 +11726,27 @@ fn isByRef(ty: Type, mod: *Module) bool {
count += 1;
if (count > max_fields_byval) return true;
const field_ty = Type.fromInterned(field_types[field_index]);
- if (isByRef(field_ty, mod)) return true;
+ if (isByRef(field_ty, pt)) return true;
}
return false;
},
- .Union => switch (ty.containerLayout(mod)) {
+ .Union => switch (ty.containerLayout(pt.zcu)) {
.@"packed" => return false,
- else => return ty.hasRuntimeBits(mod),
+ else => return ty.hasRuntimeBits(pt),
},
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const payload_ty = ty.errorUnionPayload(pt.zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return false;
}
return true;
},
.Optional => {
- const payload_ty = ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const payload_ty = ty.optionalChild(pt.zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return false;
}
- if (ty.optionalReprIsPayload(mod)) {
+ if (ty.optionalReprIsPayload(pt.zcu)) {
return false;
}
return true;
@@ -11683,7 +11754,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
}
}
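A toy version of the byref rule documented above, assuming the type has already been reduced to a kind plus a count of runtime fields (the real predicate walks InternPool types):

const SimpleKind = enum { scalar, pointer, aggregate };

fn isByRefSketch(kind: SimpleKind, runtime_field_count: usize) bool {
    // With max_fields_byval = 0, any aggregate that keeps at least one
    // runtime field is passed behind a pointer; scalars stay by value.
    const max_fields_byval = 0;
    return switch (kind) {
        .scalar, .pointer => false,
        .aggregate => runtime_field_count > max_fields_byval,
    };
}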
-fn isScalar(mod: *Module, ty: Type) bool {
+fn isScalar(mod: *Zcu, ty: Type) bool {
return switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
@@ -11774,7 +11845,7 @@ const lt_errors_fn_name = "__zig_lt_errors_len";
/// Without this workaround, LLVM crashes with "unknown codeview register H1"
/// https://github.com/llvm/llvm-project/issues/56484
fn needDbgVarWorkaround(o: *Object) bool {
- const target = o.module.getTarget();
+ const target = o.pt.zcu.getTarget();
if (target.os.tag == .windows and target.cpu.arch == .aarch64) {
return true;
}
@@ -11817,14 +11888,14 @@ fn buildAllocaInner(
return wip.conv(.unneeded, alloca, .ptr, "");
}
-fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) !u1 {
- const err_int_ty = try mod.errorIntType();
- return @intFromBool(err_int_ty.abiAlignment(mod).compare(.gt, payload_ty.abiAlignment(mod)));
+fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 {
+ const err_int_ty = try pt.errorIntType();
+ return @intFromBool(err_int_ty.abiAlignment(pt).compare(.gt, payload_ty.abiAlignment(pt)));
}
-fn errUnionErrorOffset(payload_ty: Type, mod: *Module) !u1 {
- const err_int_ty = try mod.errorIntType();
- return @intFromBool(err_int_ty.abiAlignment(mod).compare(.lte, payload_ty.abiAlignment(mod)));
+fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 {
+ const err_int_ty = try pt.errorIntType();
+ return @intFromBool(err_int_ty.abiAlignment(pt).compare(.lte, payload_ty.abiAlignment(pt)));
}
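The two helpers above always yield complementary indices: the error integer and the payload fill the two slots of the representation, ordered by alignment. A self-contained sketch with plain integer alignments (illustrative only, not the compiler's alignment type):

const std = @import("std");

fn payloadOffsetSketch(err_align: u32, payload_align: u32) u1 {
    return @intFromBool(err_align > payload_align);
}

fn errorOffsetSketch(err_align: u32, payload_align: u32) u1 {
    return @intFromBool(err_align <= payload_align);
}

test "error and payload occupy opposite slots" {
    try std.testing.expectEqual(@as(u1, 1), payloadOffsetSketch(8, 4));
    try std.testing.expectEqual(@as(u1, 0), errorOffsetSketch(8, 4));
    try std.testing.expect(payloadOffsetSketch(2, 16) != errorOffsetSketch(2, 16));
}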
/// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
src/codegen/spirv.zig
@@ -6,9 +6,7 @@ const assert = std.debug.assert;
const Signedness = std.builtin.Signedness;
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
-const Decl = Module.Decl;
+const Decl = Zcu.Decl;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Air = @import("../Air.zig");
@@ -188,12 +186,13 @@ pub const Object = struct {
fn genDecl(
self: *Object,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
air: Air,
liveness: Liveness,
) !void {
- const gpa = self.gpa;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const structured_cfg = namespace.fileScope(zcu).mod.structured_cfg;
@@ -201,7 +200,7 @@ pub const Object = struct {
var decl_gen = DeclGen{
.gpa = gpa,
.object = self,
- .module = zcu,
+ .pt = pt,
.spv = &self.spv,
.decl_index = decl_index,
.air = air,
@@ -235,34 +234,34 @@ pub const Object = struct {
pub fn updateFunc(
self: *Object,
- mod: *Module,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
- const decl_index = mod.funcInfo(func_index).owner_decl;
+ const decl_index = pt.zcu.funcInfo(func_index).owner_decl;
// TODO: Separate types for generating decls and functions?
- try self.genDecl(mod, decl_index, air, liveness);
+ try self.genDecl(pt, decl_index, air, liveness);
}
pub fn updateDecl(
self: *Object,
- mod: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) !void {
- try self.genDecl(mod, decl_index, undefined, undefined);
+ try self.genDecl(pt, decl_index, undefined, undefined);
}
/// Fetch or allocate a result id for decl index. This function also marks the decl as alive.
/// Note: Function does not actually generate the decl, it just allocates an index.
- pub fn resolveDecl(self: *Object, mod: *Module, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index {
- const decl = mod.declPtr(decl_index);
+ pub fn resolveDecl(self: *Object, zcu: *Zcu, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index {
+ const decl = zcu.declPtr(decl_index);
assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false?
const entry = try self.decl_link.getOrPut(self.gpa, decl_index);
if (!entry.found_existing) {
// TODO: Extern fn?
- const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(mod))
+ const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(zcu))
.func
else switch (decl.@"addrspace") {
.generic => .invocation_global,
@@ -285,7 +284,7 @@ const DeclGen = struct {
object: *Object,
/// The per-thread handle to the Zig module that we are generating decls for.
- module: *Module,
+ pt: Zcu.PerThread,
/// The SPIR-V module that instructions should be emitted into.
/// This is the same as `self.object.spv`, repeated here for brevity.
@@ -333,7 +332,7 @@ const DeclGen = struct {
/// If `gen` returned `Error.CodegenFail`, this contains an explanatory message.
/// Memory is owned by `module.gpa`.
- error_msg: ?*Module.ErrorMsg = null,
+ error_msg: ?*Zcu.ErrorMsg = null,
/// Possible errors the `genDecl` function may return.
const Error = error{ CodegenFail, OutOfMemory };
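The field change above is the recurring shape of this commit: code that may create or intern values now threads a per-thread handle and reaches the module through it, while purely read-only queries keep taking the Zcu. A stripped-down sketch of that pattern (all names assumed, not the compiler's actual definitions):

const std = @import("std");

const ZcuSketch = struct {
    gpa: std.mem.Allocator,
};

const PerThreadSketch = struct {
    zcu: *ZcuSketch,
    tid: usize,
};

fn readOnlyQuery(zcu: *ZcuSketch) bool {
    _ = zcu;
    return true;
}

fn creatingQuery(pt: PerThreadSketch) bool {
    // Anything that can allocate into shared compiler state goes through
    // the per-thread handle; reads still only need pt.zcu.
    return readOnlyQuery(pt.zcu);
}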
@@ -410,15 +409,15 @@ const DeclGen = struct {
/// Return the target which we are currently compiling for.
pub fn getTarget(self: *DeclGen) std.Target {
- return self.module.getTarget();
+ return self.pt.zcu.getTarget();
}
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
- const mod = self.module;
- const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod);
+ const zcu = self.pt.zcu;
+ const src_loc = zcu.declPtr(self.decl_index).navSrcLoc(zcu);
assert(self.error_msg == null);
- self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
+ self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, format, args);
return error.CodegenFail;
}
@@ -439,8 +438,9 @@ const DeclGen = struct {
/// Fetch the result-id for a previously generated instruction or constant.
fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
- const mod = self.module;
- if (try self.air.value(inst, mod)) |val| {
+ const pt = self.pt;
+ const mod = pt.zcu;
+ if (try self.air.value(inst, pt)) |val| {
const ty = self.typeOf(inst);
if (ty.zigTypeTag(mod) == .Fn) {
const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
@@ -462,7 +462,7 @@ const DeclGen = struct {
fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index) !IdRef {
// TODO: This cannot be a function at this point, but it should probably be handled anyway.
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
@@ -642,7 +642,7 @@ const DeclGen = struct {
/// Checks whether the type can be directly translated to SPIR-V vectors
fn isSpvVector(self: *DeclGen, ty: Type) bool {
- const mod = self.module;
+ const mod = self.pt.zcu;
const target = self.getTarget();
if (ty.zigTypeTag(mod) != .Vector) return false;
@@ -668,7 +668,7 @@ const DeclGen = struct {
}
fn arithmeticTypeInfo(self: *DeclGen, ty: Type) ArithmeticTypeInfo {
- const mod = self.module;
+ const mod = self.pt.zcu;
const target = self.getTarget();
var scalar_ty = ty.scalarType(mod);
if (scalar_ty.zigTypeTag(mod) == .Enum) {
@@ -744,7 +744,7 @@ const DeclGen = struct {
/// the value to an unsigned int first for Kernels.
fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef {
// TODO: Cache?
- const mod = self.module;
+ const mod = self.pt.zcu;
const scalar_ty = ty.scalarType(mod);
const int_info = scalar_ty.intInfo(mod);
// Use backing bits so that negatives are sign extended
@@ -824,7 +824,7 @@ const DeclGen = struct {
/// Construct a vector at runtime.
/// ty must be a vector type.
fn constructVector(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
assert(ty.vectorLen(mod) == constituents.len);
// Note: older versions of the Khronos SPIRV-LLVM translator crash on this instruction
@@ -848,7 +848,7 @@ const DeclGen = struct {
/// Construct a vector at runtime with all lanes set to the same value.
/// ty must be a vector type.
fn constructVectorSplat(self: *DeclGen, ty: Type, constituent: IdRef) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const n = ty.vectorLen(mod);
const constituents = try self.gpa.alloc(IdRef, n);
@@ -886,12 +886,13 @@ const DeclGen = struct {
return id;
}
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const target = self.getTarget();
const result_ty_id = try self.resolveType(ty, repr);
const ip = &mod.intern_pool;
- log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod, null) });
+ log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(pt), val.fmtValue(pt, null) });
if (val.isUndefDeep(mod)) {
return self.spv.constUndef(result_ty_id);
}
@@ -940,16 +941,16 @@ const DeclGen = struct {
},
.int => {
if (ty.isSignedInt(mod)) {
- break :cache try self.constInt(ty, val.toSignedInt(mod), repr);
+ break :cache try self.constInt(ty, val.toSignedInt(pt), repr);
} else {
- break :cache try self.constInt(ty, val.toUnsignedInt(mod), repr);
+ break :cache try self.constInt(ty, val.toUnsignedInt(pt), repr);
}
},
.float => {
const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
- 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, mod))) },
- 32 => .{ .float32 = val.toFloat(f32, mod) },
- 64 => .{ .float64 = val.toFloat(f64, mod) },
+ 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, pt))) },
+ 32 => .{ .float32 = val.toFloat(f32, pt) },
+ 64 => .{ .float64 = val.toFloat(f64, pt) },
80, 128 => unreachable, // TODO
else => unreachable,
};
@@ -968,17 +969,17 @@ const DeclGen = struct {
.error_union => |error_union| {
// TODO: Error unions may be constructed with constant instructions if the payload type
// allows it. For now, just generate it here regardless.
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try pt.errorIntType();
const err_ty = switch (error_union.val) {
.err_name => ty.errorUnionSet(mod),
.payload => err_int_ty,
};
const err_val = switch (error_union.val) {
- .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{
+ .err_name => |err_name| Value.fromInterned(try pt.intern(.{ .err = .{
.ty = ty.errorUnionSet(mod).toIntern(),
.name = err_name,
- } }))),
- .payload => try mod.intValue(err_int_ty, 0),
+ } })),
+ .payload => try pt.intValue(err_int_ty, 0),
};
const payload_ty = ty.errorUnionPayload(mod);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -988,7 +989,7 @@ const DeclGen = struct {
}
const payload_val = Value.fromInterned(switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+ .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
});
@@ -1007,7 +1008,7 @@ const DeclGen = struct {
return try self.constructStruct(ty, &types, &constituents);
},
.enum_tag => {
- const int_val = try val.intFromEnum(ty, mod);
+ const int_val = try val.intFromEnum(ty, pt);
const int_ty = ty.intTagType(mod);
break :cache try self.constant(int_ty, int_val, repr);
},
@@ -1026,7 +1027,7 @@ const DeclGen = struct {
const payload_ty = ty.optionalChild(mod);
const maybe_payload_val = val.optionalValue(mod);
- if (!payload_ty.hasRuntimeBits(mod)) {
+ if (!payload_ty.hasRuntimeBits(pt)) {
break :cache try self.constBool(maybe_payload_val != null, .indirect);
} else if (ty.optionalReprIsPayload(mod)) {
// Optional representation is a nullable pointer or slice.
@@ -1104,13 +1105,13 @@ const DeclGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
// TODO: Padding?
- const field_val = try val.fieldValue(mod, field_index);
+ const field_val = try val.fieldValue(pt, field_index);
const field_id = try self.constant(field_ty, field_val, .indirect);
try types.append(field_ty);
@@ -1126,7 +1127,7 @@ const DeclGen = struct {
const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
const union_obj = mod.typeToUnion(ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
- const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt))
try self.constant(field_ty, Value.fromInterned(un.val), .direct)
else
null;
@@ -1144,10 +1145,10 @@ const DeclGen = struct {
fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef {
// TODO: Caching??
- const zcu = self.module;
+ const pt = self.pt;
- if (ptr_val.isUndef(zcu)) {
- const result_ty = ptr_val.typeOf(zcu);
+ if (ptr_val.isUndef(pt.zcu)) {
+ const result_ty = ptr_val.typeOf(pt.zcu);
const result_ty_id = try self.resolveType(result_ty, .direct);
return self.spv.constUndef(result_ty_id);
}
@@ -1155,12 +1156,13 @@ const DeclGen = struct {
var arena = std.heap.ArenaAllocator.init(self.gpa);
defer arena.deinit();
- const derivation = try ptr_val.pointerDerivation(arena.allocator(), zcu);
+ const derivation = try ptr_val.pointerDerivation(arena.allocator(), pt);
return self.derivePtr(derivation);
}
fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef {
- const zcu = self.module;
+ const pt = self.pt;
+ const zcu = pt.zcu;
switch (derivation) {
.comptime_alloc_ptr, .comptime_field_ptr => unreachable,
.int => |int| {
@@ -1172,12 +1174,12 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = result_ty_id,
.id_result = result_ptr_id,
- .integer_value = try self.constant(Type.usize, try zcu.intValue(Type.usize, int.addr), .direct),
+ .integer_value = try self.constant(Type.usize, try pt.intValue(Type.usize, int.addr), .direct),
});
return result_ptr_id;
},
.decl_ptr => |decl| {
- const result_ptr_ty = try zcu.declPtr(decl).declPtrType(zcu);
+ const result_ptr_ty = try zcu.declPtr(decl).declPtrType(pt);
return self.constantDeclRef(result_ptr_ty, decl);
},
.anon_decl_ptr => |ad| {
@@ -1188,18 +1190,18 @@ const DeclGen = struct {
.opt_payload_ptr => @panic("TODO"),
.field_ptr => |field| {
const parent_ptr_id = try self.derivePtr(field.parent.*);
- const parent_ptr_ty = try field.parent.ptrType(zcu);
+ const parent_ptr_ty = try field.parent.ptrType(pt);
return self.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx);
},
.elem_ptr => |elem| {
const parent_ptr_id = try self.derivePtr(elem.parent.*);
- const parent_ptr_ty = try elem.parent.ptrType(zcu);
+ const parent_ptr_ty = try elem.parent.ptrType(pt);
const index_id = try self.constInt(Type.usize, elem.elem_idx, .direct);
return self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
},
.offset_and_cast => |oac| {
const parent_ptr_id = try self.derivePtr(oac.parent.*);
- const parent_ptr_ty = try oac.parent.ptrType(zcu);
+ const parent_ptr_ty = try oac.parent.ptrType(pt);
disallow: {
if (oac.byte_offset != 0) break :disallow;
// Allow changing the pointer type child only to restructure arrays.
@@ -1218,8 +1220,8 @@ const DeclGen = struct {
return result_ptr_id;
}
return self.fail("Cannot perform pointer cast: '{}' to '{}'", .{
- parent_ptr_ty.fmt(zcu),
- oac.new_ptr_ty.fmt(zcu),
+ parent_ptr_ty.fmt(pt),
+ oac.new_ptr_ty.fmt(pt),
});
},
}
@@ -1232,7 +1234,8 @@ const DeclGen = struct {
) !IdRef {
// TODO: Merge this function with constantDeclRef.
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_id = try self.resolveType(ty, .direct);
const decl_val = anon_decl.val;
@@ -1247,7 +1250,7 @@ const DeclGen = struct {
}
// const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
// Pointer to nothing - return undefined.
return self.spv.constUndef(ty_id);
}
@@ -1276,7 +1279,8 @@ const DeclGen = struct {
}
fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_id = try self.resolveType(ty, .direct);
const decl = mod.declPtr(decl_index);
@@ -1290,7 +1294,7 @@ const DeclGen = struct {
else => {},
}
- if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
// Pointer to nothing - return undefined.
return self.spv.constUndef(ty_id);
}
@@ -1331,7 +1335,7 @@ const DeclGen = struct {
fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 {
var name = std.ArrayList(u8).init(self.gpa);
defer name.deinit();
- try ty.print(name.writer(), self.module);
+ try ty.print(name.writer(), self.pt);
return try name.toOwnedSlice();
}
@@ -1424,14 +1428,14 @@ const DeclGen = struct {
}
fn zigScalarOrVectorTypeLike(self: *DeclGen, new_ty: Type, base_ty: Type) !Type {
- const mod = self.module;
- const new_scalar_ty = new_ty.scalarType(mod);
- if (!base_ty.isVector(mod)) {
+ const pt = self.pt;
+ const new_scalar_ty = new_ty.scalarType(pt.zcu);
+ if (!base_ty.isVector(pt.zcu)) {
return new_scalar_ty;
}
- return try mod.vectorType(.{
- .len = base_ty.vectorLen(mod),
+ return try pt.vectorType(.{
+ .len = base_ty.vectorLen(pt.zcu),
.child = new_scalar_ty.toIntern(),
});
}
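A comptime analogue of zigScalarOrVectorTypeLike for ordinary Zig types (illustrative; the backend version builds interned compiler types instead):

fn ScalarOrVectorLike(comptime NewScalar: type, comptime Base: type) type {
    return switch (@typeInfo(Base)) {
        .Vector => |info| @Vector(info.len, NewScalar),
        else => NewScalar,
    };
}

// Usage: ScalarOrVectorLike(u8, @Vector(4, u32)) is @Vector(4, u8),
// while ScalarOrVectorLike(u8, u32) is just u8.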
@@ -1455,7 +1459,7 @@ const DeclGen = struct {
/// }
/// If any of the fields' size is 0, it will be omitted.
fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
@@ -1506,12 +1510,12 @@ const DeclGen = struct {
}
fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef {
- const mod = self.module;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const pt = self.pt;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- if (ret_ty.isError(mod)) {
+ if (ret_ty.isError(pt.zcu)) {
return self.resolveType(Type.anyerror, .direct);
} else {
return self.resolveType(Type.void, .direct);
@@ -1533,9 +1537,10 @@ const DeclGen = struct {
}
fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- log.debug("resolveType: ty = {}", .{ty.fmt(mod)});
+ log.debug("resolveType: ty = {}", .{ty.fmt(pt)});
const target = self.getTarget();
const section = &self.spv.sections.types_globals_constants;
@@ -1607,7 +1612,7 @@ const DeclGen = struct {
return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
};
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// The size of the array would be 0, but that is not allowed in SPIR-V.
// This path can be reached when the backend is asked to generate a pointer to
// an array of some zero-bit type. This should always be an indirect path.
@@ -1655,7 +1660,7 @@ const DeclGen = struct {
var param_index: usize = 0;
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
- if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
param_ty_ids[param_index] = try self.resolveType(param_ty, .direct);
param_index += 1;
@@ -1713,7 +1718,7 @@ const DeclGen = struct {
var member_index: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect);
member_index += 1;
@@ -1742,7 +1747,7 @@ const DeclGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
@@ -1761,7 +1766,7 @@ const DeclGen = struct {
},
.Optional => {
const payload_ty = ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// Just use a bool.
// Note: Always generate the bool with indirect format, to save on some sanity
// Perform the conversion to a direct bool when the field is extracted.
@@ -1878,14 +1883,14 @@ const DeclGen = struct {
};
fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout {
- const mod = self.module;
+ const pt = self.pt;
- const error_align = Type.anyerror.abiAlignment(mod);
- const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ const payload_align = payload_ty.abiAlignment(pt);
const error_first = error_align.compare(.gt, payload_align);
return .{
- .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
+ .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt),
.error_first = error_first,
};
}
@@ -1909,9 +1914,10 @@ const DeclGen = struct {
};
fn unionLayout(self: *DeclGen, ty: Type) UnionLayout {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- const layout = ty.unionGetLayout(self.module);
+ const layout = ty.unionGetLayout(pt);
const union_obj = mod.typeToUnion(ty).?;
var union_layout = UnionLayout{
@@ -1932,7 +1938,7 @@ const DeclGen = struct {
const most_aligned_field = layout.most_aligned_field;
const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]);
union_layout.payload_ty = most_aligned_field_ty;
- union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(mod));
+ union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(pt));
} else {
union_layout.payload_size = 0;
}
@@ -1999,7 +2005,7 @@ const DeclGen = struct {
}
fn materialize(self: Temporary, dg: *DeclGen) !IdResult {
- const mod = dg.module;
+ const mod = dg.pt.zcu;
switch (self.value) {
.singleton => |id| return id,
.exploded_vector => |range| {
@@ -2029,12 +2035,12 @@ const DeclGen = struct {
/// 'Explode' a temporary into separate elements. This turns a vector
/// into a bag of elements.
fn explode(self: Temporary, dg: *DeclGen) !IdRange {
- const mod = dg.module;
+ const mod = dg.pt.zcu;
// If the value is a scalar, then this is a no-op.
if (!self.ty.isVector(mod)) {
return switch (self.value) {
- .singleton => |id| IdRange{ .base = @intFromEnum(id), .len = 1 },
+ .singleton => |id| .{ .base = @intFromEnum(id), .len = 1 },
.exploded_vector => |range| range,
};
}
@@ -2088,7 +2094,7 @@ const DeclGen = struct {
/// only checks the size, but the source-of-truth is implemented
/// by `isSpvVector()`.
fn fromType(ty: Type, dg: *DeclGen) Vectorization {
- const mod = dg.module;
+ const mod = dg.pt.zcu;
if (!ty.isVector(mod)) {
return .scalar;
} else if (dg.isSpvVector(ty)) {
@@ -2164,11 +2170,11 @@ const DeclGen = struct {
/// Turns `ty` into the result-type of an individual vector operation.
/// `ty` may be a scalar or vector, it doesn't matter.
fn operationType(self: Vectorization, dg: *DeclGen, ty: Type) !Type {
- const mod = dg.module;
- const scalar_ty = ty.scalarType(mod);
+ const pt = dg.pt;
+ const scalar_ty = ty.scalarType(pt.zcu);
return switch (self) {
.scalar, .unrolled => scalar_ty,
- .spv_vectorized => |n| try mod.vectorType(.{
+ .spv_vectorized => |n| try pt.vectorType(.{
.len = n,
.child = scalar_ty.toIntern(),
}),
@@ -2178,11 +2184,11 @@ const DeclGen = struct {
/// Turns `ty` into the result-type of the entire operation.
/// `ty` may be a scalar or vector, it doesn't matter.
fn resultType(self: Vectorization, dg: *DeclGen, ty: Type) !Type {
- const mod = dg.module;
- const scalar_ty = ty.scalarType(mod);
+ const pt = dg.pt;
+ const scalar_ty = ty.scalarType(pt.zcu);
return switch (self) {
.scalar => scalar_ty,
- .unrolled, .spv_vectorized => |n| try mod.vectorType(.{
+ .unrolled, .spv_vectorized => |n| try pt.vectorType(.{
.len = n,
.child = scalar_ty.toIntern(),
}),
@@ -2193,8 +2199,8 @@ const DeclGen = struct {
/// this setup, and returns a new type that holds the relevant information on how to access
/// elements of the input.
fn prepare(self: Vectorization, dg: *DeclGen, tmp: Temporary) !PreparedOperand {
- const mod = dg.module;
- const is_vector = tmp.ty.isVector(mod);
+ const pt = dg.pt;
+ const is_vector = tmp.ty.isVector(pt.zcu);
const is_spv_vector = dg.isSpvVector(tmp.ty);
const value: PreparedOperand.Value = switch (tmp.value) {
.singleton => |id| switch (self) {
@@ -2209,7 +2215,7 @@ const DeclGen = struct {
}
// Broadcast scalar into vector.
- const vector_ty = try mod.vectorType(.{
+ const vector_ty = try pt.vectorType(.{
.len = self.components(),
.child = tmp.ty.toIntern(),
});
@@ -2340,7 +2346,7 @@ const DeclGen = struct {
/// This function builds an OpSConvert of OpUConvert depending on the
/// signedness of the types.
fn buildIntConvert(self: *DeclGen, dst_ty: Type, src: Temporary) !Temporary {
- const mod = self.module;
+ const mod = self.pt.zcu;
const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct);
const src_ty_id = try self.resolveType(src.ty.scalarType(mod), .direct);
@@ -2419,7 +2425,7 @@ const DeclGen = struct {
}
fn buildSelect(self: *DeclGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
- const mod = self.module;
+ const mod = self.pt.zcu;
const v = self.vectorization(.{ condition, lhs, rhs });
const ops = v.operations();
@@ -2764,7 +2770,8 @@ const DeclGen = struct {
lhs: Temporary,
rhs: Temporary,
) !struct { Temporary, Temporary } {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const target = self.getTarget();
const ip = &mod.intern_pool;
@@ -2814,7 +2821,7 @@ const DeclGen = struct {
// where T is maybe vectorized.
const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() };
const values = [2]InternPool.Index{ .none, .none };
- const index = try ip.getAnonStructType(mod.gpa, .{
+ const index = try ip.getAnonStructType(mod.gpa, pt.tid, .{
.types = &types,
.values = &values,
.names = &.{},
@@ -2888,7 +2895,7 @@ const DeclGen = struct {
/// the name of an error in the test executor.
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
- const ptr_anyerror_ty = try self.module.ptrType(.{
+ const ptr_anyerror_ty = try self.pt.ptrType(.{
.child = Type.anyerror.toIntern(),
.flags = .{ .address_space = .global },
});
@@ -2940,7 +2947,8 @@ const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index);
@@ -2967,7 +2975,7 @@ const DeclGen = struct {
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
- if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
const param_type_id = try self.resolveType(param_ty, .direct);
const arg_result_id = self.spv.allocId();
@@ -3004,11 +3012,11 @@ const DeclGen = struct {
// Append the actual code into the functions section.
try self.spv.addFunction(spv_decl_index, self.func);
- const fqn = try decl.fullyQualifiedName(self.module);
+ const fqn = try decl.fullyQualifiedName(self.pt.zcu);
try self.spv.debugName(result_id, fqn.toSlice(ip));
// Temporarily generate a test kernel declaration if this is a test function.
- if (self.module.test_functions.contains(self.decl_index)) {
+ if (self.pt.zcu.test_functions.contains(self.decl_index)) {
try self.generateTestEntryPoint(fqn.toSlice(ip), spv_decl_index);
}
},
@@ -3033,7 +3041,7 @@ const DeclGen = struct {
.storage_class = final_storage_class,
});
- const fqn = try decl.fullyQualifiedName(self.module);
+ const fqn = try decl.fullyQualifiedName(self.pt.zcu);
try self.spv.debugName(result_id, fqn.toSlice(ip));
try self.spv.declareDeclDeps(spv_decl_index, &.{});
},
@@ -3078,7 +3086,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
- const fqn = try decl.fullyQualifiedName(self.module);
+ const fqn = try decl.fullyQualifiedName(self.pt.zcu);
try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)});
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
@@ -3119,7 +3127,7 @@ const DeclGen = struct {
/// Convert representation from indirect (in memory) to direct (in 'register')
/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
switch (ty.scalarType(mod).zigTypeTag(mod)) {
.Bool => {
const false_id = try self.constBool(false, .indirect);
@@ -3145,7 +3153,7 @@ const DeclGen = struct {
/// Convert representation from direct (in 'register') to indirect (in memory).
/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
switch (ty.scalarType(mod).zigTypeTag(mod)) {
.Bool => {
const result = try self.intFromBool(Temporary.init(ty, operand_id));
@@ -3222,7 +3230,7 @@ const DeclGen = struct {
}
fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ip = &mod.intern_pool;
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
return;
@@ -3402,7 +3410,7 @@ const DeclGen = struct {
}
fn airShift(self: *DeclGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const base = try self.temporary(bin_op.lhs);
@@ -3480,7 +3488,7 @@ const DeclGen = struct {
/// All other values are returned unmodified (this makes strange integer
/// wrapping easier to use in generic operations).
fn normalize(self: *DeclGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty = value.ty;
switch (info.class) {
.integer, .bool, .float => return value,
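A sketch of what normalization means for a strange integer held in a wider backing integer (assumption: the backing value is simply masked back to the declared bit count after wrapping arithmetic):

fn maskToBitsSketch(backing: u64, bits: u7) u64 {
    if (bits >= 64) return backing;
    const one: u64 = 1;
    return backing & ((one << @intCast(bits)) - 1);
}

// e.g. a u3 add of 6 + 7 wraps to 13 in the backing integer; masking to
// 3 bits normalizes it back to 5.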
@@ -3721,7 +3729,7 @@ const DeclGen = struct {
fn airMulOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
const target = self.getTarget();
- const mod = self.module;
+ const pt = self.pt;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3758,7 +3766,7 @@ const DeclGen = struct {
const result, const overflowed = switch (info.signedness) {
.unsigned => blk: {
if (maybe_op_ty_bits) |op_ty_bits| {
- const op_ty = try mod.intType(.unsigned, op_ty_bits);
+ const op_ty = try pt.intType(.unsigned, op_ty_bits);
const casted_lhs = try self.buildIntConvert(op_ty, lhs);
const casted_rhs = try self.buildIntConvert(op_ty, rhs);
@@ -3828,7 +3836,7 @@ const DeclGen = struct {
);
if (maybe_op_ty_bits) |op_ty_bits| {
- const op_ty = try mod.intType(.signed, op_ty_bits);
+ const op_ty = try pt.intType(.signed, op_ty_bits);
// Assume normalized; sign bit is set. We want a sign extend.
const casted_lhs = try self.buildIntConvert(op_ty, lhs);
const casted_rhs = try self.buildIntConvert(op_ty, rhs);
@@ -3900,7 +3908,7 @@ const DeclGen = struct {
}
fn airShlOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3958,7 +3966,7 @@ const DeclGen = struct {
fn airClzCtz(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
- const mod = self.module;
+ const mod = self.pt.zcu;
const target = self.getTarget();
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.temporary(ty_op.operand);
@@ -4007,7 +4015,7 @@ const DeclGen = struct {
}
fn airReduce(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const operand = try self.resolve(reduce.operand);
const operand_ty = self.typeOf(reduce.operand);
@@ -4082,7 +4090,8 @@ const DeclGen = struct {
}
fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolve(extra.a);
@@ -4108,14 +4117,14 @@ const DeclGen = struct {
const a_len = a_ty.vectorLen(mod);
for (components, 0..) |*component, i| {
- const elem = try mask.elemValue(mod, i);
+ const elem = try mask.elemValue(pt, i);
if (elem.isUndef(mod)) {
// This is explicitly valid for OpVectorShuffle; it indicates an undefined lane.
component.* = 0xFFFF_FFFF;
continue;
}
- const index = elem.toSignedInt(mod);
+ const index = elem.toSignedInt(pt);
if (index >= 0) {
component.* = @intCast(index);
} else {
@@ -4140,13 +4149,13 @@ const DeclGen = struct {
defer self.gpa.free(components);
for (components, 0..) |*id, i| {
- const elem = try mask.elemValue(mod, i);
+ const elem = try mask.elemValue(pt, i);
if (elem.isUndef(mod)) {
id.* = try self.spv.constUndef(scalar_ty_id);
continue;
}
- const index = elem.toSignedInt(mod);
+ const index = elem.toSignedInt(pt);
if (index >= 0) {
id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index));
} else {
@@ -4220,7 +4229,7 @@ const DeclGen = struct {
}
fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const result_ty_id = try self.resolveType(result_ty, .direct);
switch (ptr_ty.ptrSize(mod)) {
@@ -4276,7 +4285,8 @@ const DeclGen = struct {
lhs: Temporary,
rhs: Temporary,
) !Temporary {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const scalar_ty = lhs.ty.scalarType(mod);
const is_vector = lhs.ty.isVector(mod);
@@ -4324,7 +4334,7 @@ const DeclGen = struct {
const payload_ty = ty.optionalChild(mod);
if (ty.optionalReprIsPayload(mod)) {
- assert(payload_ty.hasRuntimeBitsIgnoreComptime(mod));
+ assert(payload_ty.hasRuntimeBitsIgnoreComptime(pt));
assert(!payload_ty.isSlice(mod));
return try self.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty));
@@ -4333,12 +4343,12 @@ const DeclGen = struct {
const lhs_id = try lhs.materialize(self);
const rhs_id = try rhs.materialize(self);
- const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
try self.extractField(Type.bool, lhs_id, 1)
else
try self.convertToDirect(Type.bool, lhs_id);
- const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
try self.extractField(Type.bool, rhs_id, 1)
else
try self.convertToDirect(Type.bool, rhs_id);
@@ -4346,7 +4356,7 @@ const DeclGen = struct {
const lhs_valid = Temporary.init(Type.bool, lhs_valid_id);
const rhs_valid = Temporary.init(Type.bool, rhs_valid_id);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return try self.cmp(op, lhs_valid, rhs_valid);
}
@@ -4466,7 +4476,7 @@ const DeclGen = struct {
src_ty: Type,
src_id: IdRef,
) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const src_ty_id = try self.resolveType(src_ty, .direct);
const dst_ty_id = try self.resolveType(dst_ty, .direct);
@@ -4675,7 +4685,8 @@ const DeclGen = struct {
}
fn airArrayToSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const array_ptr_ty = self.typeOf(ty_op.operand);
const array_ty = array_ptr_ty.childType(mod);
@@ -4687,7 +4698,7 @@ const DeclGen = struct {
const array_ptr_id = try self.resolve(ty_op.operand);
const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct);
- const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(pt))
// Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
else
@@ -4719,7 +4730,8 @@ const DeclGen = struct {
}
fn airAggregateInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = self.typeOfIndex(inst);
@@ -4742,8 +4754,8 @@ const DeclGen = struct {
switch (ip.indexToKey(result_ty.toIntern())) {
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
- if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
- assert(Type.fromInterned(field_ty).hasRuntimeBits(mod));
+ if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
+ assert(Type.fromInterned(field_ty).hasRuntimeBits(pt));
const id = try self.resolve(element);
types[index] = Type.fromInterned(field_ty);
@@ -4756,9 +4768,9 @@ const DeclGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
for (elements, 0..) |element, i| {
const field_index = it.next().?;
- if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
+ if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- assert(field_ty.hasRuntimeBitsIgnoreComptime(mod));
+ assert(field_ty.hasRuntimeBitsIgnoreComptime(pt));
const id = try self.resolve(element);
types[index] = field_ty;
@@ -4808,13 +4820,14 @@ const DeclGen = struct {
}
fn sliceOrArrayLen(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
switch (ty.ptrSize(mod)) {
.Slice => return self.extractField(Type.usize, operand_id, 1),
.One => {
const array_ty = ty.childType(mod);
const elem_ty = array_ty.childType(mod);
- const abi_size = elem_ty.abiSize(mod);
+ const abi_size = elem_ty.abiSize(pt);
const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size;
return try self.constInt(Type.usize, size, .direct);
},
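A small sketch of the two length sources handled above (assumed simplification): a slice's length is read from its runtime len field, while a single-item pointer to an array produces a compile-time constant derived from the element ABI size.

const LenSource = union(enum) {
    runtime_slice_field,
    comptime_constant: u64,
};

fn lenSourceSketch(is_slice: bool, array_len_incl_sentinel: u64, elem_abi_size: u64) LenSource {
    if (is_slice) return .runtime_slice_field;
    return .{ .comptime_constant = array_len_incl_sentinel * elem_abi_size };
}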
@@ -4823,7 +4836,7 @@ const DeclGen = struct {
}
fn sliceOrArrayPtr(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
if (ty.isSlice(mod)) {
const ptr_ty = ty.slicePtrFieldType(mod);
return self.extractField(ptr_ty, operand_id, 0);
@@ -4855,7 +4868,7 @@ const DeclGen = struct {
}
fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOf(bin_op.lhs);
@@ -4872,7 +4885,7 @@ const DeclGen = struct {
}
fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
@@ -4889,7 +4902,7 @@ const DeclGen = struct {
}
fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(mod); // use elemType2() so that we get T for *[N]T.
const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
@@ -4904,14 +4917,15 @@ const DeclGen = struct {
}
fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const src_ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = src_ptr_ty.childType(mod);
const ptr_id = try self.resolve(bin_op.lhs);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const dst_ptr_ty = self.typeOfIndex(inst);
return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id);
}
@@ -4921,7 +4935,7 @@ const DeclGen = struct {
}
fn airArrayElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const array_ty = self.typeOf(bin_op.lhs);
const elem_ty = array_ty.childType(mod);
@@ -4982,7 +4996,7 @@ const DeclGen = struct {
}
fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOfIndex(inst);
@@ -4993,7 +5007,7 @@ const DeclGen = struct {
}
fn airVectorStoreElem(self: *DeclGen, inst: Air.Inst.Index) !void {
- const mod = self.module;
+ const mod = self.pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
@@ -5015,7 +5029,7 @@ const DeclGen = struct {
}
fn airSetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !void {
- const mod = self.module;
+ const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const un_ptr_ty = self.typeOf(bin_op.lhs);
const un_ty = un_ptr_ty.childType(mod);
@@ -5041,7 +5055,7 @@ const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = self.typeOf(ty_op.operand);
- const mod = self.module;
+ const mod = self.pt.zcu;
const layout = self.unionLayout(un_ty);
if (layout.tag_size == 0) return null;
@@ -5064,7 +5078,8 @@ const DeclGen = struct {
// Note: The result here is not cached, because it generates runtime code.
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const union_ty = mod.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
@@ -5076,9 +5091,9 @@ const DeclGen = struct {
const layout = self.unionLayout(ty);
const tag_int = if (layout.tag_size != 0) blk: {
- const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field);
- const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
- break :blk tag_int_val.toUnsignedInt(mod);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field);
+ const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
+ break :blk tag_int_val.toUnsignedInt(pt);
} else 0;
if (!layout.has_payload) {
@@ -5095,7 +5110,7 @@ const DeclGen = struct {
}
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
- if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function);
@@ -5118,7 +5133,8 @@ const DeclGen = struct {
}
fn airUnionInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
@@ -5126,7 +5142,7 @@ const DeclGen = struct {
const union_obj = mod.typeToUnion(ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
- const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt))
try self.resolve(extra.init)
else
null;
@@ -5134,7 +5150,8 @@ const DeclGen = struct {
}
fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -5143,7 +5160,7 @@ const DeclGen = struct {
const field_index = struct_field.field_index;
const field_ty = object_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return null;
switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout(mod)) {
@@ -5178,7 +5195,8 @@ const DeclGen = struct {
}
fn airFieldParentPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -5187,7 +5205,7 @@ const DeclGen = struct {
const field_ptr = try self.resolve(extra.field_ptr);
const field_ptr_int = try self.intFromPtr(field_ptr);
- const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
const base_ptr_int = base_ptr_int: {
if (field_offset == 0) break :base_ptr_int field_ptr_int;
@@ -5218,7 +5236,7 @@ const DeclGen = struct {
) !IdRef {
const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
- const zcu = self.module;
+ const zcu = self.pt.zcu;
const object_ty = object_ptr_ty.childType(zcu);
switch (object_ty.zigTypeTag(zcu)) {
.Pointer => {
@@ -5312,7 +5330,7 @@ const DeclGen = struct {
}
fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
assert(ptr_ty.ptrAddressSpace(mod) == .generic);
const child_ty = ptr_ty.childType(mod);
@@ -5486,9 +5504,10 @@ const DeclGen = struct {
// of the block, then a label, and then generate the rest of the current
// ir.Block in a different SPIR-V block.
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty = self.typeOfIndex(inst);
- const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(mod);
+ const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(pt);
const cf = switch (self.control_flow) {
.structured => |*cf| cf,
@@ -5618,13 +5637,13 @@ const DeclGen = struct {
}
fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
- const mod = self.module;
+ const pt = self.pt;
const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const operand_ty = self.typeOf(br.operand);
switch (self.control_flow) {
.structured => |*cf| {
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
const operand_id = try self.resolve(br.operand);
const block_result_var_id = cf.block_results.get(br.block_inst).?;
try self.store(operand_ty, block_result_var_id, operand_id, .{});
@@ -5635,7 +5654,7 @@ const DeclGen = struct {
},
.unstructured => |cf| {
const block = cf.blocks.get(br.block_inst).?;
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
const operand_id = try self.resolve(br.operand);
// current_block_label should not be undefined here, lest there
// is a br or br_void in the function's body.
@@ -5762,7 +5781,7 @@ const DeclGen = struct {
}
fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = self.typeOf(ty_op.operand);
const elem_ty = self.typeOfIndex(inst);
@@ -5773,20 +5792,22 @@ const DeclGen = struct {
}
fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType(self.module);
+ const elem_ty = ptr_ty.childType(mod);
const ptr = try self.resolve(bin_op.lhs);
const value = try self.resolve(bin_op.rhs);
- try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(self.module) });
+ try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
}
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const pt = self.pt;
+ const mod = pt.zcu;
const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.typeOf(operand);
- const mod = self.module;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const decl = mod.declPtr(self.decl_index);
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
@@ -5805,12 +5826,13 @@ const DeclGen = struct {
}
fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr_ty = self.typeOf(un_op);
const ret_ty = ptr_ty.childType(mod);
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const decl = mod.declPtr(self.decl_index);
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
@@ -5832,7 +5854,7 @@ const DeclGen = struct {
}
fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union_id = try self.resolve(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
@@ -5902,7 +5924,7 @@ const DeclGen = struct {
}
fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const err_union_ty = self.typeOf(ty_op.operand);
@@ -5938,7 +5960,7 @@ const DeclGen = struct {
}
fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_union_ty = self.typeOfIndex(inst);
const payload_ty = err_union_ty.errorUnionPayload(mod);
@@ -5985,7 +6007,8 @@ const DeclGen = struct {
}
fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand_id = try self.resolve(un_op);
const operand_ty = self.typeOf(un_op);
@@ -6026,7 +6049,7 @@ const DeclGen = struct {
const is_non_null_id = blk: {
if (is_pointer) {
- if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod));
const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class);
const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1});
@@ -6036,7 +6059,7 @@ const DeclGen = struct {
break :blk try self.load(Type.bool, operand_id, .{});
}
- break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
try self.extractField(Type.bool, operand_id, 1)
else
// Optional representation is a bool indicating whether the optional is set
@@ -6061,7 +6084,7 @@ const DeclGen = struct {
}
fn airIsErr(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand_id = try self.resolve(un_op);
const err_union_ty = self.typeOf(un_op);
@@ -6094,13 +6117,14 @@ const DeclGen = struct {
}
fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand);
const payload_ty = self.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return null;
if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
@@ -6110,7 +6134,8 @@ const DeclGen = struct {
}
fn airUnwrapOptionalPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -6119,7 +6144,7 @@ const DeclGen = struct {
const result_ty = self.typeOfIndex(inst);
const result_ty_id = try self.resolveType(result_ty, .direct);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// There is no payload, but we still need to return a valid pointer.
// We can just return anything here, so just return a pointer to the operand.
return try self.bitCast(result_ty, operand_ty, operand_id);
@@ -6134,11 +6159,12 @@ const DeclGen = struct {
}
fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const payload_ty = self.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return try self.constBool(true, .indirect);
}
@@ -6156,7 +6182,8 @@ const DeclGen = struct {
}
fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const target = self.getTarget();
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_ty = self.typeOf(pl_op.operand);
@@ -6240,15 +6267,15 @@ const DeclGen = struct {
const label = case_labels.at(case_i);
for (items) |item| {
- const value = (try self.air.value(item, mod)) orelse unreachable;
+ const value = (try self.air.value(item, pt)) orelse unreachable;
const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) {
- .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(mod)) else value.toUnsignedInt(mod),
+ .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(pt)) else value.toUnsignedInt(pt),
.Enum => blk: {
// TODO: figure out if cond_ty is correct (something with enum literals)
- break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
+ break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(pt); // TODO: composite integer constants
},
.ErrorSet => value.getErrorInt(mod),
- .Pointer => value.toUnsignedInt(mod),
+ .Pointer => value.toUnsignedInt(pt),
else => unreachable,
};
const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -6328,8 +6355,9 @@ const DeclGen = struct {
}
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const pt = self.pt;
+ const mod = pt.zcu;
const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
- const mod = self.module;
const decl = mod.declPtr(self.decl_index);
const path = decl.getFileScope(mod).sub_file_path;
try self.func.body.emit(self.spv.gpa, .OpLine, .{
@@ -6340,7 +6368,7 @@ const DeclGen = struct {
}
fn airDbgInlineBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
const decl = mod.funcOwnerDeclPtr(extra.data.func);
@@ -6358,7 +6386,7 @@ const DeclGen = struct {
}
fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.module;
+ const mod = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
@@ -6440,20 +6468,20 @@ const DeclGen = struct {
// TODO: Translate proper error locations.
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
- const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod);
- self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
- const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
+ const src_loc = mod.declPtr(self.decl_index).navSrcLoc(mod);
+ self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
+ const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
// Sub-scope to prevent `return error.CodegenFail` from running the errdefers.
{
- errdefer self.module.gpa.free(notes);
+ errdefer mod.gpa.free(notes);
var i: usize = 0;
errdefer for (notes[0..i]) |*note| {
- note.deinit(self.module.gpa);
+ note.deinit(mod.gpa);
};
while (i < as.errors.items.len) : (i += 1) {
- notes[i] = try Module.ErrorMsg.init(self.module.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
+ notes[i] = try Zcu.ErrorMsg.init(mod.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
}
}
self.error_msg.?.notes = notes;
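The inline-assembly error path above drops the deprecated `Module` alias in favour of `Zcu.ErrorMsg` and allocates through `mod.gpa`. As a rough standalone illustration of the surrounding allocation pattern (this `ErrorMsg` is a toy type, not the compiler's), the notes slice is filled under errdefers that free only the already-initialized prefix:

const std = @import("std");

const ErrorMsg = struct {
    msg: []u8,
    notes: []ErrorMsg = &.{},

    fn create(gpa: std.mem.Allocator, comptime fmt: []const u8, args: anytype) !ErrorMsg {
        return .{ .msg = try std.fmt.allocPrint(gpa, fmt, args) };
    }

    fn deinit(em: *ErrorMsg, gpa: std.mem.Allocator) void {
        for (em.notes) |*note| note.deinit(gpa);
        gpa.free(em.notes);
        gpa.free(em.msg);
    }
};

fn reportAsmErrors(gpa: std.mem.Allocator, errors: []const []const u8) !ErrorMsg {
    var primary = try ErrorMsg.create(gpa, "failed to assemble inline assembly", .{});
    errdefer primary.deinit(gpa);

    const notes = try gpa.alloc(ErrorMsg, errors.len);
    {
        // Free only the notes that were successfully initialized.
        errdefer gpa.free(notes);
        var i: usize = 0;
        errdefer for (notes[0..i]) |*note| note.deinit(gpa);
        while (i < errors.len) : (i += 1) {
            notes[i] = try ErrorMsg.create(gpa, "{s}", .{errors[i]});
        }
    }
    primary.notes = notes;
    return primary;
}

test "reportAsmErrors" {
    const gpa = std.testing.allocator;
    var em = try reportAsmErrors(gpa, &.{ "bad opcode", "unknown register" });
    defer em.deinit(gpa);
    try std.testing.expectEqual(@as(usize, 2), em.notes.len);
}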
@@ -6489,7 +6517,8 @@ const DeclGen = struct {
fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
_ = modifier;
- const mod = self.module;
+ const pt = self.pt;
+ const mod = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
@@ -6515,7 +6544,7 @@ const DeclGen = struct {
// before starting to emit OpFunctionCall instructions. Hence the
// temporary params buffer.
const arg_ty = self.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
const arg_id = try self.resolve(arg);
params[n_params] = arg_id;
@@ -6533,7 +6562,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(mod)) {
+ if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(pt)) {
return null;
}
@@ -6541,11 +6570,10 @@ const DeclGen = struct {
}
fn builtin3D(self: *DeclGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef {
- const mod = self.module;
if (dimension >= 3) {
return try self.constInt(result_ty, out_of_range_value, .direct);
}
- const vec_ty = try mod.vectorType(.{
+ const vec_ty = try self.pt.vectorType(.{
.len = 3,
.child = result_ty.toIntern(),
});
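builtin3D now asks `self.pt` for the vector type rather than going through the Zcu directly, i.e. type interning is routed through the per-thread handle. A small hypothetical model of that routing — `vectorType`, `PerThread`, and the key layout are illustrative stand-ins, not the real InternPool API:

const std = @import("std");

// Toy interned-type key; the real InternPool key is far richer.
const VectorKey = struct { len: u32, child: u32 };

const Zcu = struct {
    // Shared state; mutex-protected in the real compiler, a plain list here.
    interned: std.ArrayListUnmanaged(VectorKey) = .{},

    fn intern(zcu: *Zcu, gpa: std.mem.Allocator, key: VectorKey) !u32 {
        for (zcu.interned.items, 0..) |existing, i| {
            if (existing.len == key.len and existing.child == key.child)
                return @intCast(i);
        }
        try zcu.interned.append(gpa, key);
        return @intCast(zcu.interned.items.len - 1);
    }
};

const PerThread = struct {
    zcu: *Zcu,
    tid: usize,

    // Call sites request new types through the per-thread handle,
    // mirroring `try self.pt.vectorType(...)` in the hunk above.
    fn vectorType(pt: PerThread, gpa: std.mem.Allocator, key: VectorKey) !u32 {
        return pt.zcu.intern(gpa, key);
    }
};

test "vector types are requested through the per-thread handle" {
    const gpa = std.testing.allocator;
    var zcu: Zcu = .{};
    defer zcu.interned.deinit(gpa);
    const pt: PerThread = .{ .zcu = &zcu, .tid = 0 };
    const a = try pt.vectorType(gpa, .{ .len = 3, .child = 7 });
    const b = try pt.vectorType(gpa, .{ .len = 3, .child = 7 });
    try std.testing.expectEqual(a, b);
}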
@@ -6591,12 +6619,12 @@ const DeclGen = struct {
}
fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
- const mod = self.module;
+ const mod = self.pt.zcu;
return self.air.typeOf(inst, &mod.intern_pool);
}
fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
- const mod = self.module;
+ const mod = self.pt.zcu;
return self.air.typeOfIndex(inst, &mod.intern_pool);
}
};
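The recurring local-binding change throughout this DeclGen is from `const mod = self.module;` to `const pt = self.pt; const mod = pt.zcu;`. A minimal sketch of that shape, with placeholder types standing in for the real DeclGen, Zcu, and PerThread:

const std = @import("std");

const Zcu = struct { gpa: std.mem.Allocator };

const PerThread = struct {
    zcu: *Zcu,
    tid: usize,
};

const DeclGen = struct {
    // Previously `module: *Zcu`; the generator now carries the
    // per-thread handle and derives the Zcu from it on demand.
    pt: PerThread,

    fn airExample(self: DeclGen) usize {
        const pt = self.pt;
        const mod = pt.zcu;
        // Layout-style queries would take `pt`; plain lookups keep using `mod`.
        _ = mod;
        return pt.tid;
    }
};

test "DeclGen derives the Zcu from its per-thread handle" {
    var zcu: Zcu = .{ .gpa = std.testing.allocator };
    const dg: DeclGen = .{ .pt = .{ .zcu = &zcu, .tid = 3 } };
    try std.testing.expectEqual(@as(usize, 3), dg.airExample());
}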
src/link/Coff/lld.zig
@@ -15,8 +15,9 @@ const Allocator = mem.Allocator;
const Coff = @import("../Coff.zig");
const Compilation = @import("../../Compilation.zig");
+const Zcu = @import("../../Zcu.zig");
-pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) !void {
+pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -29,7 +30,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node)
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
- try self.flushModule(arena, prog_node);
+ try self.flushModule(arena, tid, prog_node);
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, self.base.zcu_object_sub_path.? });
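linkWithLLD now accepts a `Zcu.PerThread.Id` and forwards it to flushModule. The sketch below models that threading with toy types (this `Coff`, `flushModule`, and `linkWithLLD` are placeholders, not the linker's real code): the id is cheap to pass along and is only expanded into a full per-thread handle where Zcu state is actually touched.

const std = @import("std");

const Zcu = struct {
    pub const PerThread = struct {
        zcu: *Zcu,
        tid: Id,
        pub const Id = usize;
    };
    flushed_by: ?PerThread.Id = null,
};

const Coff = struct {
    zcu: *Zcu,

    // Only here is the id expanded into a PerThread, right before
    // thread-sensitive Zcu work happens.
    fn flushModule(self: *Coff, tid: Zcu.PerThread.Id) void {
        const pt: Zcu.PerThread = .{ .zcu = self.zcu, .tid = tid };
        pt.zcu.flushed_by = pt.tid;
    }

    // The outer entry point just forwards the id, mirroring
    // `linkWithLLD(self, arena, tid, prog_node)` above.
    fn linkWithLLD(self: *Coff, tid: Zcu.PerThread.Id) void {
        self.flushModule(tid);
    }
};

test "the thread id is forwarded down to flushModule" {
    var zcu: Zcu = .{};
    var coff: Coff = .{ .zcu = &zcu };
    coff.linkWithLLD(5);
    try std.testing.expectEqual(@as(?Zcu.PerThread.Id, 5), zcu.flushed_by);
}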
src/link/Elf/ZigObject.zig
@@ -158,16 +158,17 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
}
-pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
+pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.none)) |metadata| {
- const zcu = elf_file.base.comp.module.?;
+ const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbol(
elf_file,
- link.File.LazySymbol.initDecl(.code, null, zcu),
+ pt,
+ link.File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@@ -175,7 +176,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
};
if (metadata.rodata_state != .unused) self.updateLazySymbol(
elf_file,
- link.File.LazySymbol.initDecl(.const_data, null, zcu),
+ pt,
+ link.File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.rodata_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@@ -188,8 +190,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
}
if (self.dwarf) |*dw| {
- const zcu = elf_file.base.comp.module.?;
- try dw.flushModule(zcu);
+ const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
+ try dw.flushModule(pt);
// TODO I need to re-think how to handle ZigObject's debug sections AND debug sections
// extracted from input object files correctly.
@@ -202,7 +204,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
const low_pc = text_shdr.sh_addr;
const high_pc = text_shdr.sh_addr + text_shdr.sh_size;
- try dw.writeDbgInfoHeader(zcu, low_pc, high_pc);
+ try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
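The flushModule hunk above keeps the existing lazy-symbol policy — most lazy symbols are updated on first use, while anyerror-related ones wait for flush — and merely swaps the bare Zcu for a PerThread built from `tid`. A simplified standalone model of that bookkeeping (the ZigObject, LazyState, and metadata types here are toys, not the linker's):

const std = @import("std");

const LazyState = enum { unused, pending_flush, flushed };

const LazyMetadata = struct {
    text_state: LazyState = .unused,
    rodata_state: LazyState = .unused,
};

const ZigObject = struct {
    lazy: LazyMetadata = .{},
    flushed_text: bool = false,
    flushed_rodata: bool = false,

    fn updateLazySymbol(self: *ZigObject, kind: enum { code, const_data }) void {
        switch (kind) {
            .code => self.flushed_text = true,
            .const_data => self.flushed_rodata = true,
        }
    }

    // Mirrors the hunk above: each lazy section is only re-lowered at
    // flush time if something actually touched it during this update.
    fn flushModule(self: *ZigObject) void {
        if (self.lazy.text_state != .unused) {
            self.updateLazySymbol(.code);
            self.lazy.text_state = .flushed;
        }
        if (self.lazy.rodata_state != .unused) {
            self.updateLazySymbol(.const_data);
            self.lazy.rodata_state = .flushed;
        }
    }
};

test "only touched lazy sections are lowered at flush" {
    var zo: ZigObject = .{};
    zo.lazy.text_state = .pending_flush;
    zo.flushModule();
    try std.testing.expect(zo.flushed_text);
    try std.testing.expect(!zo.flushed_rodata);
}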
@@ -684,6 +686,7 @@ pub fn getAnonDeclVAddr(
pub fn lowerAnonDecl(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
@@ -692,7 +695,7 @@ pub fn lowerAnonDecl(
const mod = elf_file.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
- .none => ty.abiAlignment(mod),
+ .none => ty.abiAlignment(pt),
else => explicit_alignment,
};
if (self.anon_decls.get(decl_val)) |metadata| {
@@ -708,6 +711,7 @@ pub fn lowerAnonDecl(
}) catch unreachable;
const res = self.lowerConst(
elf_file,
+ pt,
name,
val,
decl_alignment,
@@ -733,10 +737,11 @@ pub fn lowerAnonDecl(
pub fn getOrCreateMetadataForLazySymbol(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
) !Symbol.Index {
- const gpa = elf_file.base.comp.gpa;
- const mod = elf_file.base.comp.module.?;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
@@ -766,7 +771,7 @@ pub fn getOrCreateMetadataForLazySymbol(
metadata.state.* = .pending_flush;
const symbol_index = metadata.symbol_index.*;
// anyerror needs to be deferred until flushModule
- if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, lazy_sym, symbol_index);
+ if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index);
return symbol_index;
}
@@ -893,6 +898,7 @@ fn getDeclShdrIndex(
fn updateDeclCode(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
shdr_index: u32,
@@ -900,13 +906,13 @@ fn updateDeclCode(
stt_bits: u8,
) !void {
const gpa = elf_file.base.comp.gpa;
- const mod = elf_file.base.comp.module.?;
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
- const required_alignment = decl.getAlignment(mod).max(
+ const required_alignment = decl.getAlignment(pt).max(
target_util.minFunctionAlignment(mod.getTarget()),
);
@@ -994,19 +1000,20 @@ fn updateDeclCode(
fn updateTlv(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
shndx: u32,
code: []const u8,
) !void {
- const gpa = elf_file.base.comp.gpa;
- const mod = elf_file.base.comp.module.?;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl });
- const required_alignment = decl.getAlignment(mod);
+ const required_alignment = decl.getAlignment(pt);
const sym = elf_file.symbol(sym_index);
const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index];
@@ -1048,7 +1055,7 @@ fn updateTlv(
pub fn updateFunc(
self: *ZigObject,
elf_file: *Elf,
- mod: *Module,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
@@ -1056,6 +1063,7 @@ pub fn updateFunc(
const tracy = trace(@src());
defer tracy.end();
+ const mod = pt.zcu;
const gpa = elf_file.base.comp.gpa;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
@@ -1068,29 +1076,19 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
+ var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
- const res = if (decl_state) |*ds|
- try codegen.generateFunction(
- &elf_file.base,
- decl.navSrcLoc(mod),
- func_index,
- air,
- liveness,
- &code_buffer,
- .{ .dwarf = ds },
- )
- else
- try codegen.generateFunction(
- &elf_file.base,
- decl.navSrcLoc(mod),
- func_index,
- air,
- liveness,
- &code_buffer,
- .none,
- );
+ const res = try codegen.generateFunction(
+ &elf_file.base,
+ pt,
+ decl.navSrcLoc(mod),
+ func_index,
+ air,
+ liveness,
+ &code_buffer,
+ if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ );
const code = switch (res) {
.ok => code_buffer.items,
@@ -1102,12 +1100,12 @@ pub fn updateFunc(
};
const shndx = try self.getDeclShdrIndex(elf_file, decl, code);
- try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_FUNC);
+ try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_FUNC);
if (decl_state) |*ds| {
const sym = elf_file.symbol(sym_index);
try self.dwarf.?.commitDeclState(
- mod,
+ pt,
decl_index,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
@@ -1121,12 +1119,13 @@ pub fn updateFunc(
pub fn updateDecl(
self: *ZigObject,
elf_file: *Elf,
- mod: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
if (decl.val.getExternFunc(mod)) |_| {
@@ -1150,19 +1149,19 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
+ var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = if (decl_state) |*ds|
- try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{
+ try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{
.dwarf = ds,
}, .{
.parent_atom_index = sym_index,
})
else
- try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
+ try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
.parent_atom_index = sym_index,
});
@@ -1177,14 +1176,14 @@ pub fn updateDecl(
const shndx = try self.getDeclShdrIndex(elf_file, decl, code);
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
- try self.updateTlv(elf_file, decl_index, sym_index, shndx, code)
+ try self.updateTlv(elf_file, pt, decl_index, sym_index, shndx, code)
else
- try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_OBJECT);
+ try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_OBJECT);
if (decl_state) |*ds| {
const sym = elf_file.symbol(sym_index);
try self.dwarf.?.commitDeclState(
- mod,
+ pt,
decl_index,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
@@ -1198,11 +1197,12 @@ pub fn updateDecl(
fn updateLazySymbol(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
sym: link.File.LazySymbol,
symbol_index: Symbol.Index,
) !void {
- const gpa = elf_file.base.comp.gpa;
- const mod = elf_file.base.comp.module.?;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1211,7 +1211,7 @@ fn updateLazySymbol(
const name_str_index = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(sym.kind),
- sym.ty.fmt(mod),
+ sym.ty.fmt(pt),
});
defer gpa.free(name);
break :blk try self.strtab.insert(gpa, name);
@@ -1220,6 +1220,7 @@ fn updateLazySymbol(
const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&elf_file.base,
+ pt,
src,
sym,
&required_alignment,
@@ -1273,6 +1274,7 @@ fn updateLazySymbol(
pub fn lowerUnnamedConst(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
val: Value,
decl_index: InternPool.DeclIndex,
) !u32 {
@@ -1291,9 +1293,10 @@ pub fn lowerUnnamedConst(
const ty = val.typeOf(mod);
const sym_index = switch (try self.lowerConst(
elf_file,
+ pt,
name,
val,
- ty.abiAlignment(mod),
+ ty.abiAlignment(pt),
elf_file.zig_data_rel_ro_section_index.?,
decl.navSrcLoc(mod),
)) {
@@ -1318,20 +1321,21 @@ const LowerConstResult = union(enum) {
fn lowerConst(
self: *ZigObject,
elf_file: *Elf,
+ pt: Zcu.PerThread,
name: []const u8,
val: Value,
required_alignment: InternPool.Alignment,
output_section_index: u32,
src_loc: Module.LazySrcLoc,
) !LowerConstResult {
- const gpa = elf_file.base.comp.gpa;
+ const gpa = pt.zcu.gpa;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const sym_index = try self.addAtom(elf_file);
- const res = try codegen.generateSymbol(&elf_file.base, src_loc, val, &code_buffer, .{
+ const res = try codegen.generateSymbol(&elf_file.base, pt, src_loc, val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = sym_index,
@@ -1373,13 +1377,14 @@ fn lowerConst(
pub fn updateExports(
self: *ZigObject,
elf_file: *Elf,
- mod: *Module,
+ pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = pt.zcu;
const gpa = elf_file.base.comp.gpa;
const metadata = switch (exported) {
.decl_index => |decl_index| blk: {
@@ -1388,7 +1393,7 @@ pub fn updateExports(
},
.value => |value| self.anon_decls.getPtr(value) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
- const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.src);
+ const res = try self.lowerAnonDecl(elf_file, pt, value, .none, first_exp.src);
switch (res) {
.ok => {},
.fail => |em| {
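One readability win in the Elf updateFunc hunk above is collapsing the duplicated generateFunction calls into a single call whose debug-output argument is chosen inline with `if (decl_state) |*ds| .{ .dwarf = ds } else .none`. A self-contained sketch of that idiom — `DebugInfoOutput`, `DeclState`, and `generate` are invented stand-ins:

const std = @import("std");

const DeclState = struct { entries: u32 = 0 };

const DebugInfoOutput = union(enum) {
    none,
    dwarf: *DeclState,
};

fn generate(buf: *std.ArrayList(u8), debug_output: DebugInfoOutput) !void {
    try buf.appendSlice("code");
    switch (debug_output) {
        .none => {},
        .dwarf => |ds| ds.entries += 1,
    }
}

test "select the debug output inline instead of duplicating the call" {
    const gpa = std.testing.allocator;
    var buf = std.ArrayList(u8).init(gpa);
    defer buf.deinit();

    var decl_state: ?DeclState = DeclState{};

    // Equivalent to the two-branch version, but the call site is written once.
    try generate(&buf, if (decl_state) |*ds| .{ .dwarf = ds } else .none);

    try std.testing.expectEqual(@as(u32, 1), decl_state.?.entries);
}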
src/link/MachO/ZigObject.zig
@@ -425,16 +425,17 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se
return sect;
}
-pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
+pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void {
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.none)) |metadata| {
- const zcu = macho_file.base.comp.module.?;
+ const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbol(
macho_file,
- link.File.LazySymbol.initDecl(.code, null, zcu),
+ pt,
+ link.File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@@ -442,7 +443,8 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
};
if (metadata.const_state != .unused) self.updateLazySymbol(
macho_file,
- link.File.LazySymbol.initDecl(.const_data, null, zcu),
+ pt,
+ link.File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.const_symbol_index,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@@ -455,8 +457,8 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
}
if (self.dwarf) |*dw| {
- const zcu = macho_file.base.comp.module.?;
- try dw.flushModule(zcu);
+ const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
+ try dw.flushModule(pt);
if (self.debug_abbrev_dirty) {
try dw.writeDbgAbbrev();
@@ -469,7 +471,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO) !void {
const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
- try dw.writeDbgInfoHeader(zcu, low_pc, high_pc);
+ try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
@@ -570,6 +572,7 @@ pub fn getAnonDeclVAddr(
pub fn lowerAnonDecl(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: Atom.Alignment,
src_loc: Module.LazySrcLoc,
@@ -578,7 +581,7 @@ pub fn lowerAnonDecl(
const mod = macho_file.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
- .none => ty.abiAlignment(mod),
+ .none => ty.abiAlignment(pt),
else => explicit_alignment,
};
if (self.anon_decls.get(decl_val)) |metadata| {
@@ -593,6 +596,7 @@ pub fn lowerAnonDecl(
}) catch unreachable;
const res = self.lowerConst(
macho_file,
+ pt,
name,
Value.fromInterned(decl_val),
decl_alignment,
@@ -656,7 +660,7 @@ pub fn freeDecl(self: *ZigObject, macho_file: *MachO, decl_index: InternPool.Dec
pub fn updateFunc(
self: *ZigObject,
macho_file: *MachO,
- mod: *Module,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
@@ -664,7 +668,8 @@ pub fn updateFunc(
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -676,12 +681,13 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
+ var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none;
const res = try codegen.generateFunction(
&macho_file.base,
+ pt,
decl.navSrcLoc(mod),
func_index,
air,
@@ -700,12 +706,12 @@ pub fn updateFunc(
};
const sect_index = try self.getDeclOutputSection(macho_file, decl, code);
- try self.updateDeclCode(macho_file, decl_index, sym_index, sect_index, code);
+ try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code);
if (decl_state) |*ds| {
const sym = macho_file.getSymbol(sym_index);
try self.dwarf.?.commitDeclState(
- mod,
+ pt,
decl_index,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
@@ -719,12 +725,13 @@ pub fn updateFunc(
pub fn updateDecl(
self: *ZigObject,
macho_file: *MachO,
- mod: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
if (decl.val.getExternFunc(mod)) |_| {
@@ -749,12 +756,12 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
- var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
+ var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none;
- const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{
+ const res = try codegen.generateSymbol(&macho_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{
.parent_atom_index = sym_index,
});
@@ -772,15 +779,15 @@ pub fn updateDecl(
else => false,
};
if (is_threadlocal) {
- try self.updateTlv(macho_file, decl_index, sym_index, sect_index, code);
+ try self.updateTlv(macho_file, pt, decl_index, sym_index, sect_index, code);
} else {
- try self.updateDeclCode(macho_file, decl_index, sym_index, sect_index, code);
+ try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code);
}
if (decl_state) |*ds| {
const sym = macho_file.getSymbol(sym_index);
try self.dwarf.?.commitDeclState(
- mod,
+ pt,
decl_index,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
@@ -794,19 +801,20 @@ pub fn updateDecl(
fn updateDeclCode(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
) !void {
const gpa = macho_file.base.comp.gpa;
- const mod = macho_file.base.comp.module.?;
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
- const required_alignment = decl.getAlignment(mod);
+ const required_alignment = decl.getAlignment(pt);
const sect = &macho_file.sections.items(.header)[sect_index];
const sym = macho_file.getSymbol(sym_index);
@@ -879,19 +887,20 @@ fn updateDeclCode(
fn updateTlv(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
) !void {
- const mod = macho_file.base.comp.module.?;
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl });
const decl_name_slice = decl_name.toSlice(&mod.intern_pool);
- const required_alignment = decl.getAlignment(mod);
+ const required_alignment = decl.getAlignment(pt);
// 1. Lower TLV initializer
const init_sym_index = try self.createTlvInitializer(
@@ -1079,11 +1088,12 @@ fn getDeclOutputSection(
pub fn lowerUnnamedConst(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
val: Value,
decl_index: InternPool.DeclIndex,
) !u32 {
- const gpa = macho_file.base.comp.gpa;
- const mod = macho_file.base.comp.module.?;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const gop = try self.unnamed_consts.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
@@ -1096,9 +1106,10 @@ pub fn lowerUnnamedConst(
defer gpa.free(name);
const sym_index = switch (try self.lowerConst(
macho_file,
+ pt,
name,
val,
- val.typeOf(mod).abiAlignment(mod),
+ val.typeOf(mod).abiAlignment(pt),
macho_file.zig_const_sect_index.?,
decl.navSrcLoc(mod),
)) {
@@ -1123,6 +1134,7 @@ const LowerConstResult = union(enum) {
fn lowerConst(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
name: []const u8,
val: Value,
required_alignment: Atom.Alignment,
@@ -1136,7 +1148,7 @@ fn lowerConst(
const sym_index = try self.addAtom(macho_file);
- const res = try codegen.generateSymbol(&macho_file.base, src_loc, val, &code_buffer, .{
+ const res = try codegen.generateSymbol(&macho_file.base, pt, src_loc, val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = sym_index,
@@ -1181,13 +1193,14 @@ fn lowerConst(
pub fn updateExports(
self: *ZigObject,
macho_file: *MachO,
- mod: *Module,
+ pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = pt.zcu;
const gpa = macho_file.base.comp.gpa;
const metadata = switch (exported) {
.decl_index => |decl_index| blk: {
@@ -1196,7 +1209,7 @@ pub fn updateExports(
},
.value => |value| self.anon_decls.getPtr(value) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
- const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.src);
+ const res = try self.lowerAnonDecl(macho_file, pt, value, .none, first_exp.src);
switch (res) {
.ok => {},
.fail => |em| {
@@ -1272,6 +1285,7 @@ pub fn updateExports(
fn updateLazySymbol(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
symbol_index: Symbol.Index,
) !void {
@@ -1285,7 +1299,7 @@ fn updateLazySymbol(
const name_str_index = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(lazy_sym.kind),
- lazy_sym.ty.fmt(mod),
+ lazy_sym.ty.fmt(pt),
});
defer gpa.free(name);
break :blk try self.strtab.insert(gpa, name);
@@ -1294,6 +1308,7 @@ fn updateLazySymbol(
const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&macho_file.base,
+ pt,
src,
lazy_sym,
&required_alignment,
@@ -1431,10 +1446,11 @@ pub fn getOrCreateMetadataForDecl(
pub fn getOrCreateMetadataForLazySymbol(
self: *ZigObject,
macho_file: *MachO,
+ pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
) !Symbol.Index {
- const gpa = macho_file.base.comp.gpa;
- const mod = macho_file.base.comp.module.?;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
@@ -1464,7 +1480,7 @@ pub fn getOrCreateMetadataForLazySymbol(
metadata.state.* = .pending_flush;
const symbol_index = metadata.symbol_index.*;
// anyerror needs to be deferred until flushModule
- if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, lazy_sym, symbol_index);
+ if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, pt, lazy_sym, symbol_index);
return symbol_index;
}
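Both lazy-symbol paths above format their symbol names with std.fmt.allocPrint and free the temporary once it has been copied into the string table. A generic standalone version of that pattern (this `StringTable` is a stand-in, not the linker's):

const std = @import("std");

const StringTable = struct {
    bytes: std.ArrayListUnmanaged(u8) = .{},

    // Copies `name`, so the caller may free its temporary afterwards.
    fn insert(st: *StringTable, gpa: std.mem.Allocator, name: []const u8) !u32 {
        const offset: u32 = @intCast(st.bytes.items.len);
        try st.bytes.appendSlice(gpa, name);
        try st.bytes.append(gpa, 0);
        return offset;
    }
};

fn internLazyName(
    st: *StringTable,
    gpa: std.mem.Allocator,
    kind: []const u8,
    index: usize,
) !u32 {
    const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{d}", .{ kind, index });
    defer gpa.free(name);
    return st.insert(gpa, name);
}

test "temporary symbol names are freed after interning" {
    const gpa = std.testing.allocator;
    var st: StringTable = .{};
    defer st.bytes.deinit(gpa);
    const off = try internLazyName(&st, gpa, "code", 2);
    try std.testing.expectEqual(@as(u32, 0), off);
    try std.testing.expectEqualStrings("__lazy_code_2", st.bytes.items[0..13]);
}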
src/link/Wasm/ZigObject.zig
@@ -241,9 +241,10 @@ pub fn allocateSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator) !Symbol.In
pub fn updateDecl(
zig_object: *ZigObject,
wasm_file: *Wasm,
- mod: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) !void {
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
if (decl.val.getFunction(mod)) |_| {
return;
@@ -269,6 +270,7 @@ pub fn updateDecl(
const res = try codegen.generateSymbol(
&wasm_file.base,
+ pt,
decl.navSrcLoc(mod),
val,
&code_writer,
@@ -285,21 +287,21 @@ pub fn updateDecl(
},
};
- return zig_object.finishUpdateDecl(wasm_file, decl_index, code);
+ return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code);
}
pub fn updateFunc(
zig_object: *ZigObject,
wasm_file: *Wasm,
- mod: *Module,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
const gpa = wasm_file.base.comp.gpa;
- const func = mod.funcInfo(func_index);
+ const func = pt.zcu.funcInfo(func_index);
const decl_index = func.owner_decl;
- const decl = mod.declPtr(decl_index);
+ const decl = pt.zcu.declPtr(decl_index);
const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index);
const atom = wasm_file.getAtomPtr(atom_index);
atom.clear();
@@ -308,7 +310,8 @@ pub fn updateFunc(
defer code_writer.deinit();
const result = try codegen.generateFunction(
&wasm_file.base,
- decl.navSrcLoc(mod),
+ pt,
+ decl.navSrcLoc(pt.zcu),
func_index,
air,
liveness,
@@ -320,29 +323,31 @@ pub fn updateFunc(
.ok => code_writer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
- try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
+ try pt.zcu.failed_analysis.put(gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
return;
},
};
- return zig_object.finishUpdateDecl(wasm_file, decl_index, code);
+ return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code);
}
fn finishUpdateDecl(
zig_object: *ZigObject,
wasm_file: *Wasm,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
code: []const u8,
) !void {
- const gpa = wasm_file.base.comp.gpa;
- const zcu = wasm_file.base.comp.module.?;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
const decl = zcu.declPtr(decl_index);
const decl_info = zig_object.decls_map.get(decl_index).?;
const atom_index = decl_info.atom;
const atom = wasm_file.getAtomPtr(atom_index);
const sym = zig_object.symbol(atom.sym_index);
const full_name = try decl.fullyQualifiedName(zcu);
- sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&zcu.intern_pool));
+ sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip));
try atom.code.appendSlice(gpa, code);
atom.size = @intCast(code.len);
@@ -382,7 +387,7 @@ fn finishUpdateDecl(
// Will be freed upon freeing of decl or after cleanup of Wasm binary.
const full_segment_name = try std.mem.concat(gpa, u8, &.{
segment_name,
- full_name.toSlice(&zcu.intern_pool),
+ full_name.toSlice(ip),
});
errdefer gpa.free(full_segment_name);
sym.tag = .data;
@@ -390,7 +395,7 @@ fn finishUpdateDecl(
},
}
if (code.len == 0) return;
- atom.alignment = decl.getAlignment(zcu);
+ atom.alignment = decl.getAlignment(pt);
}
/// Creates and initializes a new segment in the 'Data' section.
@@ -437,9 +442,10 @@ pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_ind
pub fn lowerAnonDecl(
zig_object: *ZigObject,
wasm_file: *Wasm,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
- src_loc: Module.LazySrcLoc,
+ src_loc: Zcu.LazySrcLoc,
) !codegen.Result {
const gpa = wasm_file.base.comp.gpa;
const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val);
@@ -449,7 +455,7 @@ pub fn lowerAnonDecl(
@intFromEnum(decl_val),
}) catch unreachable;
- switch (try zig_object.lowerConst(wasm_file, name, Value.fromInterned(decl_val), src_loc)) {
+ switch (try zig_object.lowerConst(wasm_file, pt, name, Value.fromInterned(decl_val), src_loc)) {
.ok => |atom_index| zig_object.anon_decls.values()[gop.index] = atom_index,
.fail => |em| return .{ .fail = em },
}
@@ -469,9 +475,15 @@ pub fn lowerAnonDecl(
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local.
/// The given `decl` is the parent decl that owns the constant.
-pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 {
- const gpa = wasm_file.base.comp.gpa;
- const mod = wasm_file.base.comp.module.?;
+pub fn lowerUnnamedConst(
+ zig_object: *ZigObject,
+ wasm_file: *Wasm,
+ pt: Zcu.PerThread,
+ val: Value,
+ decl_index: InternPool.DeclIndex,
+) !u32 {
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
const decl = mod.declPtr(decl_index);
@@ -494,7 +506,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d
else
decl.navSrcLoc(mod);
- switch (try zig_object.lowerConst(wasm_file, name, val, decl_src)) {
+ switch (try zig_object.lowerConst(wasm_file, pt, name, val, decl_src)) {
.ok => |atom_index| {
try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index);
return @intFromEnum(wasm_file.getAtom(atom_index).sym_index);
@@ -509,10 +521,17 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d
const LowerConstResult = union(enum) {
ok: Atom.Index,
- fail: *Module.ErrorMsg,
+ fail: *Zcu.ErrorMsg,
};
-fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.LazySrcLoc) !LowerConstResult {
+fn lowerConst(
+ zig_object: *ZigObject,
+ wasm_file: *Wasm,
+ pt: Zcu.PerThread,
+ name: []const u8,
+ val: Value,
+ src_loc: Zcu.LazySrcLoc,
+) !LowerConstResult {
const gpa = wasm_file.base.comp.gpa;
const mod = wasm_file.base.comp.module.?;
@@ -526,7 +545,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
const code = code: {
const atom = wasm_file.getAtomPtr(atom_index);
- atom.alignment = ty.abiAlignment(mod);
+ atom.alignment = ty.abiAlignment(pt);
const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name });
errdefer gpa.free(segment_name);
zig_object.symbol(sym_index).* = .{
@@ -536,13 +555,14 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
.index = try zig_object.createDataSegment(
gpa,
segment_name,
- ty.abiAlignment(mod),
+ ty.abiAlignment(pt),
),
.virtual_address = undefined,
};
const result = try codegen.generateSymbol(
&wasm_file.base,
+ pt,
src_loc,
val,
&value_bytes,
@@ -568,7 +588,7 @@ fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: V
/// Returns the symbol index of the error name table.
///
/// When the symbol does not yet exist, it will create a new one instead.
-pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Index {
+pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread) !Symbol.Index {
if (zig_object.error_table_symbol != .null) {
return zig_object.error_table_symbol;
}
@@ -581,8 +601,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Ind
const atom_index = try wasm_file.createAtom(sym_index, zig_object.index);
const atom = wasm_file.getAtomPtr(atom_index);
const slice_ty = Type.slice_const_u8_sentinel_0;
- const mod = wasm_file.base.comp.module.?;
- atom.alignment = slice_ty.abiAlignment(mod);
+ atom.alignment = slice_ty.abiAlignment(pt);
const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table");
const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table");
@@ -604,7 +623,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm) !Symbol.Ind
///
/// This creates a table that consists of a pointer and length for each error name.
/// The table is what is being pointed to within the runtime bodies that are generated.
-fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
+fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.PerThread.Id) !void {
if (zig_object.error_table_symbol == .null) return;
const gpa = wasm_file.base.comp.gpa;
const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = zig_object.error_table_symbol }).?;
@@ -631,11 +650,11 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
// Addend for each relocation to the table
var addend: u32 = 0;
- const mod = wasm_file.base.comp.module.?;
- for (mod.global_error_set.keys()) |error_name| {
+ const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.module.?, .tid = tid };
+ for (pt.zcu.global_error_set.keys()) |error_name| {
const atom = wasm_file.getAtomPtr(atom_index);
- const error_name_slice = error_name.toSlice(&mod.intern_pool);
+ const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated
const slice_ty = Type.slice_const_u8_sentinel_0;
@@ -650,14 +669,14 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
.offset = offset,
.addend = @intCast(addend),
});
- atom.size += @intCast(slice_ty.abiSize(mod));
+ atom.size += @intCast(slice_ty.abiSize(pt));
addend += len;
// as we updated the error name table, we now store the actual name within the names atom
try names_atom.code.ensureUnusedCapacity(gpa, len);
names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]);
- log.debug("Populated error name: '{}'", .{error_name.fmt(&mod.intern_pool)});
+ log.debug("Populated error name: '{}'", .{error_name.fmt(&pt.zcu.intern_pool)});
}
names_atom.size = addend;
zig_object.error_names_atom = names_atom_index;
@@ -858,10 +877,11 @@ pub fn deleteExport(
pub fn updateExports(
zig_object: *ZigObject,
wasm_file: *Wasm,
- mod: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) !void {
+ const mod = pt.zcu;
const decl_index = switch (exported) {
.decl_index => |i| i,
.value => |val| {
@@ -880,7 +900,7 @@ pub fn updateExports(
for (export_indices) |export_idx| {
const exp = mod.all_exports.items[export_idx];
if (exp.opts.section.toSlice(&mod.intern_pool)) |section| {
- try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
+ try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
decl.navSrcLoc(mod),
"Unimplemented: ExportOptions.section '{s}'",
@@ -913,7 +933,7 @@ pub fn updateExports(
},
.strong => {}, // symbols are strong by default
.link_once => {
- try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
+ try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
decl.navSrcLoc(mod),
"Unimplemented: LinkOnce",
@@ -1096,7 +1116,7 @@ pub fn createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde
return atom_index;
}
-pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Zcu, decl_index: InternPool.DeclIndex) !void {
if (zig_object.dwarf) |*dw| {
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
@@ -1228,8 +1248,8 @@ fn appendFunction(zig_object: *ZigObject, gpa: std.mem.Allocator, func: std.wasm
return index;
}
-pub fn flushModule(zig_object: *ZigObject, wasm_file: *Wasm) !void {
- try zig_object.populateErrorNameTable(wasm_file);
+pub fn flushModule(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.PerThread.Id) !void {
+ try zig_object.populateErrorNameTable(wasm_file, tid);
try zig_object.setupErrorsLen(wasm_file);
}
@@ -1248,8 +1268,6 @@ const File = @import("file.zig").File;
const InternPool = @import("../../InternPool.zig");
const Liveness = @import("../../Liveness.zig");
const Zcu = @import("../../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const StringTable = @import("../StringTable.zig");
const Symbol = @import("Symbol.zig");
const Type = @import("../../Type.zig");
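populateErrorNameTable above walks the global error names, emits one pointer/length slot per name, and accumulates an addend pointing into a second atom that holds the 0-terminated strings. A simplified standalone model of that two-buffer layout and addend bookkeeping (all types here are toys):

const std = @import("std");

const Slot = struct { addend: u32, len: u32 };

fn buildErrorNameTable(
    gpa: std.mem.Allocator,
    names: []const []const u8,
    slots: *std.ArrayListUnmanaged(Slot),
    bytes: *std.ArrayListUnmanaged(u8),
) !void {
    var addend: u32 = 0;
    for (names) |name| {
        const len: u32 = @intCast(name.len + 1); // names are 0-terminated
        try slots.append(gpa, .{ .addend = addend, .len = len });
        try bytes.appendSlice(gpa, name);
        try bytes.append(gpa, 0);
        addend += len;
    }
}

test "each slot points at its 0-terminated name" {
    const gpa = std.testing.allocator;
    var slots: std.ArrayListUnmanaged(Slot) = .{};
    defer slots.deinit(gpa);
    var bytes: std.ArrayListUnmanaged(u8) = .{};
    defer bytes.deinit(gpa);

    try buildErrorNameTable(gpa, &.{ "OutOfMemory", "Overflow" }, &slots, &bytes);

    try std.testing.expectEqual(@as(u32, 12), slots.items[1].addend);
    const second = bytes.items[slots.items[1].addend..][0 .. slots.items[1].len - 1];
    try std.testing.expectEqualStrings("Overflow", second);
}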
src/link/C.zig
@@ -186,13 +186,13 @@ pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void {
pub fn updateFunc(
self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
) !void {
- const gpa = self.base.comp.gpa;
-
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = zcu.declPtr(decl_index);
@@ -218,7 +218,7 @@ pub fn updateFunc(
.object = .{
.dg = .{
.gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
.mod = file_scope.mod,
.error_msg = null,
.pass = .{ .decl = decl_index },
@@ -263,7 +263,7 @@ pub fn updateFunc(
gop.value_ptr.code = try self.addString(function.object.code.items);
}
-fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
+fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void {
const gpa = self.base.comp.gpa;
const anon_decl = self.anon_decls.keys()[i];
@@ -275,8 +275,8 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
- .zcu = zcu,
- .mod = zcu.root_mod,
+ .pt = pt,
+ .mod = pt.zcu.root_mod,
.error_msg = null,
.pass = .{ .anon = anon_decl },
.is_naked_fn = false,
@@ -319,12 +319,13 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
};
}
-pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.comp.gpa;
+ const zcu = pt.zcu;
const decl = zcu.declPtr(decl_index);
const gop = try self.decl_table.getOrPut(gpa, decl_index);
errdefer _ = self.decl_table.pop();
@@ -342,7 +343,7 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
.mod = file_scope.mod,
.error_msg = null,
.pass = .{ .decl = decl_index },
@@ -390,8 +391,8 @@ pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclInde
_ = decl_index;
}
-pub fn flush(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void {
- return self.flushModule(arena, prog_node);
+pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
+ return self.flushModule(arena, tid, prog_node);
}
fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
@@ -409,7 +410,7 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
return defines;
}
-pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void {
+pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
_ = arena; // Has the same lifetime as the call to Compilation.update.
const tracy = trace(@src());
@@ -421,11 +422,12 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
const comp = self.base.comp;
const gpa = comp.gpa;
const zcu = self.base.comp.module.?;
+ const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid };
{
var i: usize = 0;
while (i < self.anon_decls.count()) : (i += 1) {
- try updateAnonDecl(self, zcu, i);
+ try updateAnonDecl(self, pt, i);
}
}
@@ -463,7 +465,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
self.lazy_fwd_decl_buf.clearRetainingCapacity();
self.lazy_code_buf.clearRetainingCapacity();
try f.lazy_ctype_pool.init(gpa);
- try self.flushErrDecls(zcu, &f.lazy_ctype_pool);
+ try self.flushErrDecls(pt, &f.lazy_ctype_pool);
// Unlike other backends, the .c code we are emitting has order-dependent decls.
// `CType`s, forward decls, and non-functions first.
@@ -483,7 +485,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
}
for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock(
- zcu,
+ pt,
zcu.root_mod,
&f,
decl_block,
@@ -497,7 +499,7 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none;
const mod = zcu.namespacePtr(decl.src_namespace).fileScope(zcu).mod;
try self.flushDeclBlock(
- zcu,
+ pt,
mod,
&f,
decl_block,
@@ -670,7 +672,7 @@ fn flushCTypes(
}
}
-fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
+fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
const gpa = self.base.comp.gpa;
const fwd_decl = &self.lazy_fwd_decl_buf;
@@ -679,8 +681,8 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDecl
var object = codegen.Object{
.dg = .{
.gpa = gpa,
- .zcu = zcu,
- .mod = zcu.root_mod,
+ .pt = pt,
+ .mod = pt.zcu.root_mod,
.error_msg = null,
.pass = .flush,
.is_naked_fn = false,
@@ -712,7 +714,7 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDecl
fn flushLazyFn(
self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
mod: *Module,
ctype_pool: *codegen.CType.Pool,
lazy_ctype_pool: *const codegen.CType.Pool,
@@ -726,7 +728,7 @@ fn flushLazyFn(
var object = codegen.Object{
.dg = .{
.gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
.mod = mod,
.error_msg = null,
.pass = .flush,
@@ -761,7 +763,7 @@ fn flushLazyFn(
fn flushLazyFns(
self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
mod: *Module,
f: *Flush,
lazy_ctype_pool: *const codegen.CType.Pool,
@@ -775,13 +777,13 @@ fn flushLazyFns(
const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
if (gop.found_existing) continue;
gop.value_ptr.* = {};
- try self.flushLazyFn(zcu, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
+ try self.flushLazyFn(pt, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
}
}
fn flushDeclBlock(
self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
mod: *Module,
f: *Flush,
decl_block: *const DeclBlock,
@@ -790,7 +792,7 @@ fn flushDeclBlock(
extern_name: InternPool.OptionalNullTerminatedString,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
- try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
+ try self.flushLazyFns(pt, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
try f.all_buffers.ensureUnusedCapacity(gpa, 1);
// avoid emitting extern decls that are already exported
if (extern_name.unwrap()) |name| if (export_names.contains(name)) return;
@@ -845,11 +847,12 @@ pub fn flushEmitH(zcu: *Zcu) !void {
pub fn updateExports(
self: *C,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
exported: Zcu.Exported,
export_indices: []const u32,
) !void {
- const gpa = self.base.comp.gpa;
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) {
.decl_index => |decl_index| .{
zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).fileScope(zcu).mod,
@@ -869,7 +872,7 @@ pub fn updateExports(
fwd_decl.clearRetainingCapacity();
var dg: codegen.DeclGen = .{
.gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
.mod = mod,
.error_msg = null,
.pass = pass,
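flushModule in the C backend iterates self.anon_decls by index with `while (i < self.anon_decls.count())` rather than a for loop — a shape that keeps working if lowering one anonymous decl registers additional ones during the walk; that reading of the intent is an assumption, not something the diff states. A small self-contained illustration of why the index-based loop is the safe choice (the work list here is a plain array list standing in for the decl map):

const std = @import("std");

// Lowering item `n` may discover and enqueue more items; iterating by
// index keeps newly appended entries in the same pass.
fn lowerAll(gpa: std.mem.Allocator, work: *std.ArrayListUnmanaged(u32), lowered: *u32) !void {
    var i: usize = 0;
    while (i < work.items.len) : (i += 1) {
        const item = work.items[i];
        lowered.* += 1;
        // Pretend every even item references one more anonymous decl.
        if (item % 2 == 0) try work.append(gpa, item + 1);
    }
}

test "entries appended during the walk are still processed" {
    const gpa = std.testing.allocator;
    var work: std.ArrayListUnmanaged(u32) = .{};
    defer work.deinit(gpa);
    try work.append(gpa, 2);

    var lowered: u32 = 0;
    try lowerAll(gpa, &work, &lowered);

    // 2 was lowered, which enqueued 3, which was lowered in the same pass.
    try std.testing.expectEqual(@as(u32, 2), lowered);
}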
src/link/Coff.zig
@@ -1120,16 +1120,17 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
self.getAtomPtr(atom_index).sym_index = 0;
}
-pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |llvm_object| {
- return llvm_object.updateFunc(mod, func_index, air, liveness);
+ return llvm_object.updateFunc(pt, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
+ const mod = pt.zcu;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -1144,6 +1145,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:
const res = try codegen.generateFunction(
&self.base,
+ pt,
decl.navSrcLoc(mod),
func_index,
air,
@@ -1160,14 +1162,14 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:
},
};
- try self.updateDeclCode(decl_index, code, .FUNCTION);
+ try self.updateDeclCode(pt, decl_index, code, .FUNCTION);
// Exports will be updated by `Zcu.processExports` after the update.
}
-pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclIndex) !u32 {
- const gpa = self.base.comp.gpa;
- const mod = self.base.comp.module.?;
+pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const decl = mod.declPtr(decl_index);
const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
@@ -1179,7 +1181,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd
const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
defer gpa.free(sym_name);
const ty = val.typeOf(mod);
- const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod))) {
+ const atom_index = switch (try self.lowerConst(pt, sym_name, val, ty.abiAlignment(pt), self.rdata_section_index.?, decl.navSrcLoc(mod))) {
.ok => |atom_index| atom_index,
.fail => |em| {
decl.analysis = .codegen_failure;
@@ -1197,7 +1199,15 @@ const LowerConstResult = union(enum) {
fail: *Module.ErrorMsg,
};
-fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.LazySrcLoc) !LowerConstResult {
+fn lowerConst(
+ self: *Coff,
+ pt: Zcu.PerThread,
+ name: []const u8,
+ val: Value,
+ required_alignment: InternPool.Alignment,
+ sect_id: u16,
+ src_loc: Module.LazySrcLoc,
+) !LowerConstResult {
const gpa = self.base.comp.gpa;
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1208,7 +1218,7 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
try self.setSymbolName(sym, name);
sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_id + 1));
- const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .none, .{
+ const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .none, .{
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
@@ -1235,13 +1245,14 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
pub fn updateDecl(
self: *Coff,
- mod: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
+ const mod = pt.zcu;
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
const tracy = trace(@src());
defer tracy.end();
@@ -1270,7 +1281,7 @@ pub fn updateDecl(
defer code_buffer.deinit();
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
- const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
+ const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
.parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
@@ -1282,19 +1293,20 @@ pub fn updateDecl(
},
};
- try self.updateDeclCode(decl_index, code, .NULL);
+ try self.updateDeclCode(pt, decl_index, code, .NULL);
// Exports will be updated by `Zcu.processExports` after the update.
}
fn updateLazySymbolAtom(
self: *Coff,
+ pt: Zcu.PerThread,
sym: link.File.LazySymbol,
atom_index: Atom.Index,
section_index: u16,
) !void {
- const gpa = self.base.comp.gpa;
- const mod = self.base.comp.module.?;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1302,7 +1314,7 @@ fn updateLazySymbolAtom(
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(sym.kind),
- sym.ty.fmt(mod),
+ sym.ty.fmt(pt),
});
defer gpa.free(name);
@@ -1312,6 +1324,7 @@ fn updateLazySymbolAtom(
const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&self.base,
+ pt,
src,
sym,
&required_alignment,
@@ -1346,7 +1359,7 @@ fn updateLazySymbolAtom(
try self.writeAtom(atom_index, code);
}
-pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index {
+pub fn getOrCreateAtomForLazySymbol(self: *Coff, pt: Zcu.PerThread, sym: link.File.LazySymbol) !Atom.Index {
const gpa = self.base.comp.gpa;
const mod = self.base.comp.module.?;
const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(mod));
@@ -1364,7 +1377,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato
metadata.state.* = .pending_flush;
const atom = metadata.atom.*;
// anyerror needs to be deferred until flushModule
- if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
+ if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(pt, sym, atom, switch (sym.kind) {
.code => self.text_section_index.?,
.const_data => self.rdata_section_index.?,
});
@@ -1410,14 +1423,14 @@ fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 {
return index;
}
-fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void {
- const mod = self.base.comp.module.?;
+fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void {
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
- const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0);
+ const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
@@ -1496,7 +1509,7 @@ pub fn freeDecl(self: *Coff, decl_index: InternPool.DeclIndex) void {
pub fn updateExports(
self: *Coff,
- mod: *Module,
+ pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
@@ -1504,6 +1517,7 @@ pub fn updateExports(
@panic("Attempted to compile for object format that was disabled by build configuration");
}
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const comp = self.base.comp;
const target = comp.root_mod.resolved_target.result;
@@ -1542,7 +1556,7 @@ pub fn updateExports(
}
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
const gpa = comp.gpa;
@@ -1553,7 +1567,7 @@ pub fn updateExports(
},
.value => |value| self.anon_decls.getPtr(value) orelse blk: {
const first_exp = mod.all_exports.items[export_indices[0]];
- const res = try self.lowerAnonDecl(value, .none, first_exp.src);
+ const res = try self.lowerAnonDecl(pt, value, .none, first_exp.src);
switch (res) {
.ok => {},
.fail => |em| {
@@ -1696,19 +1710,19 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
gop.value_ptr.* = current;
}
-pub fn flush(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flush(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = self.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
if (use_lld) {
- return lld.linkWithLLD(self, arena, prog_node);
+ return lld.linkWithLLD(self, arena, tid, prog_node);
}
switch (comp.config.output_mode) {
- .Exe, .Obj => return self.flushModule(arena, prog_node),
+ .Exe, .Obj => return self.flushModule(arena, tid, prog_node),
.Lib => return error.TODOImplementWritingLibFiles,
}
}
-pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1723,13 +1737,17 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node)
const sub_prog_node = prog_node.start("COFF Flush", 0);
defer sub_prog_node.end();
- const module = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented;
+ const pt: Zcu.PerThread = .{
+ .zcu = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
+ .tid = tid,
+ };
if (self.lazy_syms.getPtr(.none)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbolAtom(
- link.File.LazySymbol.initDecl(.code, null, module),
+ pt,
+ link.File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_atom,
self.text_section_index.?,
) catch |err| return switch (err) {
@@ -1737,7 +1755,8 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node)
else => |e| e,
};
if (metadata.rdata_state != .unused) self.updateLazySymbolAtom(
- link.File.LazySymbol.initDecl(.const_data, null, module),
+ pt,
+ link.File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.rdata_atom,
self.rdata_section_index.?,
) catch |err| return switch (err) {
@@ -1858,6 +1877,7 @@ pub fn getDeclVAddr(self: *Coff, decl_index: InternPool.DeclIndex, reloc_info: l
pub fn lowerAnonDecl(
self: *Coff,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
@@ -1866,7 +1886,7 @@ pub fn lowerAnonDecl(
const mod = self.base.comp.module.?;
const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
- .none => ty.abiAlignment(mod),
+ .none => ty.abiAlignment(pt),
else => explicit_alignment,
};
if (self.anon_decls.get(decl_val)) |metadata| {
@@ -1881,6 +1901,7 @@ pub fn lowerAnonDecl(
@intFromEnum(decl_val),
}) catch unreachable;
const res = self.lowerConst(
+ pt,
name,
val,
decl_alignment,
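The same conversion repeats throughout Coff.zig: instead of reaching for `self.base.comp.module.?` and `self.base.comp.gpa`, each per-decl routine receives a `Zcu.PerThread` and derives its locals from it. A minimal sketch of that shape, using only the fields and calls visible in this diff (`pt.zcu`, `zcu.gpa`, `decl.getAlignment(pt)`); the helper name is hypothetical and not part of the commit:

fn exampleUpdate(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
    // Hypothetical helper: how a backend derives its old locals from the handle.
    const mod = pt.zcu; // was: self.base.comp.module.?
    const gpa = mod.gpa; // was: self.base.comp.gpa
    const decl = mod.declPtr(decl_index);
    // Layout queries now take the per-thread handle rather than the bare Zcu:
    const required_alignment = decl.getAlignment(pt);
    _ = self;
    _ = gpa;
    _ = required_alignment;
}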
src/link/Dwarf.zig
@@ -31,7 +31,7 @@ strtab: StringTable = .{},
/// They will end up in the DWARF debug_line header as two lists:
/// * []include_directory
/// * []file_names
-di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
+di_files: std.AutoArrayHashMapUnmanaged(*const Zcu.File, void) = .{},
global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
@@ -67,7 +67,7 @@ const DbgLineHeader = struct {
/// Decl's inner Atom is assigned an offset within the DWARF section.
pub const DeclState = struct {
dwarf: *Dwarf,
- mod: *Module,
+ pt: Zcu.PerThread,
di_atom_decls: *const AtomTable,
dbg_line_func: InternPool.Index,
dbg_line: std.ArrayList(u8),
@@ -113,7 +113,7 @@ pub const DeclState = struct {
.type = ty,
.offset = undefined,
});
- log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) });
+ log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.pt) });
try self.abbrev_resolver.putNoClobber(gpa, ty.toIntern(), sym_index);
break :blk sym_index;
};
@@ -128,16 +128,17 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
- mod: *Module,
+ pt: Zcu.PerThread,
atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
+ const zcu = pt.zcu;
const dbg_info_buffer = &self.dbg_info;
- const target = mod.getTarget();
+ const target = zcu.getTarget();
const target_endian = target.cpu.arch.endian();
- const ip = &mod.intern_pool;
+ const ip = &zcu.intern_pool;
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.NoReturn => unreachable,
.Void => {
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type));
@@ -148,12 +149,12 @@ pub const DeclState = struct {
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean);
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
+ try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
},
.Int => {
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(zcu);
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
// DW.AT.encoding, DW.FORM.data1
@@ -162,30 +163,30 @@ pub const DeclState = struct {
.unsigned => DW.ATE.unsigned,
});
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
+ try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
},
.Optional => {
- if (ty.isPtrLikeOptional(mod)) {
+ if (ty.isPtrLikeOptional(zcu)) {
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
+ try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
- const payload_ty = ty.optionalChild(mod);
+ const payload_ty = ty.optionalChild(zcu);
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
- const abi_size = ty.abiSize(mod);
+ const abi_size = ty.abiSize(pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(21);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
@@ -208,14 +209,14 @@ pub const DeclState = struct {
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
- const offset = abi_size - payload_ty.abiSize(mod);
+ const offset = abi_size - payload_ty.abiSize(pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), offset);
// DW.AT.structure_type delimit children
try dbg_info_buffer.append(0);
}
},
.Pointer => {
- if (ty.isSlice(mod)) {
+ if (ty.isSlice(zcu)) {
// Slices are structs: struct { .ptr = *, .len = N }
const ptr_bits = target.ptrBitWidth();
const ptr_bytes: u8 = @intCast(@divExact(ptr_bits, 8));
@@ -223,9 +224,9 @@ pub const DeclState = struct {
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
+ try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(21);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
@@ -235,7 +236,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
- const ptr_ty = ty.slicePtrFieldType(mod);
+ const ptr_ty = ty.slicePtrFieldType(zcu);
try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
dbg_info_buffer.appendAssumeCapacity(0);
@@ -258,19 +259,19 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
- try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index));
}
},
.Array => {
// DW.AT.array_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.array_type));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.ensureUnusedCapacity(9);
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
- try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index));
// DW.AT.subrange_type
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.array_dim));
// DW.AT.type, DW.FORM.ref4
@@ -278,7 +279,7 @@ pub const DeclState = struct {
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(index));
// DW.AT.count, DW.FORM.udata
- const len = ty.arrayLenIncludingSentinel(mod);
+ const len = ty.arrayLenIncludingSentinel(pt.zcu);
try leb128.writeUleb128(dbg_info_buffer.writer(), len);
// DW.AT.array_type delimit children
try dbg_info_buffer.append(0);
@@ -287,13 +288,13 @@ pub const DeclState = struct {
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
+ try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
blk: {
switch (ip.indexToKey(ty.ip_index)) {
.anon_struct_type => |fields| {
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
for (fields.types.get(ip), 0..) |field_ty, field_index| {
// DW.AT.member
@@ -305,14 +306,14 @@ pub const DeclState = struct {
try dbg_info_buffer.appendNTimes(0, 4);
try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
- const field_off = ty.structFieldOffset(field_index, mod);
+ const field_off = ty.structFieldOffset(field_index, pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), field_off);
}
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// DW.AT.name, DW.FORM.string
- try ty.print(dbg_info_buffer.writer(), mod);
+ try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
if (struct_type.layout == .@"packed") {
@@ -322,7 +323,7 @@ pub const DeclState = struct {
if (struct_type.isTuple(ip)) {
for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| {
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
// DW.AT.name, DW.FORM.string
@@ -340,7 +341,7 @@ pub const DeclState = struct {
struct_type.field_types.get(ip),
struct_type.offsets.get(ip),
) |field_name, field_ty, field_off| {
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
const field_name_slice = field_name.toSlice(ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2);
@@ -367,9 +368,9 @@ pub const DeclState = struct {
// DW.AT.enumeration_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type));
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(mod));
+ try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
// DW.AT.name, DW.FORM.string
- try ty.print(dbg_info_buffer.writer(), mod);
+ try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
const enum_type = ip.loadEnumType(ty.ip_index);
@@ -386,8 +387,8 @@ pub const DeclState = struct {
const value = enum_type.values.get(ip)[field_i];
// TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645
- const field_int_val = try Value.fromInterned(value).intFromEnum(ty, mod);
- break :value @bitCast(field_int_val.toSignedInt(mod));
+ const field_int_val = try Value.fromInterned(value).intFromEnum(ty, pt);
+ break :value @bitCast(field_int_val.toSignedInt(pt));
};
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
}
@@ -396,8 +397,8 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
},
.Union => {
- const union_obj = mod.typeToUnion(ty).?;
- const layout = mod.getUnionLayout(union_obj);
+ const union_obj = zcu.typeToUnion(ty).?;
+ const layout = pt.getUnionLayout(union_obj);
const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0;
const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size;
// TODO this is temporary to match current state of unions in Zig - we don't yet have
@@ -410,7 +411,7 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), layout.abi_size);
// DW.AT.name, DW.FORM.string
- try ty.print(dbg_info_buffer.writer(), mod);
+ try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
// DW.AT.member
@@ -435,12 +436,12 @@ pub const DeclState = struct {
if (is_tagged) {
try dbg_info_buffer.writer().print("AnonUnion\x00", .{});
} else {
- try ty.print(dbg_info_buffer.writer(), mod);
+ try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
}
for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
const field_name_slice = field_name.toSlice(ip);
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
@@ -474,25 +475,25 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
}
},
- .ErrorSet => try addDbgInfoErrorSet(mod, ty, target, &self.dbg_info),
+ .ErrorSet => try addDbgInfoErrorSet(pt, ty, target, &self.dbg_info),
.ErrorUnion => {
- const error_ty = ty.errorUnionSet(mod);
- const payload_ty = ty.errorUnionPayload(mod);
- const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod);
- const error_align = Type.anyerror.abiAlignment(mod);
- const abi_size = ty.abiSize(mod);
- const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0;
- const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod);
+ const error_ty = ty.errorUnionSet(zcu);
+ const payload_ty = ty.errorUnionPayload(zcu);
+ const payload_align = if (payload_ty.isNoReturn(zcu)) .none else payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ const abi_size = ty.abiSize(pt);
+ const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(pt) else 0;
+ const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(pt);
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
- try ty.print(dbg_info_buffer.writer(), mod);
+ try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
- if (!payload_ty.isNoReturn(mod)) {
+ if (!payload_ty.isNoReturn(zcu)) {
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(11);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
@@ -526,7 +527,7 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
},
else => {
- log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(self.mod)});
+ log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(pt)});
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type));
},
}
@@ -555,6 +556,7 @@ pub const DeclState = struct {
owner_decl: InternPool.DeclIndex,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
+ const pt = self.pt;
const dbg_info = &self.dbg_info;
const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
@@ -580,9 +582,9 @@ pub const DeclState = struct {
}
},
.register_pair => |regs| {
- const reg_bits = self.mod.getTarget().ptrBitWidth();
+ const reg_bits = pt.zcu.getTarget().ptrBitWidth();
const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8));
- const abi_size = ty.abiSize(self.mod);
+ const abi_size = ty.abiSize(pt);
try dbg_info.ensureUnusedCapacity(10);
dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevCode.parameter));
// DW.AT.location, DW.FORM.exprloc
@@ -675,10 +677,10 @@ pub const DeclState = struct {
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@intFromEnum(AbbrevCode.variable));
const gpa = self.dwarf.allocator;
- const mod = self.mod;
- const target = mod.getTarget();
+ const pt = self.pt;
+ const target = pt.zcu.getTarget();
const endian = target.cpu.arch.endian();
- const child_ty = if (is_ptr) ty.childType(mod) else ty;
+ const child_ty = if (is_ptr) ty.childType(pt.zcu) else ty;
switch (loc) {
.register => |reg| {
@@ -701,9 +703,9 @@ pub const DeclState = struct {
},
.register_pair => |regs| {
- const reg_bits = self.mod.getTarget().ptrBitWidth();
+ const reg_bits = pt.zcu.getTarget().ptrBitWidth();
const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8));
- const abi_size = child_ty.abiSize(self.mod);
+ const abi_size = child_ty.abiSize(pt);
try dbg_info.ensureUnusedCapacity(9);
// DW.AT.location, DW.FORM.exprloc
var expr_len = std.io.countingWriter(std.io.null_writer);
@@ -829,9 +831,9 @@ pub const DeclState = struct {
const fixup = dbg_info.items.len;
dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1,
- if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
+ if (child_ty.isSignedInt(pt.zcu)) DW.OP.consts else DW.OP.constu,
});
- if (child_ty.isSignedInt(mod)) {
+ if (child_ty.isSignedInt(pt.zcu)) {
try leb128.writeIleb128(dbg_info.writer(), @as(i64, @bitCast(x)));
} else {
try leb128.writeUleb128(dbg_info.writer(), x);
@@ -844,7 +846,7 @@ pub const DeclState = struct {
// DW.AT.location, DW.FORM.exprloc
// uleb128(exprloc_len)
// DW.OP.implicit_value uleb128(len_of_bytes) bytes
- const abi_size: u32 = @intCast(child_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(child_ty.abiSize(self.pt));
var implicit_value_len = std.ArrayList(u8).init(gpa);
defer implicit_value_len.deinit();
try leb128.writeUleb128(implicit_value_len.writer(), abi_size);
@@ -934,22 +936,23 @@ pub const DeclState = struct {
}
pub fn setInlineFunc(self: *DeclState, func: InternPool.Index) error{OutOfMemory}!void {
+ const zcu = self.pt.zcu;
if (self.dbg_line_func == func) return;
try self.dbg_line.ensureUnusedCapacity((1 + 4) + (1 + 5));
- const old_func_info = self.mod.funcInfo(self.dbg_line_func);
- const new_func_info = self.mod.funcInfo(func);
+ const old_func_info = zcu.funcInfo(self.dbg_line_func);
+ const new_func_info = zcu.funcInfo(func);
- const old_file = try self.dwarf.addDIFile(self.mod, old_func_info.owner_decl);
- const new_file = try self.dwarf.addDIFile(self.mod, new_func_info.owner_decl);
+ const old_file = try self.dwarf.addDIFile(zcu, old_func_info.owner_decl);
+ const new_file = try self.dwarf.addDIFile(zcu, new_func_info.owner_decl);
if (old_file != new_file) {
self.dbg_line.appendAssumeCapacity(DW.LNS.set_file);
leb128.writeUnsignedFixed(4, self.dbg_line.addManyAsArrayAssumeCapacity(4), new_file);
}
- const old_src_line: i33 = self.mod.declPtr(old_func_info.owner_decl).navSrcLine(self.mod);
- const new_src_line: i33 = self.mod.declPtr(new_func_info.owner_decl).navSrcLine(self.mod);
+ const old_src_line: i33 = zcu.declPtr(old_func_info.owner_decl).navSrcLine(zcu);
+ const new_src_line: i33 = zcu.declPtr(new_func_info.owner_decl).navSrcLine(zcu);
if (new_src_line != old_src_line) {
self.dbg_line.appendAssumeCapacity(DW.LNS.advance_line);
leb128.writeSignedFixed(5, self.dbg_line.addManyAsArrayAssumeCapacity(5), new_src_line - old_src_line);
@@ -1074,19 +1077,19 @@ pub fn deinit(self: *Dwarf) void {
/// Initializes Decl's state and its matching output buffers.
/// Call this before `commitDeclState`.
-pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !DeclState {
+pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !DeclState {
const tracy = trace(@src());
defer tracy.end();
- const decl = mod.declPtr(decl_index);
- const decl_linkage_name = try decl.fullyQualifiedName(mod);
+ const decl = pt.zcu.declPtr(decl_index);
+ const decl_linkage_name = try decl.fullyQualifiedName(pt.zcu);
- log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&mod.intern_pool), decl });
+ log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl });
const gpa = self.allocator;
var decl_state: DeclState = .{
.dwarf = self,
- .mod = mod,
+ .pt = pt,
.di_atom_decls = &self.di_atom_decls,
.dbg_line_func = undefined,
.dbg_line = std.ArrayList(u8).init(gpa),
@@ -1105,7 +1108,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
assert(decl.has_tv);
- switch (decl.typeOf(mod).zigTypeTag(mod)) {
+ switch (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu)) {
.Fn => {
_ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
@@ -1114,13 +1117,13 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
try dbg_line_buffer.ensureTotalCapacity((3 + ptr_width_bytes) + (1 + 4) + (1 + 4) + (1 + 5) + 1);
decl_state.dbg_line_func = decl.val.toIntern();
- const func = decl.val.getFunction(mod).?;
+ const func = decl.val.getFunction(pt.zcu).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
- decl.navSrcLine(mod),
+ decl.navSrcLine(pt.zcu),
func.lbrace_line,
func.rbrace_line,
});
- const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line);
+ const line: u28 = @intCast(decl.navSrcLine(pt.zcu) + func.lbrace_line);
dbg_line_buffer.appendSliceAssumeCapacity(&.{
DW.LNS.extended_op,
@@ -1142,7 +1145,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
// Once we support more than one source file, this will have the ability to be more
// than one possible value.
- const file_index = try self.addDIFile(mod, decl_index);
+ const file_index = try self.addDIFile(pt.zcu, decl_index);
leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index);
dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_column);
@@ -1153,13 +1156,13 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy);
// .debug_info subprogram
- const decl_name_slice = decl.name.toSlice(&mod.intern_pool);
- const decl_linkage_name_slice = decl_linkage_name.toSlice(&mod.intern_pool);
+ const decl_name_slice = decl.name.toSlice(&pt.zcu.intern_pool);
+ const decl_linkage_name_slice = decl_linkage_name.toSlice(&pt.zcu.intern_pool);
try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 +
(decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1));
- const fn_ret_type = decl.typeOf(mod).fnReturnType(mod);
- const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
+ const fn_ret_type = decl.typeOf(pt.zcu).fnReturnType(pt.zcu);
+ const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(pt);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(
@as(AbbrevCode, if (fn_ret_has_bits) .subprogram else .subprogram_retvoid),
));
@@ -1191,7 +1194,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
pub fn commitDeclState(
self: *Dwarf,
- zcu: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
sym_addr: u64,
sym_size: u64,
@@ -1201,6 +1204,7 @@ pub fn commitDeclState(
defer tracy.end();
const gpa = self.allocator;
+ const zcu = pt.zcu;
const decl = zcu.declPtr(decl_index);
const ip = &zcu.intern_pool;
const namespace = zcu.namespacePtr(decl.src_namespace);
@@ -1432,7 +1436,7 @@ pub fn commitDeclState(
if (ip.isErrorSetType(ty.toIntern())) continue;
symbol.offset = @intCast(dbg_info_buffer.items.len);
- try decl_state.addDbgInfoType(zcu, di_atom_index, ty);
+ try decl_state.addDbgInfoType(pt, di_atom_index, ty);
}
}
@@ -1457,7 +1461,7 @@ pub fn commitDeclState(
reloc.offset,
value,
reloc_target,
- ty.fmt(zcu),
+ ty.fmt(pt),
});
mem.writeInt(
u32,
@@ -1691,7 +1695,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1699,14 +1703,14 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: InternPool.D
const atom = self.getAtom(.src_fn, atom_index);
if (atom.len == 0) return;
- const decl = mod.declPtr(decl_index);
- const func = decl.val.getFunction(mod).?;
+ const decl = zcu.declPtr(decl_index);
+ const func = decl.val.getFunction(zcu).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
- decl.navSrcLine(mod),
+ decl.navSrcLine(zcu),
func.lbrace_line,
func.rbrace_line,
});
- const line: u28 = @intCast(decl.navSrcLine(mod) + func.lbrace_line);
+ const line: u28 = @intCast(decl.navSrcLine(zcu) + func.lbrace_line);
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, line);
@@ -1969,7 +1973,7 @@ fn dbgInfoHeaderBytes(self: *Dwarf) usize {
return 120;
}
-pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Module, low_pc: u64, high_pc: u64) !void {
+pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Zcu, low_pc: u64, high_pc: u64) !void {
// If this value is null it means there is an error in the module;
// leave debug_info_header_dirty=true.
const first_dbg_info_off = self.getDebugInfoOff() orelse return;
@@ -2058,14 +2062,14 @@ pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Module, low_pc: u64, high_pc: u64)
}
}
-fn resolveCompilationDir(module: *Module, buffer: *[std.fs.max_path_bytes]u8) []const u8 {
+fn resolveCompilationDir(zcu: *Zcu, buffer: *[std.fs.max_path_bytes]u8) []const u8 {
// We fully resolve all paths at this point to avoid lack of source line info in stack
// traces or lack of debugging information which, if relative paths were used, would
// be very location dependent.
// TODO: the only concern I have with this is WASI as either host or target, should
// we leave the paths as relative then?
- const root_dir_path = module.root_mod.root.root_dir.path orelse ".";
- const sub_path = module.root_mod.root.sub_path;
+ const root_dir_path = zcu.root_mod.root.root_dir.path orelse ".";
+ const sub_path = zcu.root_mod.root.sub_path;
const realpath = if (std.fs.path.isAbsolute(root_dir_path)) r: {
@memcpy(buffer[0..root_dir_path.len], root_dir_path);
break :r root_dir_path;
@@ -2682,7 +2686,7 @@ fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
-pub fn flushModule(self: *Dwarf, module: *Module) !void {
+pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void {
const comp = self.bin_file.comp;
const target = comp.root_mod.resolved_target.result;
@@ -2694,9 +2698,9 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSetNames(
- module,
+ pt,
Type.anyerror,
- module.global_error_set.keys(),
+ pt.zcu.global_error_set.keys(),
target,
&dbg_info_buffer,
);
@@ -2759,9 +2763,9 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
}
}
-fn addDIFile(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclIndex) !u28 {
- const decl = mod.declPtr(decl_index);
- const file_scope = decl.getFileScope(mod);
+fn addDIFile(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !u28 {
+ const decl = zcu.declPtr(decl_index);
+ const file_scope = decl.getFileScope(zcu);
const gop = try self.di_files.getOrPut(self.allocator, file_scope);
if (!gop.found_existing) {
switch (self.bin_file.tag) {
@@ -2827,16 +2831,16 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
}
fn addDbgInfoErrorSet(
- mod: *Module,
+ pt: Zcu.PerThread,
ty: Type,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void {
- return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod).get(&mod.intern_pool), target, dbg_info_buffer);
+ return addDbgInfoErrorSetNames(pt, ty, ty.errorSetNames(pt.zcu).get(&pt.zcu.intern_pool), target, dbg_info_buffer);
}
fn addDbgInfoErrorSetNames(
- mod: *Module,
+ pt: Zcu.PerThread,
/// Used for printing the type name only.
ty: Type,
error_names: []const InternPool.NullTerminatedString,
@@ -2848,10 +2852,10 @@ fn addDbgInfoErrorSetNames(
// DW.AT.enumeration_type
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type));
// DW.AT.byte_size, DW.FORM.udata
- const abi_size = Type.anyerror.abiSize(mod);
+ const abi_size = Type.anyerror.abiSize(pt);
try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
- try ty.print(dbg_info_buffer.writer(), mod);
+ try ty.print(dbg_info_buffer.writer(), pt);
try dbg_info_buffer.append(0);
// DW.AT.enumerator
@@ -2865,8 +2869,8 @@ fn addDbgInfoErrorSetNames(
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
for (error_names) |error_name| {
- const int = try mod.getErrorValue(error_name);
- const error_name_slice = error_name.toSlice(&mod.intern_pool);
+ const int = try pt.zcu.getErrorValue(error_name);
+ const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
@@ -2965,8 +2969,6 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const StringTable = @import("StringTable.zig");
const Type = @import("../Type.zig");
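The Dwarf.zig changes follow one consistent split: `DeclState` now stores `pt: Zcu.PerThread`; layout-dependent queries (`abiSize`, `abiAlignment`, `hasRuntimeBits`, `structFieldOffset`) and type formatting (`fmt`, `print`) take `pt`, while purely structural queries (`zigTypeTag`, `childType`, `intInfo`, `isSlice`) keep taking the bare `*Zcu`. A small sketch of that split, with a hypothetical helper name and only calls that appear in the hunks above:

fn emitBaseTypeInfo(self: *DeclState, ty: Type) !void {
    // Hypothetical helper, not part of the commit.
    const pt = self.pt; // per-thread handle stored on DeclState
    const zcu = pt.zcu; // bare Zcu for structural queries
    const w = self.dbg_info.writer();
    // Structural query (no layout needed) still takes the Zcu:
    if (ty.zigTypeTag(zcu) == .NoReturn) return;
    // Size and printed name are layout-dependent and go through the handle:
    try leb128.writeUleb128(w, ty.abiSize(pt)); // DW.AT.byte_size, DW.FORM.udata
    try w.print("{}\x00", .{ty.fmt(pt)}); // DW.AT.name, DW.FORM.string
}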
src/link/Elf.zig
@@ -550,11 +550,12 @@ pub fn getDeclVAddr(self: *Elf, decl_index: InternPool.DeclIndex, reloc_info: li
pub fn lowerAnonDecl(
self: *Elf,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
) !codegen.Result {
- return self.zigObjectPtr().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc);
+ return self.zigObjectPtr().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc);
}
pub fn getAnonDeclVAddr(self: *Elf, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
@@ -1064,15 +1065,15 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void {
}
}
-pub fn flush(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
if (use_lld) {
- return self.linkWithLLD(arena, prog_node);
+ return self.linkWithLLD(arena, tid, prog_node);
}
- try self.flushModule(arena, prog_node);
+ try self.flushModule(arena, tid, prog_node);
}
-pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1103,7 +1104,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) l
// --verbose-link
if (comp.verbose_link) try self.dumpArgv(comp);
- if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self);
+ if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self, tid);
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);
@@ -2146,7 +2147,7 @@ fn scanRelocs(self: *Elf) !void {
}
}
-fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void {
+fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -2159,7 +2160,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
- try self.flushModule(arena, prog_node);
+ try self.flushModule(arena, tid, prog_node);
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, self.base.zcu_object_sub_path.? });
@@ -2983,41 +2984,41 @@ pub fn freeDecl(self: *Elf, decl_index: InternPool.DeclIndex) void {
return self.zigObjectPtr().?.freeDecl(self, decl_index);
}
-pub fn updateFunc(self: *Elf, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
- return self.zigObjectPtr().?.updateFunc(self, mod, func_index, air, liveness);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
+ return self.zigObjectPtr().?.updateFunc(self, pt, func_index, air, liveness);
}
pub fn updateDecl(
self: *Elf,
- mod: *Module,
+ pt: Zcu.PerThread,
decl_index: InternPool.DeclIndex,
) link.File.UpdateDeclError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
- return self.zigObjectPtr().?.updateDecl(self, mod, decl_index);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
+ return self.zigObjectPtr().?.updateDecl(self, pt, decl_index);
}
-pub fn lowerUnnamedConst(self: *Elf, val: Value, decl_index: InternPool.DeclIndex) !u32 {
- return self.zigObjectPtr().?.lowerUnnamedConst(self, val, decl_index);
+pub fn lowerUnnamedConst(self: *Elf, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
+ return self.zigObjectPtr().?.lowerUnnamedConst(self, pt, val, decl_index);
}
pub fn updateExports(
self: *Elf,
- mod: *Module,
+ pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
- return self.zigObjectPtr().?.updateExports(self, mod, exported, export_indices);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
+ return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices);
}
pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void {
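Note the two flavors of parameter in these entry points: per-decl updates (`updateFunc`, `updateDecl`, `updateExports`, `lowerUnnamedConst`) receive an already-built `pt: Zcu.PerThread`, whereas `flush`/`flushModule` take a raw `tid: Zcu.PerThread.Id`, because a link job may have no Zig module at all and the handle can only be assembled once `comp.module` is known to exist. A simplified, hypothetical sketch of that guard (the real flushModule keeps going for object-only links):

pub fn flushModuleSketch(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
    _ = arena;
    _ = prog_node;
    // Only pair the thread id with a Zcu once we know one exists.
    const zcu = self.base.comp.module orelse return;
    const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid };
    _ = pt; // per-decl flushing uses `pt` from here on
}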
src/link/MachO.zig
@@ -360,11 +360,11 @@ pub fn deinit(self: *MachO) void {
self.unwind_records.deinit(gpa);
}
-pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
- try self.flushModule(arena, prog_node);
+pub fn flush(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
+ try self.flushModule(arena, tid, prog_node);
}
-pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -391,7 +391,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node)
// --verbose-link
if (comp.verbose_link) try self.dumpArgv(comp);
- if (self.getZigObject()) |zo| try zo.flushModule(self);
+ if (self.getZigObject()) |zo| try zo.flushModule(self, tid);
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);
@@ -3178,24 +3178,24 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
try self.base.file.?.pwriteAll(buffer.items, offset);
}
-pub fn updateFunc(self: *MachO, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
- return self.getZigObject().?.updateFunc(self, mod, func_index, air, liveness);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
+ return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness);
}
-pub fn lowerUnnamedConst(self: *MachO, val: Value, decl_index: InternPool.DeclIndex) !u32 {
- return self.getZigObject().?.lowerUnnamedConst(self, val, decl_index);
+pub fn lowerUnnamedConst(self: *MachO, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
+ return self.getZigObject().?.lowerUnnamedConst(self, pt, val, decl_index);
}
-pub fn updateDecl(self: *MachO, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
- return self.getZigObject().?.updateDecl(self, mod, decl_index);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
+ return self.getZigObject().?.updateDecl(self, pt, decl_index);
}
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPool.DeclIndex) !void {
@@ -3205,15 +3205,15 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPoo
pub fn updateExports(
self: *MachO,
- mod: *Module,
+ pt: Zcu.PerThread,
exported: Module.Exported,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
- return self.getZigObject().?.updateExports(self, mod, exported, export_indices);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
+ return self.getZigObject().?.updateExports(self, pt, exported, export_indices);
}
pub fn deleteExport(
@@ -3237,11 +3237,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: InternPool.DeclIndex, reloc_info:
pub fn lowerAnonDecl(
self: *MachO,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Module.LazySrcLoc,
) !codegen.Result {
- return self.getZigObject().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc);
+ return self.getZigObject().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc);
}
pub fn getAnonDeclVAddr(self: *MachO, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
src/link/NvPtx.zig
@@ -13,8 +13,6 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
@@ -84,35 +82,35 @@ pub fn deinit(self: *NvPtx) void {
self.llvm_object.deinit();
}
-pub fn updateFunc(self: *NvPtx, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
- try self.llvm_object.updateFunc(module, func_index, air, liveness);
+pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+ try self.llvm_object.updateFunc(pt, func_index, air, liveness);
}
-pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: InternPool.DeclIndex) !void {
- return self.llvm_object.updateDecl(module, decl_index);
+pub fn updateDecl(self: *NvPtx, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
+ return self.llvm_object.updateDecl(pt, decl_index);
}
pub fn updateExports(
self: *NvPtx,
- module: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) !void {
if (build_options.skip_non_native and builtin.object_format != .nvptx)
@panic("Attempted to compile for object format that was disabled by build configuration");
- return self.llvm_object.updateExports(module, exported, export_indices);
+ return self.llvm_object.updateExports(pt, exported, export_indices);
}
pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void {
return self.llvm_object.freeDecl(decl_index);
}
-pub fn flush(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
- return self.flushModule(arena, prog_node);
+pub fn flush(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
+ return self.flushModule(arena, tid, prog_node);
}
-pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(self: *NvPtx, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native)
@panic("Attempted to compile for architecture that was disabled by build configuration");
@@ -121,5 +119,6 @@ pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node)
_ = arena;
_ = self;
_ = prog_node;
+ _ = tid;
@panic("TODO: rewrite the NvPtx.flushModule function");
}
src/link/Plan9.zig
@@ -4,8 +4,6 @@
const Plan9 = @This();
const link = @import("../link.zig");
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const aout = @import("Plan9/aout.zig");
@@ -56,7 +54,7 @@ path_arena: std.heap.ArenaAllocator,
/// of the function to know what file it came from.
/// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place)
fn_decl_table: std.AutoArrayHashMapUnmanaged(
- *Module.File,
+ *Zcu.File,
struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, FnDeclOutput) = .{} },
) = .{},
/// the code is modified when relocated, so that is why it is mutable
@@ -411,12 +409,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}
-pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- const gpa = self.base.comp.gpa;
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
const target = self.base.comp.root_mod.resolved_target.result;
const func = mod.funcInfo(func_index);
const decl_index = func.owner_decl;
@@ -439,6 +438,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
const res = try codegen.generateFunction(
&self.base,
+ pt,
decl.navSrcLoc(mod),
func_index,
air,
@@ -468,13 +468,13 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
return self.updateFinish(decl_index);
}
-pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIndex) !u32 {
- const gpa = self.base.comp.gpa;
+pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
_ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
- const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
@@ -505,7 +505,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn
};
self.syms.items[info.sym_index.?] = sym;
- const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), val, &code_buffer, .{
+ const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), val, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = new_atom_idx,
@@ -530,8 +530,9 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn
return new_atom_idx;
}
-pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
const gpa = self.base.comp.gpa;
+ const mod = pt.zcu;
const decl = mod.declPtr(decl_index);
if (decl.isExtern(mod)) {
@@ -544,7 +545,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex)
defer code_buffer.deinit();
const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
// TODO we need the symbol index for symbol in the table of locals for the containing atom
- const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{
+ const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{
.parent_atom_index = @as(Atom.Index, @intCast(atom_idx)),
});
const code = switch (res) {
@@ -610,7 +611,7 @@ fn allocateGotIndex(self: *Plan9) usize {
}
}
-pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flush(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = self.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
assert(!use_lld);
@@ -621,7 +622,7 @@ pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.
.Obj => return error.TODOImplementPlan9Objs,
.Lib => return error.TODOImplementWritingLibFiles,
}
- return self.flushModule(arena, prog_node);
+ return self.flushModule(arena, tid, prog_node);
}
pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
@@ -669,20 +670,20 @@ fn atomCount(self: *Plan9) usize {
return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count + extern_atom_count + anon_atom_count;
}
-pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
+ const tracy = trace(@src());
+ defer tracy.end();
+
_ = arena; // Has the same lifetime as the call to Compilation.update.
const comp = self.base.comp;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
- const tracy = trace(@src());
- defer tracy.end();
-
const sub_prog_node = prog_node.start("Flush Module", 0);
defer sub_prog_node.end();
@@ -690,21 +691,26 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
defer assert(self.hdr.entry != 0x0);
- const mod = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented;
+ const pt: Zcu.PerThread = .{
+ .zcu = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
+ .tid = tid,
+ };
// finish up the lazy syms
if (self.lazy_syms.getPtr(.none)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
if (metadata.text_state != .unused) self.updateLazySymbolAtom(
- File.LazySymbol.initDecl(.code, null, mod),
+ pt,
+ File.LazySymbol.initDecl(.code, null, pt.zcu),
metadata.text_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
else => |e| e,
};
if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
- File.LazySymbol.initDecl(.const_data, null, mod),
+ pt,
+ File.LazySymbol.initDecl(.const_data, null, pt.zcu),
metadata.rodata_atom,
) catch |err| return switch (err) {
error.CodegenFail => error.FlushFailure,
@@ -747,7 +753,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
var it = fentry.value_ptr.functions.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
+ const decl = pt.zcu.declPtr(decl_index);
const atom = self.getAtomPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
{
@@ -767,7 +773,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
atom.offset = off;
- log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
+ log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
if (!self.sixtyfour_bit) {
mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
} else {
@@ -775,7 +781,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
}
self.syms.items[atom.sym_index.?].value = off;
if (self.decl_exports.get(decl_index)) |export_indices| {
- try self.addDeclExports(mod, decl_index, export_indices);
+ try self.addDeclExports(pt.zcu, decl_index, export_indices);
}
}
}
@@ -841,7 +847,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
}
self.syms.items[atom.sym_index.?].value = off;
if (self.decl_exports.get(decl_index)) |export_indices| {
- try self.addDeclExports(mod, decl_index, export_indices);
+ try self.addDeclExports(pt.zcu, decl_index, export_indices);
}
}
// write the unnamed constants after the other data decls
@@ -1009,7 +1015,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node)
}
fn addDeclExports(
self: *Plan9,
- mod: *Module,
+ mod: *Zcu,
decl_index: InternPool.DeclIndex,
export_indices: []const u32,
) !void {
@@ -1025,7 +1031,7 @@ fn addDeclExports(
if (!section_name.eqlSlice(".text", &mod.intern_pool) and
!section_name.eqlSlice(".data", &mod.intern_pool))
{
- try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create(
+ try mod.failed_exports.put(mod.gpa, export_idx, try Zcu.ErrorMsg.create(
gpa,
mod.declPtr(decl_index).navSrcLoc(mod),
"plan9 does not support extra sections",
@@ -1155,8 +1161,8 @@ pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index {
pub fn updateExports(
self: *Plan9,
- module: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) !void {
const gpa = self.base.comp.gpa;
@@ -1173,11 +1179,11 @@ pub fn updateExports(
},
}
// all proper work is done in flush
- _ = module;
+ _ = pt;
}
-pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index {
- const gpa = self.base.comp.gpa;
+pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol) !Atom.Index {
+ const gpa = pt.zcu.gpa;
const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(self.base.comp.module.?));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
@@ -1198,14 +1204,13 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.In
_ = self.getAtomPtr(atom).getOrCreateOffsetTableEntry(self);
// anyerror needs to be deferred until flushModule
if (sym.getDecl(self.base.comp.module.?) != .none) {
- try self.updateLazySymbolAtom(sym, atom);
+ try self.updateLazySymbolAtom(pt, sym, atom);
}
return atom;
}
-fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Index) !void {
- const gpa = self.base.comp.gpa;
- const mod = self.base.comp.module.?;
+fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, atom_index: Atom.Index) !void {
+ const gpa = pt.zcu.gpa;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1214,7 +1219,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
// create the symbol for the name
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(sym.kind),
- sym.ty.fmt(mod),
+ sym.ty.fmt(pt),
});
const symbol: aout.Sym = .{
@@ -1225,9 +1230,10 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
self.syms.items[self.getAtomPtr(atom_index).sym_index.?] = symbol;
// generate the code
- const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
+ const src = sym.ty.srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded;
const res = try codegen.generateLazySymbol(
&self.base,
+ pt,
src,
sym,
&required_alignment,
@@ -1490,7 +1496,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDeclLineNumber(self: *Plan9, mod: *Zcu, decl_index: InternPool.DeclIndex) !void {
_ = self;
_ = mod;
_ = decl_index;
@@ -1544,9 +1550,10 @@ pub fn getDeclVAddr(
pub fn lowerAnonDecl(
self: *Plan9,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: InternPool.Alignment,
- src_loc: Module.LazySrcLoc,
+ src_loc: Zcu.LazySrcLoc,
) !codegen.Result {
_ = explicit_alignment;
// This is basically the same as lowerUnnamedConst.
@@ -1569,7 +1576,7 @@ pub fn lowerAnonDecl(
gop.value_ptr.* = index;
// we need to free name later
var code_buffer = std.ArrayList(u8).init(gpa);
- const res = try codegen.generateSymbol(&self.base, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index });
+ const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index });
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| return .{ .fail = em },
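The Plan9 hunks above all follow the pattern this commit applies to each linker backend: entry points that previously took a bare `*Module` now take either a full `Zcu.PerThread` (`updateFunc`, `updateDecl`, `lowerUnnamedConst`, `lowerAnonDecl`) or just a `Zcu.PerThread.Id` (`flush`, `flushModule`), and the backend pairs that id with the Zcu before doing any semantic work. A condensed sketch of the idiom, distilled from the hunks above rather than copied verbatim from the commit:

pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
    // Pair the thread id handed down by the frontend with the Zcu to rebuild the
    // per-thread context; all Zcu and InternPool access below goes through `pt`.
    const pt: Zcu.PerThread = .{
        .zcu = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented,
        .tid = tid,
    };
    log.debug("flushing with thread id {d}", .{@intFromEnum(pt.tid)});
    // ... lazy symbols, decls, and exports are then lowered exactly as before,
    // except that `pt` (or `pt.zcu`) is threaded into codegen calls such as
    // `codegen.generateSymbol(&self.base, pt, ...)`.
    _ = arena;
    _ = prog_node;
}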
src/link/SpirV.zig
@@ -28,8 +28,6 @@ const assert = std.debug.assert;
const log = std.log.scoped(.link);
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
@@ -125,35 +123,36 @@ pub fn deinit(self: *SpirV) void {
self.object.deinit();
}
-pub fn updateFunc(self: *SpirV, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
- const func = module.funcInfo(func_index);
- const decl = module.declPtr(func.owner_decl);
- log.debug("lowering function {}", .{decl.name.fmt(&module.intern_pool)});
+ const func = pt.zcu.funcInfo(func_index);
+ const decl = pt.zcu.declPtr(func.owner_decl);
+ log.debug("lowering function {}", .{decl.name.fmt(&pt.zcu.intern_pool)});
- try self.object.updateFunc(module, func_index, air, liveness);
+ try self.object.updateFunc(pt, func_index, air, liveness);
}
-pub fn updateDecl(self: *SpirV, module: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(self: *SpirV, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
- const decl = module.declPtr(decl_index);
- log.debug("lowering declaration {}", .{decl.name.fmt(&module.intern_pool)});
+ const decl = pt.zcu.declPtr(decl_index);
+ log.debug("lowering declaration {}", .{decl.name.fmt(&pt.zcu.intern_pool)});
- try self.object.updateDecl(module, decl_index);
+ try self.object.updateDecl(pt, decl_index);
}
pub fn updateExports(
self: *SpirV,
- mod: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) !void {
+ const mod = pt.zcu;
const decl_index = switch (exported) {
.decl_index => |i| i,
.value => |val| {
@@ -196,11 +195,11 @@ pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void {
_ = decl_index;
}
-pub fn flush(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
- return self.flushModule(arena, prog_node);
+pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
+ return self.flushModule(arena, tid, prog_node);
}
-pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -216,6 +215,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node)
const comp = self.base.comp;
const gpa = comp.gpa;
const target = comp.getTarget();
+ _ = tid;
try writeCapabilities(spv, target);
try writeMemoryModel(spv, target);
src/link/Wasm.zig
@@ -29,8 +29,6 @@ const InternPool = @import("../InternPool.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const Object = @import("Wasm/Object.zig");
const Symbol = @import("Wasm/Symbol.zig");
const Type = @import("../Type.zig");
@@ -1441,25 +1439,25 @@ pub fn deinit(wasm: *Wasm) void {
wasm.files.deinit(gpa);
}
-pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
- try wasm.zigObjectPtr().?.updateFunc(wasm, mod, func_index, air, liveness);
+ if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(pt, func_index, air, liveness);
+ try wasm.zigObjectPtr().?.updateFunc(wasm, pt, func_index, air, liveness);
}
// Generate code for the Decl, storing it in memory to be later written to
// the file on flush().
-pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
- try wasm.zigObjectPtr().?.updateDecl(wasm, mod, decl_index);
+ if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index);
+ try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index);
}
-pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Zcu, decl_index: InternPool.DeclIndex) !void {
if (wasm.llvm_object) |_| return;
try wasm.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index);
}
@@ -1506,8 +1504,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type {
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local
/// The given `decl` is the parent decl whom owns the constant.
-pub fn lowerUnnamedConst(wasm: *Wasm, val: Value, decl_index: InternPool.DeclIndex) !u32 {
- return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, val, decl_index);
+pub fn lowerUnnamedConst(wasm: *Wasm, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 {
+ return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, pt, val, decl_index);
}
/// Returns the symbol index from a symbol of which its flag is set global,
@@ -1531,11 +1529,12 @@ pub fn getDeclVAddr(
pub fn lowerAnonDecl(
wasm: *Wasm,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
explicit_alignment: Alignment,
- src_loc: Module.LazySrcLoc,
+ src_loc: Zcu.LazySrcLoc,
) !codegen.Result {
- return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, decl_val, explicit_alignment, src_loc);
+ return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, pt, decl_val, explicit_alignment, src_loc);
}
pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
@@ -1553,15 +1552,15 @@ pub fn deleteExport(
pub fn updateExports(
wasm: *Wasm,
- mod: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
- if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
- return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, export_indices);
+ if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(pt, exported, export_indices);
+ return wasm.zigObjectPtr().?.updateExports(wasm, pt, exported, export_indices);
}
pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void {
@@ -2466,18 +2465,18 @@ fn appendDummySegment(wasm: *Wasm) !void {
});
}
-pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flush(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const comp = wasm.base.comp;
const use_lld = build_options.have_llvm and comp.config.use_lld;
if (use_lld) {
- return wasm.linkWithLLD(arena, prog_node);
+ return wasm.linkWithLLD(arena, tid, prog_node);
}
- return wasm.flushModule(arena, prog_node);
+ return wasm.flushModule(arena, tid, prog_node);
}
/// Uses the in-house linker to link one or multiple object- and archive files into a WebAssembly binary.
-pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -2513,7 +2512,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node)
const wasi_exec_model = comp.config.wasi_exec_model;
if (wasm.zigObjectPtr()) |zig_object| {
- try zig_object.flushModule(wasm);
+ try zig_object.flushModule(wasm, tid);
}
// When the target os is WASI, we allow linking with WASI-LIBC
@@ -3324,7 +3323,7 @@ fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void {
}
}
-fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !void {
+fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -3342,7 +3341,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !voi
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (comp.module != null) blk: {
- try wasm.flushModule(arena, prog_node);
+ try wasm.flushModule(arena, tid, prog_node);
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, wasm.base.zcu_object_sub_path.? });
@@ -4009,8 +4008,8 @@ pub fn storeDeclType(wasm: *Wasm, decl_index: InternPool.DeclIndex, func_type: s
/// Returns the symbol index of the error name table.
///
/// When the symbol does not yet exist, it will create a new one instead.
-pub fn getErrorTableSymbol(wasm_file: *Wasm) !u32 {
- const sym_index = try wasm_file.zigObjectPtr().?.getErrorTableSymbol(wasm_file);
+pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 {
+ const sym_index = try wasm_file.zigObjectPtr().?.getErrorTableSymbol(wasm_file, pt);
return @intFromEnum(sym_index);
}
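Note how `tid` has to travel through all three Wasm entry points shown above: `flush` forwards it either straight to `flushModule` or, when LLD is in use, to `linkWithLLD`, which itself calls back into `flushModule` to emit the Zig object before invoking LLD; `flushModule` then hands the same `tid` down to `ZigObject.flushModule`. Dropping the parameter from any one of the three would break the other paths.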
src/Sema/bitcast.zig
@@ -69,7 +69,8 @@ fn bitCastInner(
host_bits: u64,
bit_offset: u64,
) BitCastError!Value {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const endian = zcu.getTarget().cpu.arch.endian();
if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) {
@@ -78,29 +79,29 @@ fn bitCastInner(
const val_ty = val.typeOf(zcu);
- try val_ty.resolveLayout(zcu);
- try dest_ty.resolveLayout(zcu);
+ try val_ty.resolveLayout(pt);
+ try dest_ty.resolveLayout(pt);
assert(val_ty.hasWellDefinedLayout(zcu));
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
- .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
+ .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
else
- .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
+ .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
const skip_bits = switch (endian) {
.little => bit_offset + byte_offset * 8,
.big => if (host_bits > 0)
- val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
+ val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
else
- val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
+ val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
};
var unpack: UnpackValueBits = .{
- .zcu = zcu,
+ .pt = sema.pt,
.arena = sema.arena,
.skip_bits = skip_bits,
- .remaining_bits = dest_ty.bitSize(zcu),
+ .remaining_bits = dest_ty.bitSize(pt),
.unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
};
switch (endian) {
@@ -116,7 +117,7 @@ fn bitCastInner(
try unpack.padding(host_pad_bits);
var pack: PackValueBits = .{
- .zcu = zcu,
+ .pt = sema.pt,
.arena = sema.arena,
.unpacked = unpack.unpacked.items,
};
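To make the offset arithmetic above concrete (an illustrative example, not part of the commit): extracting a `u16` at `byte_offset == 2` from a `u64` with no packed host integer gives `skip_bits == 16` on a little-endian target (`0 + 2 * 8`) but `skip_bits == 32` on a big-endian one (`64 - 2 * 8 - 16`), because the unpacked bit list is built starting from the value's last byte on big-endian targets, as the `UnpackValueBits` doc comment below spells out.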
@@ -131,33 +132,34 @@ fn bitCastSpliceInner(
host_bits: u64,
bit_offset: u64,
) BitCastError!Value {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const endian = zcu.getTarget().cpu.arch.endian();
const val_ty = val.typeOf(zcu);
const splice_val_ty = splice_val.typeOf(zcu);
- try val_ty.resolveLayout(zcu);
- try splice_val_ty.resolveLayout(zcu);
+ try val_ty.resolveLayout(pt);
+ try splice_val_ty.resolveLayout(pt);
- const splice_bits = splice_val_ty.bitSize(zcu);
+ const splice_bits = splice_val_ty.bitSize(pt);
const splice_offset = switch (endian) {
.little => bit_offset + byte_offset * 8,
.big => if (host_bits > 0)
- val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
+ val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
else
- val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
+ val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
};
- assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
+ assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);
const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
- .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
+ .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
else
- .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
+ .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
var unpack: UnpackValueBits = .{
- .zcu = zcu,
+ .pt = pt,
.arena = sema.arena,
.skip_bits = 0,
.remaining_bits = splice_offset,
@@ -179,7 +181,7 @@ fn bitCastSpliceInner(
try unpack.add(splice_val);
unpack.skip_bits = splice_offset + splice_bits;
- unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
+ unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
switch (endian) {
.little => {
try unpack.add(val);
@@ -193,7 +195,7 @@ fn bitCastSpliceInner(
try unpack.padding(host_pad_bits);
var pack: PackValueBits = .{
- .zcu = zcu,
+ .pt = pt,
.arena = sema.arena,
.unpacked = unpack.unpacked.items,
};
@@ -209,7 +211,7 @@ fn bitCastSpliceInner(
/// of values in *packed* memory - therefore, on big-endian targets, the first element of this
/// list contains bits from the *final* byte of the value.
const UnpackValueBits = struct {
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
skip_bits: u64,
remaining_bits: u64,
@@ -217,7 +219,8 @@ const UnpackValueBits = struct {
unpacked: std.ArrayList(InternPool.Index),
fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
- const zcu = unpack.zcu;
+ const pt = unpack.pt;
+ const zcu = pt.zcu;
const endian = zcu.getTarget().cpu.arch.endian();
const ip = &zcu.intern_pool;
@@ -226,7 +229,7 @@ const UnpackValueBits = struct {
}
const ty = val.typeOf(zcu);
- const bit_size = ty.bitSize(zcu);
+ const bit_size = ty.bitSize(pt);
if (unpack.skip_bits >= bit_size) {
unpack.skip_bits -= bit_size;
@@ -279,7 +282,7 @@ const UnpackValueBits = struct {
.little => i,
.big => len - i - 1,
};
- const elem_val = try val.elemValue(zcu, real_idx);
+ const elem_val = try val.elemValue(pt, real_idx);
try unpack.add(elem_val);
}
},
@@ -288,7 +291,7 @@ const UnpackValueBits = struct {
// The final element does not have trailing padding.
// Elements are reversed in packed memory on BE targets.
const elem_ty = ty.childType(zcu);
- const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
+ const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
const len = ty.arrayLen(zcu);
const maybe_sent = ty.sentinel(zcu);
@@ -303,7 +306,7 @@ const UnpackValueBits = struct {
.little => i,
.big => len - i - 1,
};
- const elem_val = try val.elemValue(zcu, @intCast(real_idx));
+ const elem_val = try val.elemValue(pt, @intCast(real_idx));
try unpack.add(elem_val);
if (i != len - 1) try unpack.padding(pad_bits);
}
@@ -320,12 +323,12 @@ const UnpackValueBits = struct {
var cur_bit_off: u64 = 0;
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
while (it.next()) |field_idx| {
- const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
+ const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
const pad_bits = want_bit_off - cur_bit_off;
- const field_val = try val.fieldValue(zcu, field_idx);
+ const field_val = try val.fieldValue(pt, field_idx);
try unpack.padding(pad_bits);
try unpack.add(field_val);
- cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
+ cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
}
// Add trailing padding bits.
try unpack.padding(bit_size - cur_bit_off);
@@ -334,13 +337,13 @@ const UnpackValueBits = struct {
var cur_bit_off: u64 = bit_size;
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
while (it.next()) |field_idx| {
- const field_val = try val.fieldValue(zcu, field_idx);
+ const field_val = try val.fieldValue(pt, field_idx);
const field_ty = field_val.typeOf(zcu);
- const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
+ const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
const pad_bits = cur_bit_off - want_bit_off;
try unpack.padding(pad_bits);
try unpack.add(field_val);
- cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
+ cur_bit_off = want_bit_off - field_ty.bitSize(pt);
}
assert(cur_bit_off == 0);
},
@@ -349,7 +352,7 @@ const UnpackValueBits = struct {
// Just add all fields in order. There are no padding bits.
// This is identical between LE and BE targets.
for (0..ty.structFieldCount(zcu)) |i| {
- const field_val = try val.fieldValue(zcu, i);
+ const field_val = try val.fieldValue(pt, i);
try unpack.add(field_val);
}
},
@@ -363,7 +366,7 @@ const UnpackValueBits = struct {
// This correctly handles the case where `tag == .none`, since the payload is then
// either an integer or a byte array, both of which we can unpack.
const payload_val = Value.fromInterned(un.val);
- const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
+ const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
try unpack.add(payload_val);
try unpack.padding(pad_bits);
@@ -377,31 +380,31 @@ const UnpackValueBits = struct {
fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void {
if (pad_bits == 0) return;
- const zcu = unpack.zcu;
+ const pt = unpack.pt;
// Figure out how many full bytes and leftover bits there are.
const bytes = pad_bits / 8;
const bits = pad_bits % 8;
// Add undef u8 values for the bytes...
- const undef_u8 = try zcu.undefValue(Type.u8);
+ const undef_u8 = try pt.undefValue(Type.u8);
for (0..@intCast(bytes)) |_| {
try unpack.primitive(undef_u8);
}
// ...and an undef int for the leftover bits.
if (bits == 0) return;
- const bits_ty = try zcu.intType(.unsigned, @intCast(bits));
- const bits_val = try zcu.undefValue(bits_ty);
+ const bits_ty = try pt.intType(.unsigned, @intCast(bits));
+ const bits_val = try pt.undefValue(bits_ty);
try unpack.primitive(bits_val);
}
fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
- const zcu = unpack.zcu;
+ const pt = unpack.pt;
if (unpack.remaining_bits == 0) {
return;
}
- const ty = val.typeOf(zcu);
- const bit_size = ty.bitSize(zcu);
+ const ty = val.typeOf(pt.zcu);
+ const bit_size = ty.bitSize(pt);
// Note that this skips all zero-bit types.
if (unpack.skip_bits >= bit_size) {
@@ -425,21 +428,21 @@ const UnpackValueBits = struct {
}
fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
- const zcu = unpack.zcu;
- const ty = val.typeOf(zcu);
+ const pt = unpack.pt;
+ const ty = val.typeOf(pt.zcu);
- const val_bits = ty.bitSize(zcu);
+ const val_bits = ty.bitSize(pt);
assert(bit_offset + bit_count <= val_bits);
- switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+ switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
// In the `ptr` case, this will return `error.ReinterpretDeclRef`
// if we're trying to split a non-integer pointer value.
.int, .float, .enum_tag, .ptr, .opt => {
// This @intCast is okay because no primitive can exceed the size of a u16.
- const int_ty = try zcu.intType(.unsigned, @intCast(bit_count));
+ const int_ty = try unpack.pt.intType(.unsigned, @intCast(bit_count));
const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
- try val.writeToPackedMemory(ty, zcu, buf, 0);
- const sub_val = try Value.readFromPackedMemory(int_ty, zcu, buf, @intCast(bit_offset), unpack.arena);
+ try val.writeToPackedMemory(ty, unpack.pt, buf, 0);
+ const sub_val = try Value.readFromPackedMemory(int_ty, unpack.pt, buf, @intCast(bit_offset), unpack.arena);
try unpack.primitive(sub_val);
},
.undef => try unpack.padding(bit_count),
@@ -456,13 +459,14 @@ const UnpackValueBits = struct {
/// reconstructs a value of an arbitrary type, with correct handling of `undefined`
/// values and of pointers which align in virtual memory.
const PackValueBits = struct {
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
bit_offset: u64 = 0,
unpacked: []const InternPool.Index,
fn get(pack: *PackValueBits, ty: Type) BitCastError!Value {
- const zcu = pack.zcu;
+ const pt = pack.pt;
+ const zcu = pt.zcu;
const endian = zcu.getTarget().cpu.arch.endian();
const ip = &zcu.intern_pool;
const arena = pack.arena;
@@ -485,7 +489,7 @@ const PackValueBits = struct {
}
},
}
- return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
@@ -495,12 +499,12 @@ const PackValueBits = struct {
const len = ty.arrayLen(zcu);
const elem_ty = ty.childType(zcu);
const maybe_sent = ty.sentinel(zcu);
- const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
+ const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
const elems = try arena.alloc(InternPool.Index, @intCast(len));
if (endian == .big and maybe_sent != null) {
// TODO: validate sentinel was preserved!
- try pack.padding(elem_ty.bitSize(zcu));
+ try pack.padding(elem_ty.bitSize(pt));
if (len != 0) try pack.padding(pad_bits);
}
@@ -516,10 +520,10 @@ const PackValueBits = struct {
if (endian == .little and maybe_sent != null) {
// TODO: validate sentinel was preserved!
if (len != 0) try pack.padding(pad_bits);
- try pack.padding(elem_ty.bitSize(zcu));
+ try pack.padding(elem_ty.bitSize(pt));
}
- return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
@@ -534,23 +538,23 @@ const PackValueBits = struct {
var cur_bit_off: u64 = 0;
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
while (it.next()) |field_idx| {
- const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
+ const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
try pack.padding(want_bit_off - cur_bit_off);
const field_ty = ty.structFieldType(field_idx, zcu);
elems[field_idx] = (try pack.get(field_ty)).toIntern();
- cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
+ cur_bit_off = want_bit_off + field_ty.bitSize(pt);
}
- try pack.padding(ty.bitSize(zcu) - cur_bit_off);
+ try pack.padding(ty.bitSize(pt) - cur_bit_off);
},
.big => {
- var cur_bit_off: u64 = ty.bitSize(zcu);
+ var cur_bit_off: u64 = ty.bitSize(pt);
var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
while (it.next()) |field_idx| {
const field_ty = ty.structFieldType(field_idx, zcu);
- const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
+ const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
try pack.padding(cur_bit_off - want_bit_off);
elems[field_idx] = (try pack.get(field_ty)).toIntern();
- cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
+ cur_bit_off = want_bit_off - field_ty.bitSize(pt);
}
assert(cur_bit_off == 0);
},
@@ -559,10 +563,10 @@ const PackValueBits = struct {
// Fill those values now.
for (elems, 0..) |*elem, field_idx| {
if (elem.* != .none) continue;
- const val = (try ty.structFieldValueComptime(zcu, field_idx)).?;
+ const val = (try ty.structFieldValueComptime(pt, field_idx)).?;
elem.* = val.toIntern();
}
- return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
@@ -575,7 +579,7 @@ const PackValueBits = struct {
const field_ty = ty.structFieldType(i, zcu);
elem.* = (try pack.get(field_ty)).toIntern();
}
- return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
} }));
@@ -591,7 +595,7 @@ const PackValueBits = struct {
const prev_unpacked = pack.unpacked;
const prev_bit_offset = pack.bit_offset;
- const backing_ty = try ty.unionBackingType(zcu);
+ const backing_ty = try ty.unionBackingType(pt);
backing: {
const backing_val = pack.get(backing_ty) catch |err| switch (err) {
@@ -607,7 +611,7 @@ const PackValueBits = struct {
pack.bit_offset = prev_bit_offset;
break :backing;
}
- return Value.fromInterned(try zcu.intern(.{ .un = .{
+ return Value.fromInterned(try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
@@ -618,16 +622,16 @@ const PackValueBits = struct {
for (field_order, 0..) |*f, i| f.* = @intCast(i);
// Sort `field_order` to put the fields with the largest bit sizes first.
const SizeSortCtx = struct {
- zcu: *Zcu,
+ pt: Zcu.PerThread,
field_types: []const InternPool.Index,
fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
- return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
+ return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
}
};
std.mem.sortUnstable(u32, field_order, SizeSortCtx{
- .zcu = zcu,
+ .pt = pt,
.field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
}, SizeSortCtx.lessThan);
@@ -635,7 +639,7 @@ const PackValueBits = struct {
for (field_order) |field_idx| {
const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
- const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
+ const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
if (!padding_after) try pack.padding(pad_bits);
const field_val = pack.get(field_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
@@ -651,8 +655,8 @@ const PackValueBits = struct {
pack.bit_offset = prev_bit_offset;
continue;
}
- const tag_val = try zcu.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
- return Value.fromInterned(try zcu.intern(.{ .un = .{
+ const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
+ return Value.fromInterned(try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = field_val.toIntern(),
@@ -662,7 +666,7 @@ const PackValueBits = struct {
// No field could represent the value. Just do whatever happens when we try to read
// the backing type - either `undefined` or `error.ReinterpretDeclRef`.
const backing_val = try pack.get(backing_ty);
- return Value.fromInterned(try zcu.intern(.{ .un = .{
+ return Value.fromInterned(try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
@@ -677,14 +681,14 @@ const PackValueBits = struct {
}
fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
- const zcu = pack.zcu;
- const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
+ const pt = pack.pt;
+ const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));
for (vals) |val| {
- if (!Value.fromInterned(val).isUndef(zcu)) break;
+ if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
} else {
// All bits of the value are `undefined`.
- return zcu.undefValue(want_ty);
+ return pt.undefValue(want_ty);
}
// TODO: we need to decide how to handle partially-undef values here.
@@ -702,9 +706,9 @@ const PackValueBits = struct {
ptr_cast: {
if (vals.len != 1) break :ptr_cast;
const val = Value.fromInterned(vals[0]);
- if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
- if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
- return zcu.getCoerced(val, want_ty);
+ if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+ if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+ return pt.getCoerced(val, want_ty);
}
// Reinterpret via an in-memory buffer.
@@ -712,8 +716,8 @@ const PackValueBits = struct {
var buf_bits: u64 = 0;
for (vals) |ip_val| {
const val = Value.fromInterned(ip_val);
- const ty = val.typeOf(zcu);
- buf_bits += ty.bitSize(zcu);
+ const ty = val.typeOf(pt.zcu);
+ buf_bits += ty.bitSize(pt);
}
const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
@@ -722,25 +726,25 @@ const PackValueBits = struct {
var cur_bit_off: usize = 0;
for (vals) |ip_val| {
const val = Value.fromInterned(ip_val);
- const ty = val.typeOf(zcu);
- if (!val.isUndef(zcu)) {
- try val.writeToPackedMemory(ty, zcu, buf, cur_bit_off);
+ const ty = val.typeOf(pt.zcu);
+ if (!val.isUndef(pt.zcu)) {
+ try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
}
- cur_bit_off += @intCast(ty.bitSize(zcu));
+ cur_bit_off += @intCast(ty.bitSize(pt));
}
- return Value.readFromPackedMemory(want_ty, zcu, buf, @intCast(bit_offset), pack.arena);
+ return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
}
fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } {
if (need_bits == 0) return .{ &.{}, 0 };
- const zcu = pack.zcu;
+ const pt = pack.pt;
var bits: u64 = 0;
var len: usize = 0;
while (bits < pack.bit_offset + need_bits) {
- bits += Value.fromInterned(pack.unpacked[len]).typeOf(zcu).bitSize(zcu);
+ bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
len += 1;
}
@@ -753,7 +757,7 @@ const PackValueBits = struct {
pack.bit_offset = 0;
} else {
pack.unpacked = pack.unpacked[len - 1 ..];
- pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(zcu).bitSize(zcu) - extra_bits;
+ pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
}
return .{ result_vals, result_offset };
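As a worked illustration of `prepareBits` (example values chosen here, not from the commit): with `pack.unpacked` holding three `u8` values, `pack.bit_offset == 4`, and `need_bits == 12`, the loop stops once 16 bits have been accumulated, so the first two bytes are returned together with the current offset of 4; and because those 16 bits cover the requested range exactly, the cursor advances past both values and `bit_offset` resets to 0 for the next field.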
src/Sema/comptime_ptr_access.zig
@@ -12,19 +12,19 @@ pub const ComptimeLoadResult = union(enum) {
};
pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
- const zcu = sema.mod;
- const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
+ const pt = sema.pt;
+ const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
// TODO: host size for vectors is terrible
const host_bits = switch (ptr_info.flags.vector_index) {
.none => ptr_info.packed_offset.host_size * 8,
- else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
+ else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
};
const bit_offset = if (host_bits != 0) bit_offset: {
- const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
+ const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => return .runtime_load,
- else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
+ else => |idx| switch (pt.zcu.getTarget().cpu.arch.endian()) {
.little => child_bits * @intFromEnum(idx),
.big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
},
@@ -60,28 +60,29 @@ pub fn storeComptimePtr(
ptr: Value,
store_val: Value,
) !ComptimeStoreResult {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
assert(store_val.typeOf(zcu).toIntern() == ptr_info.child);
// TODO: host size for vectors is terrible
const host_bits = switch (ptr_info.flags.vector_index) {
.none => ptr_info.packed_offset.host_size * 8,
- else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
+ else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
};
const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => return .runtime_store,
else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
- .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
- .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
+ .little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
+ .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
},
};
const pseudo_store_ty = if (host_bits > 0) t: {
- const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
+ const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
if (need_bits + bit_offset > host_bits) {
return .exceeds_host_size;
}
- break :t try zcu.intType(.unsigned, @intCast(host_bits));
+ break :t try sema.pt.intType(.unsigned, @intCast(host_bits));
} else Type.fromInterned(ptr_info.child);
const strat = try prepareComptimePtrStore(sema, block, src, ptr, pseudo_store_ty, 0);
@@ -103,7 +104,7 @@ pub fn storeComptimePtr(
.needed_well_defined => |ty| return .{ .needed_well_defined = ty },
.out_of_bounds => |ty| return .{ .out_of_bounds = ty },
};
- const expected = try expected_mv.intern(zcu, sema.arena);
+ const expected = try expected_mv.intern(pt, sema.arena);
if (store_val.toIntern() != expected.toIntern()) {
return .{ .comptime_field_mismatch = expected };
}
@@ -126,14 +127,14 @@ pub fn storeComptimePtr(
switch (strat) {
.direct => |direct| {
const want_ty = direct.val.typeOf(zcu);
- const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
+ const coerced_store_val = try pt.getCoerced(store_val, want_ty);
direct.val.* = .{ .interned = coerced_store_val.toIntern() };
return .success;
},
.index => |index| {
const want_ty = index.val.typeOf(zcu).childType(zcu);
- const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
- try index.val.setElem(zcu, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
+ const coerced_store_val = try pt.getCoerced(store_val, want_ty);
+ try index.val.setElem(pt, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
return .success;
},
.flat_index => |flat| {
@@ -149,7 +150,7 @@ pub fn storeComptimePtr(
// Better would be to gather all the store targets into an array.
var index: u64 = flat.flat_elem_index + idx;
const val_ptr, const final_idx = (try recursiveIndex(sema, flat.val, &index)).?;
- try val_ptr.setElem(zcu, sema.arena, @intCast(final_idx), .{ .interned = elem });
+ try val_ptr.setElem(pt, sema.arena, @intCast(final_idx), .{ .interned = elem });
}
return .success;
},
@@ -165,9 +166,9 @@ pub fn storeComptimePtr(
.direct => |direct| .{ direct.val, 0 },
.index => |index| .{
index.val,
- index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
+ index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
},
- .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
+ .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
.reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
else => unreachable,
};
@@ -181,7 +182,7 @@ pub fn storeComptimePtr(
}
const new_val = try sema.bitCastSpliceVal(
- try val_ptr.intern(zcu, sema.arena),
+ try val_ptr.intern(pt, sema.arena),
store_val,
byte_offset,
host_bits,
@@ -205,7 +206,8 @@ fn loadComptimePtrInner(
/// before `load_ty`. Otherwise, it is ignored and may be `undefined`.
array_offset: u64,
) !ComptimeLoadResult {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
@@ -263,7 +265,7 @@ fn loadComptimePtrInner(
const load_one_ty, const load_count = load_ty.arrayBase(zcu);
const count = if (load_one_ty.toIntern() == base_ty.toIntern()) load_count else 1;
- const want_ty = try zcu.arrayType(.{
+ const want_ty = try sema.pt.arrayType(.{
.len = count,
.child = base_ty.toIntern(),
});
@@ -285,7 +287,7 @@ fn loadComptimePtrInner(
const agg_ty = agg_val.typeOf(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
- .Struct, .Pointer => break :val try agg_val.getElem(zcu, @intCast(base_index.index)),
+ .Struct, .Pointer => break :val try agg_val.getElem(sema.pt, @intCast(base_index.index)),
.Union => {
const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) {
.un => |un| .{ Value.fromInterned(un.tag), un.payload.* },
@@ -427,7 +429,7 @@ fn loadComptimePtrInner(
const next_elem_off = elem_size * (elem_idx + 1);
if (cur_offset + need_bytes <= next_elem_off) {
// We can look at a single array element.
- cur_val = try cur_val.getElem(zcu, @intCast(elem_idx));
+ cur_val = try cur_val.getElem(sema.pt, @intCast(elem_idx));
cur_offset -= elem_idx * elem_size;
} else {
break;
@@ -437,10 +439,10 @@ fn loadComptimePtrInner(
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
- const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+ const start_off = cur_ty.structFieldOffset(field_idx, pt);
const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
- cur_val = try cur_val.getElem(zcu, field_idx);
+ cur_val = try cur_val.getElem(sema.pt, field_idx);
cur_offset -= start_off;
break;
}
@@ -482,7 +484,7 @@ fn loadComptimePtrInner(
}
const result_val = try sema.bitCastVal(
- try cur_val.intern(zcu, sema.arena),
+ try cur_val.intern(sema.pt, sema.arena),
load_ty,
cur_offset,
host_bits,
@@ -564,7 +566,8 @@ fn prepareComptimePtrStore(
/// before `store_ty`. Otherwise, it is ignored and may be `undefined`.
array_offset: u64,
) !ComptimeStoreStrategy {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
@@ -587,14 +590,14 @@ fn prepareComptimePtrStore(
const eu_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
.direct => |direct| .{ direct.val, direct.alloc },
.index => |index| .{
- try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+ try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
index.alloc,
},
.flat_index => unreachable, // base_ty is not an array
.reinterpret => unreachable, // base_ty has ill-defined layout
else => |err| return err,
};
- try eu_val_ptr.unintern(zcu, sema.arena, false, false);
+ try eu_val_ptr.unintern(pt, sema.arena, false, false);
switch (eu_val_ptr.*) {
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.undef => return .undef,
@@ -614,14 +617,14 @@ fn prepareComptimePtrStore(
const opt_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
.direct => |direct| .{ direct.val, direct.alloc },
.index => |index| .{
- try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+ try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
index.alloc,
},
.flat_index => unreachable, // base_ty is not an array
.reinterpret => unreachable, // base_ty has ill-defined layout
else => |err| return err,
};
- try opt_val_ptr.unintern(zcu, sema.arena, false, false);
+ try opt_val_ptr.unintern(pt, sema.arena, false, false);
switch (opt_val_ptr.*) {
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.undef => return .undef,
@@ -648,7 +651,7 @@ fn prepareComptimePtrStore(
const store_one_ty, const store_count = store_ty.arrayBase(zcu);
const count = if (store_one_ty.toIntern() == base_ty.toIntern()) store_count else 1;
- const want_ty = try zcu.arrayType(.{
+ const want_ty = try pt.arrayType(.{
.len = count,
.child = base_ty.toIntern(),
});
@@ -668,7 +671,7 @@ fn prepareComptimePtrStore(
const agg_val, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
.direct => |direct| .{ direct.val, direct.alloc },
.index => |index| .{
- try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+ try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
index.alloc,
},
.flat_index => unreachable, // base_ty is not an array
@@ -679,14 +682,14 @@ fn prepareComptimePtrStore(
const agg_ty = agg_val.typeOf(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.Struct, .Pointer => break :strat .{ .direct = .{
- .val = try agg_val.elem(zcu, sema.arena, @intCast(base_index.index)),
+ .val = try agg_val.elem(pt, sema.arena, @intCast(base_index.index)),
.alloc = alloc,
} },
.Union => {
if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) {
return .undef;
}
- try agg_val.unintern(zcu, sema.arena, false, false);
+ try agg_val.unintern(pt, sema.arena, false, false);
const un = agg_val.un;
const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
if (tag_ty.enumTagFieldIndex(Value.fromInterned(un.tag), zcu).? != base_index.index) {
@@ -847,7 +850,7 @@ fn prepareComptimePtrStore(
const next_elem_off = elem_size * (elem_idx + 1);
if (cur_offset + need_bytes <= next_elem_off) {
// We can look at a single array element.
- cur_val = try cur_val.elem(zcu, sema.arena, @intCast(elem_idx));
+ cur_val = try cur_val.elem(pt, sema.arena, @intCast(elem_idx));
cur_offset -= elem_idx * elem_size;
} else {
break;
@@ -857,10 +860,10 @@ fn prepareComptimePtrStore(
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
- const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+ const start_off = cur_ty.structFieldOffset(field_idx, pt);
const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
- cur_val = try cur_val.elem(zcu, sema.arena, field_idx);
+ cur_val = try cur_val.elem(pt, sema.arena, field_idx);
cur_offset -= start_off;
break;
}
@@ -874,7 +877,7 @@ fn prepareComptimePtrStore(
// Otherwise, we might traverse into a union field which doesn't allow pointers.
// Figure out a solution!
if (true) break;
- try cur_val.unintern(zcu, sema.arena, false, false);
+ try cur_val.unintern(pt, sema.arena, false, false);
const payload = switch (cur_val.*) {
.un => |un| un.payload,
else => unreachable,
@@ -918,7 +921,7 @@ fn flattenArray(
) Allocator.Error!void {
if (next_idx.* == out.len) return;
- const zcu = sema.mod;
+ const zcu = sema.pt.zcu;
const ty = val.typeOf(zcu);
const base_elem_count = ty.arrayBase(zcu)[1];
@@ -928,7 +931,7 @@ fn flattenArray(
}
if (ty.zigTypeTag(zcu) != .Array) {
- out[@intCast(next_idx.*)] = (try val.intern(zcu, sema.arena)).toIntern();
+ out[@intCast(next_idx.*)] = (try val.intern(sema.pt, sema.arena)).toIntern();
next_idx.* += 1;
return;
}
@@ -942,7 +945,7 @@ fn flattenArray(
skip.* -= arr_base_elem_count;
continue;
}
- try flattenArray(sema, try val.getElem(zcu, elem_idx), skip, next_idx, out);
+ try flattenArray(sema, try val.getElem(sema.pt, elem_idx), skip, next_idx, out);
}
if (ty.sentinel(zcu)) |s| {
try flattenArray(sema, .{ .interned = s.toIntern() }, skip, next_idx, out);
@@ -957,13 +960,13 @@ fn unflattenArray(
elems: []const InternPool.Index,
next_idx: *u64,
) Allocator.Error!Value {
- const zcu = sema.mod;
+ const zcu = sema.pt.zcu;
const arena = sema.arena;
if (ty.zigTypeTag(zcu) != .Array) {
const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
next_idx.* += 1;
- return zcu.getCoerced(val, ty);
+ return sema.pt.getCoerced(val, ty);
}
const elem_ty = ty.childType(zcu);
@@ -975,7 +978,7 @@ fn unflattenArray(
// TODO: validate sentinel
_ = try unflattenArray(sema, elem_ty, elems, next_idx);
}
- return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try sema.pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = buf },
} }));
@@ -990,25 +993,25 @@ fn recursiveIndex(
mv: *MutableValue,
index: *u64,
) !?struct { *MutableValue, u64 } {
- const zcu = sema.mod;
+ const pt = sema.pt;
- const ty = mv.typeOf(zcu);
- assert(ty.zigTypeTag(zcu) == .Array);
+ const ty = mv.typeOf(pt.zcu);
+ assert(ty.zigTypeTag(pt.zcu) == .Array);
- const ty_base_elems = ty.arrayBase(zcu)[1];
+ const ty_base_elems = ty.arrayBase(pt.zcu)[1];
if (index.* >= ty_base_elems) {
index.* -= ty_base_elems;
return null;
}
- const elem_ty = ty.childType(zcu);
- if (elem_ty.zigTypeTag(zcu) != .Array) {
- assert(index.* < ty.arrayLenIncludingSentinel(zcu)); // should be handled by initial check
+ const elem_ty = ty.childType(pt.zcu);
+ if (elem_ty.zigTypeTag(pt.zcu) != .Array) {
+ assert(index.* < ty.arrayLenIncludingSentinel(pt.zcu)); // should be handled by initial check
return .{ mv, index.* };
}
- for (0..@intCast(ty.arrayLenIncludingSentinel(zcu))) |elem_index| {
- if (try recursiveIndex(sema, try mv.elem(zcu, sema.arena, elem_index), index)) |result| {
+ for (0..@intCast(ty.arrayLenIncludingSentinel(pt.zcu))) |elem_index| {
+ if (try recursiveIndex(sema, try mv.elem(pt, sema.arena, elem_index), index)) |result| {
return result;
}
}
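As a concrete illustration of `recursiveIndex` (example values chosen here, not from the commit): for a mutable value of type `[2][3]u8` and a flat index of 4, the base element count is 6, so the index is in range; recursing into the first inner `[3]u8` subtracts its 3 base elements, leaving an index of 1, and the second inner array then yields the result: that `[3]u8` value together with the final index 1. `flattenArray` and `unflattenArray` traverse base elements in the same order, which is what allows a single flat element index to address storage inside arbitrarily nested comptime-known arrays.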
src/Zcu/PerThread.zig
@@ -0,0 +1,2102 @@
+zcu: *Zcu,
+
+/// Dense, per-thread unique index.
+tid: Id,
+
+pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ };
+
+/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
+pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
+ if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| {
+ return pt.ensureDeclAnalyzed(existing_root);
+ } else {
+ return pt.semaFile(file_index);
+ }
+}
+
+/// This ensures that the Decl will have an up-to-date Type and Value populated.
+/// However the resolution status of the Type may not be fully resolved.
+/// For example an inferred error set is not resolved until after `analyzeFnBody`
+/// is called.
+pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.SemaError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const mod = pt.zcu;
+ const ip = &mod.intern_pool;
+ const decl = mod.declPtr(decl_index);
+
+ log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{
+ @intFromEnum(decl_index),
+ decl.name.fmt(ip),
+ });
+
+ // Determine whether or not this Decl is outdated, i.e. requires re-analysis
+ // even if `complete`. If a Decl is PO, we pessimistically assume that it
+ // *does* require re-analysis, to ensure that the Decl is definitely
+ // up-to-date when this function returns.
+
+ // If analysis occurs in a poor order, this could result in over-analysis.
+ // We do our best to avoid this by the other dependency logic in this file
+ // which tries to limit re-analysis to Decls whose previously listed
+ // dependencies are all up-to-date.
+
+ const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index });
+ const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or
+ mod.potentially_outdated.swapRemove(decl_as_depender);
+
+ if (decl_was_outdated) {
+ _ = mod.outdated_ready.swapRemove(decl_as_depender);
+ }
+
+ const was_outdated = mod.outdated_file_root.swapRemove(decl_index) or decl_was_outdated;
+
+ switch (decl.analysis) {
+ .in_progress => unreachable,
+
+ .file_failure => return error.AnalysisFail,
+
+ .sema_failure,
+ .dependency_failure,
+ .codegen_failure,
+ => if (!was_outdated) return error.AnalysisFail,
+
+ .complete => if (!was_outdated) return,
+
+ .unreferenced => {},
+ }
+
+ if (was_outdated) {
+ // The exports this Decl performs will be re-discovered, so we remove them here
+ // prior to re-analysis.
+ if (build_options.only_c) unreachable;
+ mod.deleteUnitExports(decl_as_depender);
+ mod.deleteUnitReferences(decl_as_depender);
+ }
+
+ const sema_result: Zcu.SemaDeclResult = blk: {
+ if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) {
+ // Anonymous decl. We don't semantically analyze these.
+ break :blk .{
+ .invalidate_decl_val = false,
+ .invalidate_decl_ref = false,
+ };
+ }
+
+ if (mod.declIsRoot(decl_index)) {
+ const changed = try pt.semaFileUpdate(decl.getFileScopeIndex(mod), decl_was_outdated);
+ break :blk .{
+ .invalidate_decl_val = changed,
+ .invalidate_decl_ref = changed,
+ };
+ }
+
+ const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+ defer decl_prog_node.end();
+
+ break :blk pt.semaDecl(decl_index) catch |err| switch (err) {
+ error.AnalysisFail => {
+ if (decl.analysis == .in_progress) {
+ // If this decl caused the compile error, the analysis field would
+ // have been changed to indicate it was this Decl's fault. Because this
+ // did not happen, we infer here that it was a dependency failure.
+ decl.analysis = .dependency_failure;
+ }
+ return error.AnalysisFail;
+ },
+ error.GenericPoison => unreachable,
+ else => |e| {
+ decl.analysis = .sema_failure;
+ try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1);
+ try mod.retryable_failures.append(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
+ mod.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create(
+ mod.gpa,
+ decl.navSrcLoc(mod),
+ "unable to analyze: {s}",
+ .{@errorName(e)},
+ ));
+ return error.AnalysisFail;
+ },
+ };
+ };
+
+ // TODO: we do not yet have separate dependencies for decl values vs types.
+ if (decl_was_outdated) {
+ if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) {
+ log.debug("Decl tv invalidated ('{d}')", .{@intFromEnum(decl_index)});
+ // This dependency was marked as PO, meaning dependees were waiting
+ // on its analysis result, and it has turned out to be outdated.
+ // Update dependees accordingly.
+ try mod.markDependeeOutdated(.{ .decl_val = decl_index });
+ } else {
+ log.debug("Decl tv up-to-date ('{d}')", .{@intFromEnum(decl_index)});
+ // This dependency was previously PO, but turned out to be up-to-date.
+ // We do not need to queue successive analysis.
+ try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index });
+ }
+ }
+}
+
+pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: InternPool.Index) Zcu.SemaError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+
+ // We only care about the uncoerced function.
+ // We need to do this for the "orphaned function" check below to be valid.
+ const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index);
+
+ const func = zcu.funcInfo(maybe_coerced_func_index);
+ const decl_index = func.owner_decl;
+ const decl = zcu.declPtr(decl_index);
+
+ log.debug("ensureFuncBodyAnalyzed '{d}' (instance of '{}')", .{
+ @intFromEnum(func_index),
+ decl.name.fmt(ip),
+ });
+
+ // First, our owner decl must be up-to-date. This will always be the case
+ // during the first update, but may not be on successive updates if we happen
+ // to get analyzed before our parent decl.
+ try pt.ensureDeclAnalyzed(decl_index);
+
+ // On an update, it's possible this function changed such that our owner
+ // decl now refers to a different function, making this one orphaned. If
+ // that's the case, we should remove this function from the binary.
+ if (decl.val.ip_index != func_index) {
+ try zcu.markDependeeOutdated(.{ .func_ies = func_index });
+ ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
+ ip.remove(func_index);
+ @panic("TODO: remove orphaned function from binary");
+ }
+
+ // We'll want to remember what the IES used to be before the update for
+ // dependency invalidation purposes.
+ const old_resolved_ies = if (func.analysis(ip).inferred_error_set)
+ func.resolvedErrorSet(ip).*
+ else
+ .none;
+
+ switch (decl.analysis) {
+ .unreferenced => unreachable,
+ .in_progress => unreachable,
+
+ .codegen_failure => unreachable, // functions do not perform constant value generation
+
+ .file_failure,
+ .sema_failure,
+ .dependency_failure,
+ => return error.AnalysisFail,
+
+ .complete => {},
+ }
+
+ const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index });
+ const was_outdated = zcu.outdated.swapRemove(func_as_depender) or
+ zcu.potentially_outdated.swapRemove(func_as_depender);
+
+ if (was_outdated) {
+ if (build_options.only_c) unreachable;
+ _ = zcu.outdated_ready.swapRemove(func_as_depender);
+ zcu.deleteUnitExports(func_as_depender);
+ zcu.deleteUnitReferences(func_as_depender);
+ }
+
+ switch (func.analysis(ip).state) {
+ .success => if (!was_outdated) return,
+ .sema_failure,
+ .dependency_failure,
+ .codegen_failure,
+ => if (!was_outdated) return error.AnalysisFail,
+ .none, .queued => {},
+ .in_progress => unreachable,
+ .inline_only => unreachable, // don't queue work for this
+ }
+
+ log.debug("analyze and generate fn body '{d}'; reason='{s}'", .{
+ @intFromEnum(func_index),
+ if (was_outdated) "outdated" else "never analyzed",
+ });
+
+ var tmp_arena = std.heap.ArenaAllocator.init(gpa);
+ defer tmp_arena.deinit();
+ const sema_arena = tmp_arena.allocator();
+
+ var air = pt.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
+ error.AnalysisFail => {
+ if (func.analysis(ip).state == .in_progress) {
+ // If this decl caused the compile error, the analysis field would
+ // have been changed to indicate it was this Decl's fault. Because this
+ // did not happen, we infer here that it was a dependency failure.
+ func.analysis(ip).state = .dependency_failure;
+ }
+ return error.AnalysisFail;
+ },
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ errdefer air.deinit(gpa);
+
+ const invalidate_ies_deps = i: {
+ if (!was_outdated) break :i false;
+ if (!func.analysis(ip).inferred_error_set) break :i true;
+ const new_resolved_ies = func.resolvedErrorSet(ip).*;
+ break :i new_resolved_ies != old_resolved_ies;
+ };
+ if (invalidate_ies_deps) {
+ log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
+ try zcu.markDependeeOutdated(.{ .func_ies = func_index });
+ } else if (was_outdated) {
+ log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
+ try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index });
+ }
+
+ const comp = zcu.comp;
+
+ const dump_air = build_options.enable_debug_extensions and comp.verbose_air;
+ const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null);
+
+ if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
+ air.deinit(gpa);
+ return;
+ }
+
+ try comp.work_queue.writeItem(.{ .codegen_func = .{
+ .func = func_index,
+ .air = air,
+ } });
+}
+
+/// Takes ownership of `air`, even on error.
+/// If any types referenced by `air` are unresolved, marks the codegen as failed.
+pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Air) Allocator.Error!void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const comp = zcu.comp;
+
+ defer {
+ var air_mut = air;
+ air_mut.deinit(gpa);
+ }
+
+ const func = zcu.funcInfo(func_index);
+ const decl_index = func.owner_decl;
+ const decl = zcu.declPtr(decl_index);
+
+ var liveness = try Liveness.analyze(gpa, air, ip);
+ defer liveness.deinit(gpa);
+
+ if (build_options.enable_debug_extensions and comp.verbose_air) {
+ const fqn = try decl.fullyQualifiedName(zcu);
+ std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
+ @import("../print_air.zig").dump(pt, air, liveness);
+ std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)});
+ }
+
+ if (std.debug.runtime_safety) {
+ var verify: Liveness.Verify = .{
+ .gpa = gpa,
+ .air = air,
+ .liveness = liveness,
+ .intern_pool = ip,
+ };
+ defer verify.deinit();
+
+ verify.verify() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => {
+ try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+ zcu.failed_analysis.putAssumeCapacityNoClobber(
+ InternPool.AnalUnit.wrap(.{ .func = func_index }),
+ try Zcu.ErrorMsg.create(
+ gpa,
+ decl.navSrcLoc(zcu),
+ "invalid liveness: {s}",
+ .{@errorName(err)},
+ ),
+ );
+ func.analysis(ip).state = .codegen_failure;
+ return;
+ },
+ };
+ }
+
+ const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0);
+ defer codegen_prog_node.end();
+
+ if (!air.typesFullyResolved(zcu)) {
+ // A type we depend on failed to resolve. This is a transitive failure.
+ // Correcting this failure will involve changing a type this function
+ // depends on, hence triggering re-analysis of this function, so this
+ // interacts correctly with incremental compilation.
+ func.analysis(ip).state = .codegen_failure;
+ } else if (comp.bin_file) |lf| {
+ lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => {
+ func.analysis(ip).state = .codegen_failure;
+ },
+ else => {
+ try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+ zcu.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .func = func_index }), try Zcu.ErrorMsg.create(
+ gpa,
+ decl.navSrcLoc(zcu),
+ "unable to codegen: {s}",
+ .{@errorName(err)},
+ ));
+ func.analysis(ip).state = .codegen_failure;
+ try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
+ },
+ };
+ } else if (zcu.llvm_object) |llvm_object| {
+ if (build_options.only_c) unreachable;
+ llvm_object.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ }
+}
+
+/// https://github.com/ziglang/zig/issues/14307
+pub fn semaPkg(pt: Zcu.PerThread, pkg: *Module) !void {
+ const import_file_result = try pt.zcu.importPkg(pkg);
+ const root_decl_index = pt.zcu.fileRootDecl(import_file_result.file_index);
+ if (root_decl_index == .none) {
+ return pt.semaFile(import_file_result.file_index);
+ }
+}
+
+fn getFileRootStruct(
+ pt: Zcu.PerThread,
+ decl_index: Zcu.Decl.Index,
+ namespace_index: Zcu.Namespace.Index,
+ file_index: Zcu.File.Index,
+) Allocator.Error!InternPool.Index {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const file = zcu.fileByIndex(file_index);
+ const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
+ assert(extended.opcode == .struct_decl);
+ const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
+ assert(!small.has_captures_len);
+ assert(!small.has_backing_int);
+ assert(small.layout == .auto);
+ var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
+ const fields_len = if (small.has_fields_len) blk: {
+ const fields_len = file.zir.extra[extra_index];
+ extra_index += 1;
+ break :blk fields_len;
+ } else 0;
+ const decls_len = if (small.has_decls_len) blk: {
+ const decls_len = file.zir.extra[extra_index];
+ extra_index += 1;
+ break :blk decls_len;
+ } else 0;
+ const decls = file.zir.bodySlice(extra_index, decls_len);
+ extra_index += decls_len;
+
+ const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst);
+ const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
+ .layout = .auto,
+ .fields_len = fields_len,
+ .known_non_opv = small.known_non_opv,
+ .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
+ .is_tuple = small.is_tuple,
+ .any_comptime_fields = small.any_comptime_fields,
+ .any_default_inits = small.any_default_inits,
+ .inits_resolved = false,
+ .any_aligned_fields = small.any_aligned_fields,
+ .has_namespace = true,
+ .key = .{ .declared = .{
+ .zir_index = tracked_inst,
+ .captures = &.{},
+ } },
+ })) {
+ .existing => unreachable, // we wouldn't be analyzing the file root if this type existed
+ .wip => |wip| wip,
+ };
+ errdefer wip_ty.cancel(ip);
+
+ if (zcu.comp.debug_incremental) {
+ try ip.addDependency(
+ gpa,
+ InternPool.AnalUnit.wrap(.{ .decl = decl_index }),
+ .{ .src_hash = tracked_inst },
+ );
+ }
+
+ const decl = zcu.declPtr(decl_index);
+ decl.val = Value.fromInterned(wip_ty.index);
+ decl.has_tv = true;
+ decl.owns_tv = true;
+ decl.analysis = .complete;
+
+ try zcu.scanNamespace(namespace_index, decls, decl);
+ try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+ return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
+}
+
+/// Re-analyze the root Decl of a file on an incremental update.
+/// If `type_outdated`, the struct type itself is considered outdated and is
+/// reconstructed at a new InternPool index. Otherwise, the namespace is just
+/// re-analyzed. Returns whether the decl's tyval was invalidated.
+fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const file = zcu.fileByIndex(file_index);
+ const decl = zcu.declPtr(zcu.fileRootDecl(file_index).unwrap().?);
+
+ log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{
+ file.mod.fully_qualified_name,
+ file.sub_file_path,
+ type_outdated,
+ });
+
+ if (file.status != .success_zir) {
+ if (decl.analysis == .file_failure) {
+ return false;
+ } else {
+ decl.analysis = .file_failure;
+ return true;
+ }
+ }
+
+ if (decl.analysis == .file_failure) {
+ // No struct type currently exists. Create one!
+ const root_decl = zcu.fileRootDecl(file_index);
+ _ = try pt.getFileRootStruct(root_decl.unwrap().?, decl.src_namespace, file_index);
+ return true;
+ }
+
+ assert(decl.has_tv);
+ assert(decl.owns_tv);
+
+ if (type_outdated) {
+ // Invalidate the existing type, reusing the decl and namespace.
+ const file_root_decl = zcu.fileRootDecl(file_index).unwrap().?;
+ ip.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{
+ .decl = file_root_decl,
+ }));
+ ip.remove(decl.val.toIntern());
+ decl.val = undefined;
+ _ = try pt.getFileRootStruct(file_root_decl, decl.src_namespace, file_index);
+ return true;
+ }
+
+ // Only the struct's namespace is outdated.
+ // Preserve the type - just scan the namespace again.
+
+ const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
+ const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
+
+ var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
+ extra_index += @intFromBool(small.has_fields_len);
+ const decls_len = if (small.has_decls_len) blk: {
+ const decls_len = file.zir.extra[extra_index];
+ extra_index += 1;
+ break :blk decls_len;
+ } else 0;
+ const decls = file.zir.bodySlice(extra_index, decls_len);
+
+ if (!type_outdated) {
+ try zcu.scanNamespace(decl.src_namespace, decls, decl);
+ }
+
+ return false;
+}
+
+/// Regardless of the file status, will create a `Decl` if none exists so that we can track
+/// dependencies and re-analyze when the file becomes outdated.
+fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const file = zcu.fileByIndex(file_index);
+ assert(zcu.fileRootDecl(file_index) == .none);
+ log.debug("semaFile zcu={s} sub_file_path={s}", .{
+ file.mod.fully_qualified_name, file.sub_file_path,
+ });
+
+ // Because these three things each reference each other, `undefined`
+ // placeholders are used here; they are set once the struct type gains an
+ // InternPool index.
+ const new_namespace_index = try zcu.createNamespace(.{
+ .parent = .none,
+ .decl_index = undefined,
+ .file_scope = file_index,
+ });
+ errdefer zcu.destroyNamespace(new_namespace_index);
+
+ const new_decl_index = try zcu.allocateNewDecl(new_namespace_index);
+ const new_decl = zcu.declPtr(new_decl_index);
+ errdefer @panic("TODO error handling");
+
+ zcu.setFileRootDecl(file_index, new_decl_index.toOptional());
+ zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index;
+
+ new_decl.name = try file.fullyQualifiedName(zcu);
+ new_decl.name_fully_qualified = true;
+ new_decl.is_pub = true;
+ new_decl.is_exported = false;
+ new_decl.alignment = .none;
+ new_decl.@"linksection" = .none;
+ new_decl.analysis = .in_progress;
+
+ if (file.status != .success_zir) {
+ new_decl.analysis = .file_failure;
+ return;
+ }
+ assert(file.zir_loaded);
+
+ const struct_ty = try pt.getFileRootStruct(new_decl_index, new_namespace_index, file_index);
+ errdefer zcu.intern_pool.remove(struct_ty);
+
+ switch (zcu.comp.cache_use) {
+ .whole => |whole| if (whole.cache_manifest) |man| {
+ const source = file.getSource(gpa) catch |err| {
+ try Zcu.reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)});
+ return error.AnalysisFail;
+ };
+
+ const resolved_path = std.fs.path.resolve(gpa, &.{
+ file.mod.root.root_dir.path orelse ".",
+ file.mod.root.sub_path,
+ file.sub_file_path,
+ }) catch |err| {
+ try Zcu.reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)});
+ return error.AnalysisFail;
+ };
+ errdefer gpa.free(resolved_path);
+
+ whole.cache_manifest_mutex.lock();
+ defer whole.cache_manifest_mutex.unlock();
+ try man.addFilePostContents(resolved_path, source.bytes, source.stat);
+ },
+ .incremental => {},
+ }
+}
+
+fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const zcu = pt.zcu;
+ const decl = zcu.declPtr(decl_index);
+ const ip = &zcu.intern_pool;
+
+ if (decl.getFileScope(zcu).status != .success_zir) {
+ return error.AnalysisFail;
+ }
+
+ assert(!zcu.declIsRoot(decl_index));
+
+ if (decl.zir_decl_index == .none and decl.owns_tv) {
+ // We are re-analyzing an anonymous owner Decl (for a function or a namespace type).
+ return zcu.semaAnonOwnerDecl(decl_index);
+ }
+
+ log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)});
+ log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)});
+ defer blk: {
+ log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)});
+ }
+
+ const old_has_tv = decl.has_tv;
+ // The following values are ignored if `!old_has_tv`
+ const old_ty = if (old_has_tv) decl.typeOf(zcu) else undefined;
+ const old_val = decl.val;
+ const old_align = decl.alignment;
+ const old_linksection = decl.@"linksection";
+ const old_addrspace = decl.@"addrspace";
+ const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func|
+ prev_func.analysis(ip).state == .inline_only
+ else
+ false;
+
+ const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip);
+
+ const gpa = zcu.gpa;
+ const zir = decl.getFileScope(zcu).zir;
+
+ const builtin_type_target_index: InternPool.Index = ip_index: {
+ const std_mod = zcu.std_mod;
+ if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none;
+ // We're in the std module.
+ const std_file_imported = try zcu.importPkg(std_mod);
+ const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index);
+ const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?);
+ const std_namespace = std_decl.getInnerNamespace(zcu).?;
+ const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
+ const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none);
+ const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none;
+ if (decl.src_namespace != builtin_namespace) break :ip_index .none;
+ // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index.
+ for ([_][]const u8{
+ "AtomicOrder",
+ "AtomicRmwOp",
+ "CallingConvention",
+ "AddressSpace",
+ "FloatMode",
+ "ReduceOp",
+ "CallModifier",
+ "PrefetchOptions",
+ "ExportOptions",
+ "ExternOptions",
+ "Type",
+ }, [_]InternPool.Index{
+ .atomic_order_type,
+ .atomic_rmw_op_type,
+ .calling_convention_type,
+ .address_space_type,
+ .float_mode_type,
+ .reduce_op_type,
+ .call_modifier_type,
+ .prefetch_options_type,
+ .export_options_type,
+ .extern_options_type,
+ .type_info_type,
+ }) |type_name, type_ip| {
+ if (decl.name.eqlSlice(type_name, ip)) break :ip_index type_ip;
+ }
+ break :ip_index .none;
+ };
+
+ zcu.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
+
+ decl.analysis = .in_progress;
+
+ var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+ defer analysis_arena.deinit();
+
+ var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+ defer comptime_err_ret_trace.deinit();
+
+ var sema: Sema = .{
+ .pt = pt,
+ .gpa = gpa,
+ .arena = analysis_arena.allocator(),
+ .code = zir,
+ .owner_decl = decl,
+ .owner_decl_index = decl_index,
+ .func_index = .none,
+ .func_is_naked = false,
+ .fn_ret_ty = Type.void,
+ .fn_ret_ty_ies = null,
+ .owner_func_index = .none,
+ .comptime_err_ret_trace = &comptime_err_ret_trace,
+ .builtin_type_target_index = builtin_type_target_index,
+ };
+ defer sema.deinit();
+
+ // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source.
+ try sema.declareDependency(.{ .src_hash = try ip.trackZir(
+ gpa,
+ decl.getFileScopeIndex(zcu),
+ decl_inst,
+ ) });
+
+ var block_scope: Sema.Block = .{
+ .parent = null,
+ .sema = &sema,
+ .namespace = decl.src_namespace,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = true,
+ .src_base_inst = decl.zir_decl_index.unwrap().?,
+ .type_name_ctx = decl.name,
+ };
+ defer block_scope.instructions.deinit(gpa);
+
+ const decl_bodies = decl.zirBodies(zcu);
+
+ const result_ref = try sema.resolveInlineBody(&block_scope, decl_bodies.value_body, decl_inst);
+ // We'll do a few more things with this Sema below. Clear the type target
+ // index just in case any of them analyze a type.
+ sema.builtin_type_target_index = .none;
+ const align_src = block_scope.src(.{ .node_offset_var_decl_align = 0 });
+ const section_src = block_scope.src(.{ .node_offset_var_decl_section = 0 });
+ const address_space_src = block_scope.src(.{ .node_offset_var_decl_addrspace = 0 });
+ const ty_src = block_scope.src(.{ .node_offset_var_decl_ty = 0 });
+ const init_src = block_scope.src(.{ .node_offset_var_decl_init = 0 });
+ const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref);
+ const decl_ty = decl_val.typeOf(zcu);
+
+ // Note this resolves the type of the Decl, not the value; if this Decl
+ // is a struct, for example, this resolves `type` (which needs no resolution),
+ // not the struct itself.
+ try decl_ty.resolveLayout(pt);
+
+ if (decl.kind == .@"usingnamespace") {
+ if (!decl_ty.eql(Type.type, zcu)) {
+ return sema.fail(&block_scope, ty_src, "expected type, found {}", .{decl_ty.fmt(pt)});
+ }
+ const ty = decl_val.toType();
+ if (ty.getNamespace(zcu) == null) {
+ return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(pt)});
+ }
+
+ decl.val = ty.toValue();
+ decl.alignment = .none;
+ decl.@"linksection" = .none;
+ decl.has_tv = true;
+ decl.owns_tv = false;
+ decl.analysis = .complete;
+
+ // TODO: usingnamespace cannot currently participate in incremental compilation
+ return .{
+ .invalidate_decl_val = true,
+ .invalidate_decl_ref = true,
+ };
+ }
+
+ var queue_linker_work = true;
+ var is_func = false;
+ var is_inline = false;
+ switch (decl_val.toIntern()) {
+ .generic_poison => unreachable,
+ .unreachable_value => unreachable,
+ else => switch (ip.indexToKey(decl_val.toIntern())) {
+ .variable => |variable| {
+ decl.owns_tv = variable.decl == decl_index;
+ queue_linker_work = decl.owns_tv;
+ },
+
+ .extern_func => |extern_func| {
+ decl.owns_tv = extern_func.decl == decl_index;
+ queue_linker_work = decl.owns_tv;
+ is_func = decl.owns_tv;
+ },
+
+ .func => |func| {
+ decl.owns_tv = func.owner_decl == decl_index;
+ queue_linker_work = false;
+ is_inline = decl.owns_tv and decl_ty.fnCallingConvention(zcu) == .Inline;
+ is_func = decl.owns_tv;
+ },
+
+ else => {},
+ },
+ }
+
+ decl.val = decl_val;
+ // Function linksection, align, and addrspace were already set by Sema
+ if (!is_func) {
+ decl.alignment = blk: {
+ const align_body = decl_bodies.align_body orelse break :blk .none;
+ const align_ref = try sema.resolveInlineBody(&block_scope, align_body, decl_inst);
+ break :blk try sema.analyzeAsAlign(&block_scope, align_src, align_ref);
+ };
+ decl.@"linksection" = blk: {
+ const linksection_body = decl_bodies.linksection_body orelse break :blk .none;
+ const linksection_ref = try sema.resolveInlineBody(&block_scope, linksection_body, decl_inst);
+ const bytes = try sema.toConstString(&block_scope, section_src, linksection_ref, .{
+ .needed_comptime_reason = "linksection must be comptime-known",
+ });
+ if (std.mem.indexOfScalar(u8, bytes, 0) != null) {
+ return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{});
+ } else if (bytes.len == 0) {
+ return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
+ }
+ break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls);
+ };
+ decl.@"addrspace" = blk: {
+ const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
+ .variable => .variable,
+ .extern_func, .func => .function,
+ else => .constant,
+ };
+
+ const target = zcu.getTarget();
+
+ const addrspace_body = decl_bodies.addrspace_body orelse break :blk switch (addrspace_ctx) {
+ .function => target_util.defaultAddressSpace(target, .function),
+ .variable => target_util.defaultAddressSpace(target, .global_mutable),
+ .constant => target_util.defaultAddressSpace(target, .global_constant),
+ else => unreachable,
+ };
+ const addrspace_ref = try sema.resolveInlineBody(&block_scope, addrspace_body, decl_inst);
+ break :blk try sema.analyzeAsAddressSpace(&block_scope, address_space_src, addrspace_ref, addrspace_ctx);
+ };
+ }
+ decl.has_tv = true;
+ decl.analysis = .complete;
+
+ const result: Zcu.SemaDeclResult = if (old_has_tv) .{
+ .invalidate_decl_val = !decl_ty.eql(old_ty, zcu) or
+ !decl.val.eql(old_val, decl_ty, zcu) or
+ is_inline != old_is_inline,
+ .invalidate_decl_ref = !decl_ty.eql(old_ty, zcu) or
+ decl.alignment != old_align or
+ decl.@"linksection" != old_linksection or
+ decl.@"addrspace" != old_addrspace or
+ is_inline != old_is_inline,
+ } else .{
+ .invalidate_decl_val = true,
+ .invalidate_decl_ref = true,
+ };
+
+ const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl_ty));
+ if (has_runtime_bits) {
+ // Needed for codegen_decl, which will call updateDecl; the codegen
+ // backend then wants full access to the Decl Type.
+ try decl_ty.resolveFully(pt);
+
+ try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
+
+ if (result.invalidate_decl_ref and zcu.emit_h != null) {
+ try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
+ }
+ }
+
+ if (decl.is_exported) {
+ const export_src = block_scope.src(.{ .token_offset = @intFromBool(decl.is_pub) });
+ if (is_inline) return sema.fail(&block_scope, export_src, "export of inline function", .{});
+ // The scope needs to have the decl in it.
+ try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
+ }
+
+ try sema.flushExports();
+
+ return result;
+}
+
+pub fn embedFile(
+ pt: Zcu.PerThread,
+ cur_file: *Zcu.File,
+ import_string: []const u8,
+ src_loc: Zcu.LazySrcLoc,
+) !InternPool.Index {
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
+
+ if (cur_file.mod.deps.get(import_string)) |pkg| {
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ pkg.root.root_dir.path orelse ".",
+ pkg.root.sub_path,
+ pkg.root_src_path,
+ });
+ var keep_resolved_path = false;
+ defer if (!keep_resolved_path) gpa.free(resolved_path);
+
+ const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
+ errdefer {
+ assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
+ keep_resolved_path = false;
+ }
+ if (gop.found_existing) return gop.value_ptr.*.val;
+ keep_resolved_path = true;
+
+ const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
+ errdefer gpa.free(sub_file_path);
+
+ return pt.newEmbedFile(pkg, sub_file_path, resolved_path, gop.value_ptr, src_loc);
+ }
+
+ // The resolved path is used as the key in the table, to detect when two
+ // different relative paths refer to the same file.
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ cur_file.mod.root.root_dir.path orelse ".",
+ cur_file.mod.root.sub_path,
+ cur_file.sub_file_path,
+ "..",
+ import_string,
+ });
+
+ var keep_resolved_path = false;
+ defer if (!keep_resolved_path) gpa.free(resolved_path);
+
+ const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
+ errdefer {
+ assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
+ keep_resolved_path = false;
+ }
+ if (gop.found_existing) return gop.value_ptr.*.val;
+ keep_resolved_path = true;
+
+ const resolved_root_path = try std.fs.path.resolve(gpa, &.{
+ cur_file.mod.root.root_dir.path orelse ".",
+ cur_file.mod.root.sub_path,
+ });
+ defer gpa.free(resolved_root_path);
+
+ const sub_file_path = p: {
+ const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path);
+ errdefer gpa.free(relative);
+
+ if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) {
+ break :p relative;
+ }
+ return error.ImportOutsideModulePath;
+ };
+ defer gpa.free(sub_file_path);
+
+ return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc);
+}
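+// For example (hypothetical paths): `@embedFile("../assets/logo.bin")` from one
+// file and `@embedFile("assets/logo.bin")` from another may resolve to the same
+// absolute path and therefore share a single `EmbedFile` entry in `embed_table`.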
+
+/// Finalize the creation of an anon decl.
+pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void {
+ if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) {
+ try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
+ }
+}
+
+/// https://github.com/ziglang/zig/issues/14307
+fn newEmbedFile(
+ pt: Zcu.PerThread,
+ pkg: *Module,
+ sub_file_path: []const u8,
+ resolved_path: []const u8,
+ result: **Zcu.EmbedFile,
+ src_loc: Zcu.LazySrcLoc,
+) !InternPool.Index {
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
+ const ip = &mod.intern_pool;
+
+ const new_file = try gpa.create(Zcu.EmbedFile);
+ errdefer gpa.destroy(new_file);
+
+ var file = try pkg.root.openFile(sub_file_path, .{});
+ defer file.close();
+
+ const actual_stat = try file.stat();
+ const stat: Cache.File.Stat = .{
+ .size = actual_stat.size,
+ .inode = actual_stat.inode,
+ .mtime = actual_stat.mtime,
+ };
+ const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow;
+
+ const bytes = try ip.string_bytes.addManyAsSlice(gpa, try std.math.add(usize, size, 1));
+ const actual_read = try file.readAll(bytes[0..size]);
+ if (actual_read != size) return error.UnexpectedEndOfFile;
+ bytes[size] = 0;
+
+ const comp = mod.comp;
+ switch (comp.cache_use) {
+ .whole => |whole| if (whole.cache_manifest) |man| {
+ const copied_resolved_path = try gpa.dupe(u8, resolved_path);
+ errdefer gpa.free(copied_resolved_path);
+ whole.cache_manifest_mutex.lock();
+ defer whole.cache_manifest_mutex.unlock();
+ try man.addFilePostContents(copied_resolved_path, bytes[0..size], stat);
+ },
+ .incremental => {},
+ }
+
+ const array_ty = try pt.intern(.{ .array_type = .{
+ .len = size,
+ .sentinel = .zero_u8,
+ .child = .u8_type,
+ } });
+ const array_val = try pt.intern(.{ .aggregate = .{
+ .ty = array_ty,
+ .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) },
+ } });
+
+ const ptr_ty = (try pt.ptrType(.{
+ .child = array_ty,
+ .flags = .{
+ .alignment = .none,
+ .is_const = true,
+ .address_space = .generic,
+ },
+ })).toIntern();
+ const ptr_val = try pt.intern(.{ .ptr = .{
+ .ty = ptr_ty,
+ .base_addr = .{ .anon_decl = .{
+ .val = array_val,
+ .orig_ty = ptr_ty,
+ } },
+ .byte_offset = 0,
+ } });
+
+ result.* = new_file;
+ new_file.* = .{
+ .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls),
+ .owner = pkg,
+ .stat = stat,
+ .val = ptr_val,
+ .src_loc = src_loc,
+ };
+ return ptr_val;
+}
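+// Informally, the value built above has type `*const [size:0]u8` pointing at an
+// anon decl whose payload is the file contents plus a 0 sentinel, matching the
+// `*const [N:0]u8` result that `@embedFile` ultimately provides.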
+
+pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const mod = pt.zcu;
+ const gpa = mod.gpa;
+ const ip = &mod.intern_pool;
+ const func = mod.funcInfo(func_index);
+ const decl_index = func.owner_decl;
+ const decl = mod.declPtr(decl_index);
+
+ log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
+ defer blk: {
+ log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
+ }
+
+ const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+ defer decl_prog_node.end();
+
+ mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
+
+ var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+ defer comptime_err_ret_trace.deinit();
+
+ // In the case of a generic function instance, this is the type of the
+ // instance, which has comptime parameters elided. In other words, it is
+ // the runtime-known parameters only, not to be confused with the
+ // generic_owner function type, which potentially has more parameters,
+ // including comptime parameters.
+ const fn_ty = decl.typeOf(mod);
+ const fn_ty_info = mod.typeToFunc(fn_ty).?;
+
+ var sema: Sema = .{
+ .pt = pt,
+ .gpa = gpa,
+ .arena = arena,
+ .code = decl.getFileScope(mod).zir,
+ .owner_decl = decl,
+ .owner_decl_index = decl_index,
+ .func_index = func_index,
+ .func_is_naked = fn_ty_info.cc == .Naked,
+ .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
+ .fn_ret_ty_ies = null,
+ .owner_func_index = func_index,
+ .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
+ .comptime_err_ret_trace = &comptime_err_ret_trace,
+ };
+ defer sema.deinit();
+
+ // Every runtime function has a dependency on the source of the Decl it originates from.
+ // It also depends on the value of its owner Decl.
+ try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? });
+ try sema.declareDependency(.{ .decl_val = decl_index });
+
+ if (func.analysis(ip).inferred_error_set) {
+ const ies = try arena.create(Sema.InferredErrorSet);
+ ies.* = .{ .func = func_index };
+ sema.fn_ret_ty_ies = ies;
+ }
+
+ // reset in case calls to errorable functions are removed.
+ func.analysis(ip).calls_or_awaits_errorable_fn = false;
+
+ // First few indexes of extra are reserved and set at the end.
+ const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
+ try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
+ sema.air_extra.items.len += reserved_count;
+
+ var inner_block: Sema.Block = .{
+ .parent = null,
+ .sema = &sema,
+ .namespace = decl.src_namespace,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = false,
+ .src_base_inst = inst: {
+ const owner_info = if (func.generic_owner == .none)
+ func
+ else
+ mod.funcInfo(func.generic_owner);
+ const orig_decl = mod.declPtr(owner_info.owner_decl);
+ break :inst orig_decl.zir_decl_index.unwrap().?;
+ },
+ .type_name_ctx = decl.name,
+ };
+ defer inner_block.instructions.deinit(gpa);
+
+ const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip));
+
+ // Here we are performing "runtime semantic analysis" for a function body, which means
+ // we must map the parameter ZIR instructions to `arg` AIR instructions.
+ // AIR requires the `arg` parameters to be the first N instructions.
+ // This could be a generic function instantiation, however, in which case we need to
+ // map the comptime parameters to constant values and only emit arg AIR instructions
+ // for the runtime ones.
+ const runtime_params_len = fn_ty_info.param_types.len;
+ try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
+ try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len);
+ try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
+
+ // In the case of a generic function instance, pre-populate all the comptime args.
+ if (func.comptime_args.len != 0) {
+ for (
+ fn_info.param_body[0..func.comptime_args.len],
+ func.comptime_args.get(ip),
+ ) |inst, comptime_arg| {
+ if (comptime_arg == .none) continue;
+ sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg));
+ }
+ }
+
+ const src_params_len = if (func.comptime_args.len != 0)
+ func.comptime_args.len
+ else
+ runtime_params_len;
+
+ var runtime_param_index: usize = 0;
+ for (fn_info.param_body[0..src_params_len], 0..) |inst, src_param_index| {
+ const gop = sema.inst_map.getOrPutAssumeCapacity(inst);
+ if (gop.found_existing) continue; // provided above by comptime arg
+
+ const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index];
+ runtime_param_index += 1;
+
+ const opt_opv = sema.typeHasOnePossibleValue(Type.fromInterned(param_ty)) catch |err| switch (err) {
+ error.GenericPoison => unreachable,
+ error.ComptimeReturn => unreachable,
+ error.ComptimeBreak => unreachable,
+ else => |e| return e,
+ };
+ if (opt_opv) |opv| {
+ gop.value_ptr.* = Air.internedToRef(opv.toIntern());
+ continue;
+ }
+ const arg_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
+ gop.value_ptr.* = arg_index.toRef();
+ inner_block.instructions.appendAssumeCapacity(arg_index);
+ sema.air_instructions.appendAssumeCapacity(.{
+ .tag = .arg,
+ .data = .{ .arg = .{
+ .ty = Air.internedToRef(param_ty),
+ .src_index = @intCast(src_param_index),
+ } },
+ });
+ }
+
+ func.analysis(ip).state = .in_progress;
+
+ const last_arg_index = inner_block.instructions.items.len;
+
+ // Save the error trace as our first action in the function.
+ // If this is unnecessary after all, Liveness will clean it up for us.
+ const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&inner_block);
+ sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
+ inner_block.error_return_trace_index = error_return_trace_index;
+
+ sema.analyzeFnBody(&inner_block, fn_info.body) catch |err| switch (err) {
+ // TODO make these unreachable instead of @panic
+ error.GenericPoison => @panic("zig compiler bug: GenericPoison"),
+ error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"),
+ else => |e| return e,
+ };
+
+ for (sema.unresolved_inferred_allocs.keys()) |ptr_inst| {
+ // The lack of a resolve_inferred_alloc means that this instruction
+ // is unused so it just has to be a no-op.
+ sema.air_instructions.set(@intFromEnum(ptr_inst), .{
+ .tag = .alloc,
+ .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
+ });
+ }
+
+ // If we don't get an error return trace from a caller, create our own.
+ if (func.analysis(ip).calls_or_awaits_errorable_fn and
+ mod.comp.config.any_error_tracing and
+ !sema.fn_ret_ty.isError(mod))
+ {
+ sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) {
+ // TODO make these unreachable instead of @panic
+ error.GenericPoison => @panic("zig compiler bug: GenericPoison"),
+ error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"),
+ error.ComptimeBreak => @panic("zig compiler bug: ComptimeBreak"),
+ else => |e| return e,
+ };
+ }
+
+ // Copy the block into place and mark that as the main block.
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+ inner_block.instructions.items.len);
+ const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
+ .body_len = @intCast(inner_block.instructions.items.len),
+ });
+ sema.air_extra.appendSliceAssumeCapacity(@ptrCast(inner_block.instructions.items));
+ sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
+
+ // Resolving inferred error sets is done *before* setting the function
+ // state to success, so that "unable to resolve inferred error set" errors
+ // can be emitted here.
+ if (sema.fn_ret_ty_ies) |ies| {
+ sema.resolveInferredErrorSetPtr(&inner_block, .{
+ .base_node_inst = inner_block.src_base_inst,
+ .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0),
+ }, ies) catch |err| switch (err) {
+ error.GenericPoison => unreachable,
+ error.ComptimeReturn => unreachable,
+ error.ComptimeBreak => unreachable,
+ error.AnalysisFail => {
+ // In this case our function depends on a type that had a compile error.
+ // We should not try to lower this function.
+ decl.analysis = .dependency_failure;
+ return error.AnalysisFail;
+ },
+ else => |e| return e,
+ };
+ assert(ies.resolved != .none);
+ ip.funcIesResolved(func_index).* = ies.resolved;
+ }
+
+ func.analysis(ip).state = .success;
+
+ // Finally we must resolve the return type and parameter types so that backends
+ // have full access to type information.
+ // Crucially, this happens *after* we set the function state to success above,
+ // so that dependencies on the function body will now be satisfied rather than
+ // result in circular dependency errors.
+ sema.resolveFnTypes(fn_ty) catch |err| switch (err) {
+ error.GenericPoison => unreachable,
+ error.ComptimeReturn => unreachable,
+ error.ComptimeBreak => unreachable,
+ error.AnalysisFail => {
+ // In this case our function depends on a type that had a compile error.
+ // We should not try to lower this function.
+ decl.analysis = .dependency_failure;
+ return error.AnalysisFail;
+ },
+ else => |e| return e,
+ };
+
+ try sema.flushExports();
+
+ return .{
+ .instructions = sema.air_instructions.toOwnedSlice(),
+ .extra = try sema.air_extra.toOwnedSlice(gpa),
+ };
+}
+
+/// Called from `Compilation.update`, after everything is done, just before
+/// reporting compile errors. In this function we emit exported symbol collision
+/// errors and communicate exported symbols to the linker backend.
+pub fn processExports(pt: Zcu.PerThread) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+
+ // First, construct a mapping of every exported value and Decl to the indices of all its different exports.
+ var decl_exports: std.AutoArrayHashMapUnmanaged(Zcu.Decl.Index, std.ArrayListUnmanaged(u32)) = .{};
+ var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .{};
+ defer {
+ for (decl_exports.values()) |*exports| {
+ exports.deinit(gpa);
+ }
+ decl_exports.deinit(gpa);
+ for (value_exports.values()) |*exports| {
+ exports.deinit(gpa);
+ }
+ value_exports.deinit(gpa);
+ }
+
+ // We note as a heuristic:
+ // * It is rare to export a value.
+ // * It is rare for one Decl to be exported multiple times.
+ // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization.
+ try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count());
+
+ for (zcu.single_exports.values()) |export_idx| {
+ const exp = zcu.all_exports.items[export_idx];
+ const value_ptr, const found_existing = switch (exp.exported) {
+ .decl_index => |i| gop: {
+ const gop = try decl_exports.getOrPut(gpa, i);
+ break :gop .{ gop.value_ptr, gop.found_existing };
+ },
+ .value => |i| gop: {
+ const gop = try value_exports.getOrPut(gpa, i);
+ break :gop .{ gop.value_ptr, gop.found_existing };
+ },
+ };
+ if (!found_existing) value_ptr.* = .{};
+ try value_ptr.append(gpa, export_idx);
+ }
+
+ for (zcu.multi_exports.values()) |info| {
+ for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| {
+ const value_ptr, const found_existing = switch (exp.exported) {
+ .decl_index => |i| gop: {
+ const gop = try decl_exports.getOrPut(gpa, i);
+ break :gop .{ gop.value_ptr, gop.found_existing };
+ },
+ .value => |i| gop: {
+ const gop = try value_exports.getOrPut(gpa, i);
+ break :gop .{ gop.value_ptr, gop.found_existing };
+ },
+ };
+ if (!found_existing) value_ptr.* = .{};
+ try value_ptr.append(gpa, @intCast(export_idx));
+ }
+ }
+
+ // Map symbol names to `Export` for name collision detection.
+ var symbol_exports: SymbolExports = .{};
+ defer symbol_exports.deinit(gpa);
+
+ for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| {
+ const exported: Zcu.Exported = .{ .decl_index = exported_decl };
+ try pt.processExportsInner(&symbol_exports, exported, exports_list.items);
+ }
+
+ for (value_exports.keys(), value_exports.values()) |exported_value, exports_list| {
+ const exported: Zcu.Exported = .{ .value = exported_value };
+ try pt.processExportsInner(&symbol_exports, exported, exports_list.items);
+ }
+}
+
+const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32);
+
+fn processExportsInner(
+ pt: Zcu.PerThread,
+ symbol_exports: *SymbolExports,
+ exported: Zcu.Exported,
+ export_indices: []const u32,
+) error{OutOfMemory}!void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+
+ for (export_indices) |export_idx| {
+ const new_export = &zcu.all_exports.items[export_idx];
+ const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name);
+ if (gop.found_existing) {
+ new_export.status = .failed_retryable;
+ try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
+ const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{
+ new_export.opts.name.fmt(&zcu.intern_pool),
+ });
+ errdefer msg.destroy(gpa);
+ const other_export = zcu.all_exports.items[gop.value_ptr.*];
+ try zcu.errNote(other_export.src, msg, "other symbol here", .{});
+ zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
+ new_export.status = .failed;
+ } else {
+ gop.value_ptr.* = export_idx;
+ }
+ }
+ if (zcu.comp.bin_file) |lf| {
+ try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices));
+ } else if (zcu.llvm_object) |llvm_object| {
+ if (build_options.only_c) unreachable;
+ try zcu.handleUpdateExports(export_indices, llvm_object.updateExports(pt, exported, export_indices));
+ }
+}
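+// As a concrete illustration of the collision check above: if two different
+// Decls are both exported under the symbol name "foo", the second one hits the
+// `gop.found_existing` branch and gets an "exported symbol collision: foo"
+// error, with a note pointing at the other export's source location.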
+
+pub fn populateTestFunctions(
+ pt: Zcu.PerThread,
+ main_progress_node: std.Progress.Node,
+) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const builtin_mod = zcu.root_mod.getBuiltinDependency();
+ const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index;
+ const root_decl_index = zcu.fileRootDecl(builtin_file_index);
+ const root_decl = zcu.declPtr(root_decl_index.unwrap().?);
+ const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace);
+ const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls);
+ const decl_index = builtin_namespace.decls.getKeyAdapted(
+ test_functions_str,
+ Zcu.DeclAdapter{ .zcu = zcu },
+ ).?;
+ {
+ // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions`
+ // was not referenced by start code.
+ zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
+ defer {
+ zcu.sema_prog_node.end();
+ zcu.sema_prog_node = undefined;
+ }
+ try pt.ensureDeclAnalyzed(decl_index);
+ }
+
+ const decl = zcu.declPtr(decl_index);
+ const test_fn_ty = decl.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu);
+
+ const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: {
+ // Add zcu.test_functions to an array decl then make the test_functions
+ // decl reference it as a slice.
+ const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count());
+ defer gpa.free(test_fn_vals);
+
+ for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| {
+ const test_decl = zcu.declPtr(test_decl_index);
+ const test_decl_name = try test_decl.fullyQualifiedName(zcu);
+ const test_decl_name_len = test_decl_name.length(ip);
+ const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: {
+ const test_name_ty = try pt.arrayType(.{
+ .len = test_decl_name_len,
+ .child = .u8_type,
+ });
+ const test_name_val = try pt.intern(.{ .aggregate = .{
+ .ty = test_name_ty.toIntern(),
+ .storage = .{ .bytes = test_decl_name.toString() },
+ } });
+ break :n .{
+ .orig_ty = (try pt.singleConstPtrType(test_name_ty)).toIntern(),
+ .val = test_name_val,
+ };
+ };
+
+ const test_fn_fields = .{
+ // name
+ try pt.intern(.{ .slice = .{
+ .ty = .slice_const_u8_type,
+ .ptr = try pt.intern(.{ .ptr = .{
+ .ty = .manyptr_const_u8_type,
+ .base_addr = .{ .anon_decl = test_name_anon_decl },
+ .byte_offset = 0,
+ } }),
+ .len = try pt.intern(.{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = test_decl_name_len },
+ } }),
+ } }),
+ // func
+ try pt.intern(.{ .ptr = .{
+ .ty = try pt.intern(.{ .ptr_type = .{
+ .child = test_decl.typeOf(zcu).toIntern(),
+ .flags = .{
+ .is_const = true,
+ },
+ } }),
+ .base_addr = .{ .decl = test_decl_index },
+ .byte_offset = 0,
+ } }),
+ };
+ test_fn_val.* = try pt.intern(.{ .aggregate = .{
+ .ty = test_fn_ty.toIntern(),
+ .storage = .{ .elems = &test_fn_fields },
+ } });
+ }
+
+ const array_ty = try pt.arrayType(.{
+ .len = test_fn_vals.len,
+ .child = test_fn_ty.toIntern(),
+ .sentinel = .none,
+ });
+ const array_val = try pt.intern(.{ .aggregate = .{
+ .ty = array_ty.toIntern(),
+ .storage = .{ .elems = test_fn_vals },
+ } });
+ break :array .{
+ .orig_ty = (try pt.singleConstPtrType(array_ty)).toIntern(),
+ .val = array_val,
+ };
+ };
+
+ {
+ const new_ty = try pt.ptrType(.{
+ .child = test_fn_ty.toIntern(),
+ .flags = .{
+ .is_const = true,
+ .size = .Slice,
+ },
+ });
+ const new_val = decl.val;
+ const new_init = try pt.intern(.{ .slice = .{
+ .ty = new_ty.toIntern(),
+ .ptr = try pt.intern(.{ .ptr = .{
+ .ty = new_ty.slicePtrFieldType(zcu).toIntern(),
+ .base_addr = .{ .anon_decl = array_anon_decl },
+ .byte_offset = 0,
+ } }),
+ .len = (try pt.intValue(Type.usize, zcu.test_functions.count())).toIntern(),
+ } });
+ ip.mutateVarInit(decl.val.toIntern(), new_init);
+
+ // Since we are replacing the Decl's value we must perform cleanup on the
+ // previous value.
+ decl.val = new_val;
+ decl.has_tv = true;
+ }
+ {
+ zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
+ defer {
+ zcu.codegen_prog_node.end();
+ zcu.codegen_prog_node = undefined;
+ }
+
+ try pt.linkerUpdateDecl(decl_index);
+ }
+}
+
+pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void {
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+
+ const decl = zcu.declPtr(decl_index);
+
+ const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0);
+ defer codegen_prog_node.end();
+
+ if (comp.bin_file) |lf| {
+ lf.updateDecl(pt, decl_index) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => {
+ decl.analysis = .codegen_failure;
+ },
+ else => {
+ const gpa = zcu.gpa;
+ try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+ zcu.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create(
+ gpa,
+ decl.navSrcLoc(zcu),
+ "unable to codegen: {s}",
+ .{@errorName(err)},
+ ));
+ decl.analysis = .codegen_failure;
+ try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
+ },
+ };
+ } else if (zcu.llvm_object) |llvm_object| {
+ if (build_options.only_c) unreachable;
+ llvm_object.updateDecl(pt, decl_index) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ }
+}
+
+/// Shortcut for calling `intern_pool.get`.
+pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
+ return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key);
+}
+
+/// Shortcut for calling `intern_pool.getCoerced`.
+pub fn getCoerced(pt: Zcu.PerThread, val: Value, new_ty: Type) Allocator.Error!Value {
+ return Value.fromInterned(try pt.zcu.intern_pool.getCoerced(pt.zcu.gpa, pt.tid, val.toIntern(), new_ty.toIntern()));
+}
+
+pub fn intType(pt: Zcu.PerThread, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
+ return Type.fromInterned(try pt.intern(.{ .int_type = .{
+ .signedness = signedness,
+ .bits = bits,
+ } }));
+}
+
+pub fn errorIntType(pt: Zcu.PerThread) std.mem.Allocator.Error!Type {
+ return pt.intType(.unsigned, pt.zcu.errorSetBits());
+}
+
+pub fn arrayType(pt: Zcu.PerThread, info: InternPool.Key.ArrayType) Allocator.Error!Type {
+ return Type.fromInterned(try pt.intern(.{ .array_type = info }));
+}
+
+pub fn vectorType(pt: Zcu.PerThread, info: InternPool.Key.VectorType) Allocator.Error!Type {
+ return Type.fromInterned(try pt.intern(.{ .vector_type = info }));
+}
+
+pub fn optionalType(pt: Zcu.PerThread, child_type: InternPool.Index) Allocator.Error!Type {
+ return Type.fromInterned(try pt.intern(.{ .opt_type = child_type }));
+}
+
+pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!Type {
+ var canon_info = info;
+
+ if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
+
+ // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
+ // type, we change it to 0 here. If this causes an assertion trip because the
+ // pointee type needs to be resolved more, that needs to be done before calling
+ // this `ptrType` function.
+ if (info.flags.alignment != .none and
+ info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt))
+ {
+ canon_info.flags.alignment = .none;
+ }
+
+ switch (info.flags.vector_index) {
+ // Canonicalize host_size. If it matches the bit size of the pointee type,
+ // we change it to 0 here. If this causes an assertion trip, the pointee type
+ // needs to be resolved before calling this `ptrType` function.
+ .none => if (info.packed_offset.host_size != 0) {
+ const elem_bit_size = Type.fromInterned(info.child).bitSize(pt);
+ assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
+ if (info.packed_offset.host_size * 8 == elem_bit_size) {
+ canon_info.packed_offset.host_size = 0;
+ }
+ },
+ .runtime => {},
+ _ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size),
+ }
+
+ return Type.fromInterned(try pt.intern(.{ .ptr_type = canon_info }));
+}
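+// A rough example of the canonicalization above, assuming a target where `u32`
+// has ABI alignment 4: a request for `*align(4) u32` (alignment explicitly 4)
+// interns to the same pointer type as plain `*u32`, because the explicit
+// alignment matches the ABI alignment and is reset to `.none`.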
+
+/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer
+/// child type's alignment is resolved so that an invalid alignment is not used.
+/// In general, prefer this function during semantic analysis.
+pub fn ptrTypeSema(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Zcu.SemaError!Type {
+ if (info.flags.alignment != .none) {
+ _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(pt, .sema);
+ }
+ return pt.ptrType(info);
+}
+
+pub fn singleMutPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type {
+ return pt.ptrType(.{ .child = child_type.toIntern() });
+}
+
+pub fn singleConstPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type {
+ return pt.ptrType(.{
+ .child = child_type.toIntern(),
+ .flags = .{
+ .is_const = true,
+ },
+ });
+}
+
+pub fn manyConstPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type {
+ return pt.ptrType(.{
+ .child = child_type.toIntern(),
+ .flags = .{
+ .size = .Many,
+ .is_const = true,
+ },
+ });
+}
+
+pub fn adjustPtrTypeChild(pt: Zcu.PerThread, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
+ var info = ptr_ty.ptrInfo(pt.zcu);
+ info.child = new_child.toIntern();
+ return pt.ptrType(info);
+}
+
+pub fn funcType(pt: Zcu.PerThread, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
+ return Type.fromInterned(try pt.zcu.intern_pool.getFuncType(pt.zcu.gpa, pt.tid, key));
+}
+
+/// Use this for `anyframe->T` only.
+/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
+pub fn anyframeType(pt: Zcu.PerThread, payload_ty: Type) Allocator.Error!Type {
+ return Type.fromInterned(try pt.intern(.{ .anyframe_type = payload_ty.toIntern() }));
+}
+
+pub fn errorUnionType(pt: Zcu.PerThread, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type {
+ return Type.fromInterned(try pt.intern(.{ .error_union_type = .{
+ .error_set_type = error_set_ty.toIntern(),
+ .payload_type = payload_ty.toIntern(),
+ } }));
+}
+
+pub fn singleErrorSetType(pt: Zcu.PerThread, name: InternPool.NullTerminatedString) Allocator.Error!Type {
+ const names: *const [1]InternPool.NullTerminatedString = &name;
+ return Type.fromInterned(try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names));
+}
+
+/// Sorts `names` in place.
+pub fn errorSetFromUnsortedNames(
+ pt: Zcu.PerThread,
+ names: []InternPool.NullTerminatedString,
+) Allocator.Error!Type {
+ std.mem.sort(
+ InternPool.NullTerminatedString,
+ names,
+ {},
+ InternPool.NullTerminatedString.indexLessThan,
+ );
+ const new_ty = try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names);
+ return Type.fromInterned(new_ty);
+}
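+// For instance, passing the names of `error{B,A}` here sorts them into the
+// intern pool's canonical order (by string index), so any permutation of the
+// same names interns to the same error set type. This is a sketch of the
+// intent; the exact order comes from `indexLessThan`.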
+
+/// Supports only pointers, not pointer-like optionals.
+pub fn ptrIntValue(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
+ const mod = pt.zcu;
+ assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod));
+ assert(x != 0 or ty.isAllowzeroPtr(mod));
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
+ .ty = ty.toIntern(),
+ .base_addr = .int,
+ .byte_offset = x,
+ } }));
+}
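+// Roughly the comptime counterpart of `@ptrFromInt`; e.g. (made-up type and
+// address) `try pt.ptrIntValue(many_u8_ptr_ty, 0x1000)` interns a pointer value
+// whose base address is the integer 0x1000.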
+
+/// Creates an enum tag value based on the integer tag value.
+pub fn enumValue(pt: Zcu.PerThread, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value {
+ if (std.debug.runtime_safety) {
+ const tag = ty.zigTypeTag(pt.zcu);
+ assert(tag == .Enum);
+ }
+ return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = tag_int,
+ } }));
+}
+
+/// Creates an enum tag value based on the field index according to source code
+/// declaration order.
+pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Allocator.Error!Value {
+ const ip = &pt.zcu.intern_pool;
+ const enum_type = ip.loadEnumType(ty.toIntern());
+
+ if (enum_type.values.len == 0) {
+ // Auto-numbered fields.
+ return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = try pt.intern(.{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = field_index },
+ } }),
+ } }));
+ }
+
+ return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = enum_type.values.get(ip)[field_index],
+ } }));
+}
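
As a standalone illustration (not part of this commit), the distinction the branch above encodes can be seen in ordinary Zig: an auto-numbered enum's tag integer equals the field's declaration index, while an enum with explicit values uses the declared value.

const std = @import("std");

const Auto = enum { a, b, c }; // auto-numbered: tag int == field index
const Explicit = enum(u8) { a = 10, b = 20 }; // explicit values come from the declaration

pub fn main() void {
    // Prints "2 20": declaration index for Auto.c, declared value for Explicit.b.
    std.debug.print("{d} {d}\n", .{ @intFromEnum(Auto.c), @intFromEnum(Explicit.b) });
}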
+
+pub fn undefValue(pt: Zcu.PerThread, ty: Type) Allocator.Error!Value {
+ return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
+}
+
+pub fn undefRef(pt: Zcu.PerThread, ty: Type) Allocator.Error!Air.Inst.Ref {
+ return Air.internedToRef((try pt.undefValue(ty)).toIntern());
+}
+
+pub fn intValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value {
+ if (std.math.cast(u64, x)) |casted| return pt.intValue_u64(ty, casted);
+ if (std.math.cast(i64, x)) |casted| return pt.intValue_i64(ty, casted);
+ var limbs_buffer: [4]usize = undefined;
+ var big_int = BigIntMutable.init(&limbs_buffer, x);
+ return pt.intValue_big(ty, big_int.toConst());
+}
+
+pub fn intRef(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Air.Inst.Ref {
+ return Air.internedToRef((try pt.intValue(ty, x)).toIntern());
+}
+
+pub fn intValue_big(pt: Zcu.PerThread, ty: Type, x: BigIntConst) Allocator.Error!Value {
+ return Value.fromInterned(try pt.intern(.{ .int = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .big_int = x },
+ } }));
+}
+
+pub fn intValue_u64(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
+ return Value.fromInterned(try pt.intern(.{ .int = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .u64 = x },
+ } }));
+}
+
+pub fn intValue_i64(pt: Zcu.PerThread, ty: Type, x: i64) Allocator.Error!Value {
+ return Value.fromInterned(try pt.intern(.{ .int = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .i64 = x },
+ } }));
+}
+
+pub fn unionValue(pt: Zcu.PerThread, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
+ return Value.fromInterned(try pt.intern(.{ .un = .{
+ .ty = union_ty.toIntern(),
+ .tag = tag.toIntern(),
+ .val = val.toIntern(),
+ } }));
+}
+
+/// Casts the float representation down to the representation of the type,
+/// potentially losing data if the value cannot be represented exactly.
+pub fn floatValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value {
+ const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(pt.zcu.getTarget())) {
+ 16 => .{ .f16 = @as(f16, @floatCast(x)) },
+ 32 => .{ .f32 = @as(f32, @floatCast(x)) },
+ 64 => .{ .f64 = @as(f64, @floatCast(x)) },
+ 80 => .{ .f80 = @as(f80, @floatCast(x)) },
+ 128 => .{ .f128 = @as(f128, @floatCast(x)) },
+ else => unreachable,
+ };
+ return Value.fromInterned(try pt.intern(.{ .float = .{
+ .ty = ty.toIntern(),
+ .storage = storage,
+ } }));
+}
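
A minimal sketch (not part of this commit) of the narrowing behavior noted in the doc comment above: casting a wider float down to a narrower representation can lose data.

const std = @import("std");

pub fn main() void {
    const wide: f64 = 1.1;
    const narrow: f16 = @floatCast(wide); // narrowing cast; 1.1 is not exactly representable in f16
    std.debug.print("{d} -> {d}\n", .{ wide, @as(f64, narrow) });
}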
+
+pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value {
+ assert(pt.zcu.intern_pool.isOptionalType(opt_ty.toIntern()));
+ return Value.fromInterned(try pt.intern(.{ .opt = .{
+ .ty = opt_ty.toIntern(),
+ .val = .none,
+ } }));
+}
+
+pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type {
+ return pt.intType(.unsigned, Type.smallestUnsignedBits(max));
+}
+
+/// Returns the smallest possible integer type containing both `min` and
+/// `max`. Asserts that neither value is undef.
+/// TODO: if #3806 is implemented, this becomes trivial
+pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type {
+ const mod = pt.zcu;
+ assert(!min.isUndef(mod));
+ assert(!max.isUndef(mod));
+
+ if (std.debug.runtime_safety) {
+ assert(Value.order(min, max, pt).compare(.lte));
+ }
+
+ const sign = min.orderAgainstZero(pt) == .lt;
+
+ const min_val_bits = pt.intBitsForValue(min, sign);
+ const max_val_bits = pt.intBitsForValue(max, sign);
+
+ return pt.intType(
+ if (sign) .signed else .unsigned,
+ @max(min_val_bits, max_val_bits),
+ );
+}
+
+/// Given a value representing an integer, returns the number of bits necessary to represent
+/// this value in an integer. If `sign` is true, returns the number of bits necessary in a
+/// twos-complement integer; otherwise in an unsigned integer.
+/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
+pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
+ const mod = pt.zcu;
+ assert(!val.isUndef(mod));
+
+ const key = mod.intern_pool.indexToKey(val.toIntern());
+ switch (key.int.storage) {
+ .i64 => |x| {
+ if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @intFromBool(sign);
+ assert(sign);
+ // Protect against overflow in the following negation.
+ if (x == std.math.minInt(i64)) return 64;
+ return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1;
+ },
+ .u64 => |x| {
+ return Type.smallestUnsignedBits(x) + @intFromBool(sign);
+ },
+ .big_int => |big| {
+ if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign)));
+
+ // Zero is still a possibility, in which case unsigned is fine
+ if (big.eqlZero()) return 0;
+
+ return @as(u16, @intCast(big.bitCountTwosComp()));
+ },
+ .lazy_align => |lazy_ty| {
+ return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt).toByteUnits() orelse 0) + @intFromBool(sign);
+ },
+ .lazy_size => |lazy_ty| {
+ return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt)) + @intFromBool(sign);
+ },
+ }
+}
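
A self-contained sketch (illustrative only) of the bit counting used by `intFittingRange` and `intBitsForValue` above; the helper names are hypothetical and only mirror `Type.smallestUnsignedBits` for non-big-int values.

const std = @import("std");

// Bits needed to hold `x` as an unsigned integer (0 needs 0 bits).
fn smallestUnsignedBits(x: u64) u16 {
    if (x == 0) return 0;
    return @intCast(64 - @clz(x));
}

// Bits needed for `x` in a two's-complement (signed) or unsigned representation.
fn bitsForValue(x: i64, sign: bool) u16 {
    if (x >= 0) return smallestUnsignedBits(@intCast(x)) + @intFromBool(sign);
    // Negative: -(x + 1) avoids overflow at minInt(i64); add one bit for the sign.
    return smallestUnsignedBits(@intCast(-(x + 1))) + 1;
}

pub fn main() void {
    const min: i64 = -3;
    const max: i64 = 100;
    const sign = min < 0;
    const bits = @max(bitsForValue(min, sign), bitsForValue(max, sign));
    // -3 needs 3 signed bits, 100 needs 8, so the range -3..100 fits in an i8.
    std.debug.print("i{d}\n", .{bits});
}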
+
+pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) Zcu.UnionLayout {
+ const mod = pt.zcu;
+ const ip = &mod.intern_pool;
+ assert(loaded_union.haveLayout(ip));
+ var most_aligned_field: u32 = undefined;
+ var most_aligned_field_size: u64 = undefined;
+ var biggest_field: u32 = undefined;
+ var payload_size: u64 = 0;
+ var payload_align: InternPool.Alignment = .@"1";
+ for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+
+ const explicit_align = loaded_union.fieldAlign(ip, field_index);
+ const field_align = if (explicit_align != .none)
+ explicit_align
+ else
+ Type.fromInterned(field_ty).abiAlignment(pt);
+ const field_size = Type.fromInterned(field_ty).abiSize(pt);
+ if (field_size > payload_size) {
+ payload_size = field_size;
+ biggest_field = @intCast(field_index);
+ }
+ if (field_align.compare(.gte, payload_align)) {
+ payload_align = field_align;
+ most_aligned_field = @intCast(field_index);
+ most_aligned_field_size = field_size;
+ }
+ }
+ const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
+ if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) {
+ return .{
+ .abi_size = payload_align.forward(payload_size),
+ .abi_align = payload_align,
+ .most_aligned_field = most_aligned_field,
+ .most_aligned_field_size = most_aligned_field_size,
+ .biggest_field = biggest_field,
+ .payload_size = payload_size,
+ .payload_align = payload_align,
+ .tag_align = .none,
+ .tag_size = 0,
+ .padding = 0,
+ };
+ }
+
+ const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt);
+ const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1");
+ return .{
+ .abi_size = loaded_union.size(ip).*,
+ .abi_align = tag_align.max(payload_align),
+ .most_aligned_field = most_aligned_field,
+ .most_aligned_field_size = most_aligned_field_size,
+ .biggest_field = biggest_field,
+ .payload_size = payload_size,
+ .payload_align = payload_align,
+ .tag_align = tag_align,
+ .tag_size = tag_size,
+ .padding = loaded_union.padding(ip).*,
+ };
+}
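
A standalone sketch (not part of this commit) of the untagged case handled above: payload size is the largest field size, payload alignment is the largest field alignment, and the ABI size rounds the payload size up to that alignment.

const std = @import("std");

pub fn main() void {
    const field_sizes = [_]u64{ 1, 8, 3 };
    const field_aligns = [_]u64{ 1, 8, 2 };

    var payload_size: u64 = 0;
    var payload_align: u64 = 1;
    for (field_sizes, field_aligns) |size, alignment| {
        payload_size = @max(payload_size, size);
        payload_align = @max(payload_align, alignment);
    }
    const abi_size = std.mem.alignForward(u64, payload_size, payload_align);
    std.debug.print("size {d}, align {d}\n", .{ abi_size, payload_align }); // size 8, align 8
}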
+
+pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
+ return mod.getUnionLayout(loaded_union).abi_size;
+}
+
+/// Returns `.none` if the union is represented with 0 bits at runtime.
+pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment {
+ const mod = pt.zcu;
+ const ip = &mod.intern_pool;
+ const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
+ var max_align: InternPool.Alignment = .none;
+ if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt);
+ for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+
+ const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
+ max_align = max_align.max(field_align);
+ }
+ return max_align;
+}
+
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+pub fn unionFieldNormalAlignment(
+ pt: Zcu.PerThread,
+ loaded_union: InternPool.LoadedUnionType,
+ field_index: u32,
+) InternPool.Alignment {
+ return pt.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
+}
+
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn unionFieldNormalAlignmentAdvanced(
+ pt: Zcu.PerThread,
+ loaded_union: InternPool.LoadedUnionType,
+ field_index: u32,
+ strat: Type.ResolveStrat,
+) Zcu.SemaError!InternPool.Alignment {
+ const ip = &pt.zcu.intern_pool;
+ assert(loaded_union.flagsPtr(ip).layout != .@"packed");
+ const field_align = loaded_union.fieldAlign(ip, field_index);
+ if (field_align != .none) return field_align;
+ const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+ if (field_ty.isNoReturn(pt.zcu)) return .none;
+ return (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
+}
+
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
+pub fn structFieldAlignment(
+ pt: Zcu.PerThread,
+ explicit_alignment: InternPool.Alignment,
+ field_ty: Type,
+ layout: std.builtin.Type.ContainerLayout,
+) InternPool.Alignment {
+ return pt.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
+}
+
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn structFieldAlignmentAdvanced(
+ pt: Zcu.PerThread,
+ explicit_alignment: InternPool.Alignment,
+ field_ty: Type,
+ layout: std.builtin.Type.ContainerLayout,
+ strat: Type.ResolveStrat,
+) Zcu.SemaError!InternPool.Alignment {
+ assert(layout != .@"packed");
+ if (explicit_alignment != .none) return explicit_alignment;
+ const ty_abi_align = (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
+ switch (layout) {
+ .@"packed" => unreachable,
+ .auto => if (pt.zcu.getTarget().ofmt != .c) return ty_abi_align,
+ .@"extern" => {},
+ }
+ // extern
+ if (field_ty.isAbiInt(pt.zcu) and field_ty.intInfo(pt.zcu).bits >= 128) {
+ return ty_abi_align.maxStrict(.@"16");
+ }
+ return ty_abi_align;
+}
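
A sketch of the extern-layout special case checked above, over plain numbers (illustrative only; the 16-byte bump applies to ABI integer fields of 128 bits or more).

const std = @import("std");

fn externIntFieldAlign(natural_align: u64, bits: u16) u64 {
    // Integer fields of >= 128 bits get their alignment raised to at least 16 bytes.
    if (bits >= 128) return @max(natural_align, 16);
    return natural_align;
}

pub fn main() void {
    std.debug.print("{d} {d}\n", .{
        externIntFieldAlign(8, 64), // 8: left at its natural alignment
        externIntFieldAlign(8, 128), // 16: bumped
    });
}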
+
+/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
+/// in the packed struct InternPool data rather than computing them on the fly,
+/// but that was found to perform worse when measured on real-world projects.
+pub fn structPackedFieldBitOffset(
+ pt: Zcu.PerThread,
+ struct_type: InternPool.LoadedStructType,
+ field_index: u32,
+) u16 {
+ const mod = pt.zcu;
+ const ip = &mod.intern_pool;
+ assert(struct_type.layout == .@"packed");
+ assert(struct_type.haveLayout(ip));
+ var bit_sum: u64 = 0;
+ for (0..struct_type.field_types.len) |i| {
+ if (i == field_index) {
+ return @intCast(bit_sum);
+ }
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ bit_sum += field_ty.bitSize(pt);
+ }
+ unreachable; // index out of bounds
+}
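
The running bit sum computed above matches what `@bitOffsetOf` reports for an ordinary packed struct; a minimal check (not part of this commit):

const std = @import("std");

const P = packed struct {
    a: u3,
    b: u9,
    c: u4,
};

pub fn main() void {
    // Bit offsets are the running sum of the preceding fields' bit sizes: 0, 3, 12.
    std.debug.print("{d} {d} {d}\n", .{
        @bitOffsetOf(P, "a"),
        @bitOffsetOf(P, "b"),
        @bitOffsetOf(P, "c"),
    });
}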
+
+pub fn getBuiltin(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Air.Inst.Ref {
+ const decl_index = try pt.getBuiltinDecl(name);
+ pt.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt");
+ return Air.internedToRef(pt.zcu.declPtr(decl_index).val.toIntern());
+}
+
+pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!InternPool.DeclIndex {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig");
+ const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?;
+ const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?;
+ const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
+ const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
+ pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
+ const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
+ const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
+ return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
+}
+
+pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type {
+ const ty_inst = try pt.getBuiltin(name);
+ const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt"));
+ ty.resolveFully(pt) catch @panic("std.builtin is corrupt");
+ return ty;
+}
+
+const Air = @import("../Air.zig");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const BigIntConst = std.math.big.int.Const;
+const BigIntMutable = std.math.big.int.Mutable;
+const build_options = @import("build_options");
+const builtin = @import("builtin");
+const Cache = std.Build.Cache;
+const InternPool = @import("../InternPool.zig");
+const isUpDir = @import("../introspect.zig").isUpDir;
+const Liveness = @import("../Liveness.zig");
+const log = std.log.scoped(.zcu);
+const Module = @import("../Package.zig").Module;
+const Sema = @import("../Sema.zig");
+const std = @import("std");
+const target_util = @import("../target.zig");
+const trace = @import("../tracy.zig").trace;
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+const Zir = std.zig.Zir;
src/Air.zig
@@ -1563,12 +1563,12 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
}
/// Returns `null` if runtime-known.
-pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
+pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value {
if (inst.toInterned()) |ip_index| {
return Value.fromInterned(ip_index);
}
const index = inst.toIndex().?;
- return air.typeOfIndex(index, &mod.intern_pool).onePossibleValue(mod);
+ return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt);
}
pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 {
src/codegen.zig
@@ -13,12 +13,10 @@ const trace = @import("tracy.zig").trace;
const Air = @import("Air.zig");
const Allocator = mem.Allocator;
const Compilation = @import("Compilation.zig");
-const ErrorMsg = Module.ErrorMsg;
+const ErrorMsg = Zcu.ErrorMsg;
const InternPool = @import("InternPool.zig");
const Liveness = @import("Liveness.zig");
const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const Target = std.Target;
const Type = @import("Type.zig");
const Value = @import("Value.zig");
@@ -47,14 +45,15 @@ pub const DebugInfoOutput = union(enum) {
pub fn generateFunction(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const decl = zcu.declPtr(func.owner_decl);
const namespace = zcu.namespacePtr(decl.src_namespace);
@@ -62,35 +61,36 @@ pub fn generateFunction(
switch (target.cpu.arch) {
.arm,
.armeb,
- => return @import("arch/arm/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
+ => return @import("arch/arm/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.aarch64,
.aarch64_be,
.aarch64_32,
- => return @import("arch/aarch64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
- .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
- .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
- .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
+ => return @import("arch/aarch64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
+ .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
+ .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
+ .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
.wasm32,
.wasm64,
- => return @import("arch/wasm/CodeGen.zig").generate(lf, src_loc, func_index, air, liveness, code, debug_output),
+ => return @import("arch/wasm/CodeGen.zig").generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output),
else => unreachable,
}
}
pub fn generateLazyFunction(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) CodeGenError!Result {
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
const decl_index = lazy_sym.ty.getOwnerDecl(zcu);
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const target = namespace.fileScope(zcu).mod.resolved_target.result;
switch (target.cpu.arch) {
- .x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, src_loc, lazy_sym, code, debug_output),
+ .x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output),
else => unreachable,
}
}
@@ -105,7 +105,8 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian
pub fn generateLazySymbol(
bin_file: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
// TODO don't use an "out" parameter like this; put it in the result instead
alignment: *Alignment,
@@ -119,25 +120,24 @@ pub fn generateLazySymbol(
defer tracy.end();
const comp = bin_file.comp;
- const zcu = comp.module.?;
- const ip = &zcu.intern_pool;
+ const ip = &pt.zcu.intern_pool;
const target = comp.root_mod.resolved_target.result;
const endian = target.cpu.arch.endian();
const gpa = comp.gpa;
log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
@tagName(lazy_sym.kind),
- lazy_sym.ty.fmt(zcu),
+ lazy_sym.ty.fmt(pt),
});
if (lazy_sym.kind == .code) {
alignment.* = target_util.defaultFunctionAlignment(target);
- return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output);
+ return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, code, debug_output);
}
- if (lazy_sym.ty.isAnyError(zcu)) {
+ if (lazy_sym.ty.isAnyError(pt.zcu)) {
alignment.* = .@"4";
- const err_names = zcu.global_error_set.keys();
+ const err_names = pt.zcu.global_error_set.keys();
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian);
var offset = code.items.len;
try code.resize((1 + err_names.len + 1) * 4);
@@ -151,9 +151,9 @@ pub fn generateLazySymbol(
}
mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian);
return Result.ok;
- } else if (lazy_sym.ty.zigTypeTag(zcu) == .Enum) {
+ } else if (lazy_sym.ty.zigTypeTag(pt.zcu) == .Enum) {
alignment.* = .@"1";
- const tag_names = lazy_sym.ty.enumFields(zcu);
+ const tag_names = lazy_sym.ty.enumFields(pt.zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
try code.ensureUnusedCapacity(tag_name.len + 1);
@@ -165,13 +165,14 @@ pub fn generateLazySymbol(
gpa,
src_loc,
"TODO implement generateLazySymbol for {s} {}",
- .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(zcu) },
+ .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) },
) };
}
pub fn generateSymbol(
bin_file: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
val: Value,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
@@ -180,17 +181,17 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();
- const mod = bin_file.comp.module.?;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty = val.typeOf(mod);
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- log.debug("generateSymbol: val = {}", .{val.fmtValue(mod, null)});
+ log.debug("generateSymbol: val = {}", .{val.fmtValue(pt, null)});
if (val.isUndefDeep(mod)) {
- const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return .ok;
}
@@ -236,9 +237,9 @@ pub fn generateSymbol(
.empty_enum_value,
=> unreachable, // non-runtime values
.int => {
- const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
var space: Value.BigIntSpace = undefined;
- const int_val = val.toBigInt(&space, mod);
+ const int_val = val.toBigInt(&space, pt);
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
},
.err => |err| {
@@ -252,14 +253,14 @@ pub fn generateSymbol(
.payload => 0,
};
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
try code.writer().writeInt(u16, err_val, endian);
return .ok;
}
- const payload_align = payload_ty.abiAlignment(mod);
- const error_align = Type.anyerror.abiAlignment(mod);
- const abi_align = ty.abiAlignment(mod);
+ const payload_align = payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ const abi_align = ty.abiAlignment(pt);
// error value first when its type is larger than the error union's payload
if (error_align.order(payload_align) == .gt) {
@@ -269,8 +270,8 @@ pub fn generateSymbol(
// emit payload part of the error union
{
const begin = code.items.len;
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
+ .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}), code, debug_output, reloc_info)) {
.ok => {},
@@ -300,7 +301,7 @@ pub fn generateSymbol(
},
.enum_tag => |enum_tag| {
const int_tag_ty = ty.intTagType(mod);
- switch (try generateSymbol(bin_file, src_loc, try mod.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -311,21 +312,21 @@ pub fn generateSymbol(
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
.f80 => |f80_val| {
writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
- const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
try code.appendNTimes(0, abi_size - 10);
},
.f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
},
- .ptr => switch (try lowerPtr(bin_file, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) {
+ .ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) {
.ok => {},
.fail => |em| return .{ .fail = em },
},
.slice => |slice| {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -333,11 +334,11 @@ pub fn generateSymbol(
.opt => {
const payload_type = ty.optionalChild(mod);
const payload_val = val.optionalValue(mod);
- const abi_size = math.cast(usize, ty.abiSize(mod)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
if (ty.optionalReprIsPayload(mod)) {
if (payload_val) |value| {
- switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -345,10 +346,12 @@ pub fn generateSymbol(
try code.appendNTimes(0, abi_size);
}
} else {
- const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
- if (payload_type.hasRuntimeBits(mod)) {
- const value = payload_val orelse Value.fromInterned((try mod.intern(.{ .undef = payload_type.toIntern() })));
- switch (try generateSymbol(bin_file, src_loc, value, code, debug_output, reloc_info)) {
+ const padding = abi_size - (math.cast(usize, payload_type.abiSize(pt)) orelse return error.Overflow) - 1;
+ if (payload_type.hasRuntimeBits(pt)) {
+ const value = payload_val orelse Value.fromInterned(try pt.intern(.{
+ .undef = payload_type.toIntern(),
+ }));
+ switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -363,7 +366,7 @@ pub fn generateSymbol(
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < array_type.lenIncludingSentinel()) : (index += 1) {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@intCast(index)],
.repeated_elem => |elem| if (index < array_type.len)
@@ -378,8 +381,7 @@ pub fn generateSymbol(
},
},
.vector_type => |vector_type| {
- const abi_size = math.cast(usize, ty.abiSize(mod)) orelse
- return error.Overflow;
+ const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
if (vector_type.child == .bool_type) {
const bytes = try code.addManyAsSlice(abi_size);
@memset(bytes, 0xaa);
@@ -424,7 +426,7 @@ pub fn generateSymbol(
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[
math.cast(usize, index) orelse return error.Overflow
@@ -439,7 +441,7 @@ pub fn generateSymbol(
}
const padding = abi_size -
- (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(mod) * vector_type.len) orelse
+ (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(pt) * vector_type.len) orelse
return error.Overflow);
if (padding > 0) try code.appendNTimes(0, padding);
}
@@ -452,10 +454,10 @@ pub fn generateSymbol(
0..,
) |field_ty, comptime_val, index| {
if (comptime_val != .none) continue;
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
const field_val = switch (aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
@@ -463,14 +465,14 @@ pub fn generateSymbol(
.repeated_elem => |elem| elem,
};
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_field_end = code.items.len - struct_begin;
// Pad struct members if required
- const padded_field_end = ty.structFieldOffset(index + 1, mod);
+ const padded_field_end = ty.structFieldOffset(index + 1, pt);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
return error.Overflow;
@@ -483,15 +485,14 @@ pub fn generateSymbol(
const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) {
.@"packed" => {
- const abi_size = math.cast(usize, ty.abiSize(mod)) orelse
- return error.Overflow;
+ const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
const current_pos = code.items.len;
try code.appendNTimes(0, abi_size);
var bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
@@ -502,18 +503,18 @@ pub fn generateSymbol(
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
- const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
+ const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(pt)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
- Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
+ Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
}
- bits += @intCast(Type.fromInterned(field_ty).bitSize(mod));
+ bits += @intCast(Type.fromInterned(field_ty).bitSize(pt));
}
},
.auto, .@"extern" => {
@@ -524,10 +525,10 @@ pub fn generateSymbol(
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
@@ -541,7 +542,7 @@ pub fn generateSymbol(
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -562,15 +563,15 @@ pub fn generateSymbol(
else => unreachable,
},
.un => |un| {
- const layout = ty.unionGetLayout(mod);
+ const layout = ty.unionGetLayout(pt);
if (layout.payload_size == 0) {
- return generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
+ return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
}
// Check if we should store the tag first.
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -580,28 +581,28 @@ pub fn generateSymbol(
if (un.tag != .none) {
const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBits(mod)) {
+ if (!field_ty.hasRuntimeBits(pt)) {
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
- const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow;
+ const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(pt)) orelse return error.Overflow;
if (padding > 0) {
try code.appendNTimes(0, padding);
}
}
} else {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
- switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
+ switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -618,22 +619,24 @@ pub fn generateSymbol(
fn lowerPtr(
bin_file: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
ptr_val: InternPool.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
prev_offset: u64,
) CodeGenError!Result {
- const zcu = bin_file.comp.module.?;
+ const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
- .decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info, offset),
- .anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info, offset),
- .int => try generateSymbol(bin_file, src_loc, try zcu.intValue(Type.usize, offset), code, debug_output, reloc_info),
+ .decl => |decl| try lowerDeclRef(bin_file, pt, src_loc, decl, code, debug_output, reloc_info, offset),
+ .anon_decl => |ad| try lowerAnonDeclRef(bin_file, pt, src_loc, ad, code, debug_output, reloc_info, offset),
+ .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, debug_output, reloc_info),
.eu_payload => |eu_ptr| try lowerPtr(
bin_file,
+ pt,
src_loc,
eu_ptr,
code,
@@ -641,11 +644,12 @@ fn lowerPtr(
reloc_info,
offset + errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
- zcu,
+ pt,
),
),
.opt_payload => |opt_ptr| try lowerPtr(
bin_file,
+ pt,
src_loc,
opt_ptr,
code,
@@ -666,12 +670,12 @@ fn lowerPtr(
};
},
.Struct, .Union => switch (base_ty.containerLayout(zcu)) {
- .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
+ .auto => base_ty.structFieldOffset(@intCast(field.index), pt),
.@"extern", .@"packed" => unreachable,
},
else => unreachable,
};
- return lowerPtr(bin_file, src_loc, field.base, code, debug_output, reloc_info, offset + field_off);
+ return lowerPtr(bin_file, pt, src_loc, field.base, code, debug_output, reloc_info, offset + field_off);
},
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
@@ -683,7 +687,8 @@ const RelocInfo = struct {
fn lowerAnonDeclRef(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
@@ -691,22 +696,21 @@ fn lowerAnonDeclRef(
offset: u64,
) CodeGenError!Result {
_ = debug_output;
- const zcu = lf.comp.module.?;
- const ip = &zcu.intern_pool;
+ const ip = &pt.zcu.intern_pool;
const target = lf.comp.root_mod.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const decl_val = anon_decl.val;
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
- log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(zcu)});
- const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
- if (!is_fn_body and !decl_ty.hasRuntimeBits(zcu)) {
+ log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(pt)});
+ const is_fn_body = decl_ty.zigTypeTag(pt.zcu) == .Fn;
+ if (!is_fn_body and !decl_ty.hasRuntimeBits(pt)) {
try code.appendNTimes(0xaa, ptr_width_bytes);
return Result.ok;
}
const decl_align = ip.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
- const res = try lf.lowerAnonDecl(decl_val, decl_align, src_loc);
+ const res = try lf.lowerAnonDecl(pt, decl_val, decl_align, src_loc);
switch (res) {
.ok => {},
.fail => |em| return .{ .fail = em },
@@ -730,7 +734,8 @@ fn lowerAnonDeclRef(
fn lowerDeclRef(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
decl_index: InternPool.DeclIndex,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
@@ -739,14 +744,14 @@ fn lowerDeclRef(
) CodeGenError!Result {
_ = src_loc;
_ = debug_output;
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
const decl = zcu.declPtr(decl_index);
const namespace = zcu.namespacePtr(decl.src_namespace);
const target = namespace.fileScope(zcu).mod.resolved_target.result;
const ptr_width = target.ptrBitWidth();
const is_fn_body = decl.typeOf(zcu).zigTypeTag(zcu) == .Fn;
- if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(zcu)) {
+ if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(pt)) {
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
return Result.ok;
}
@@ -814,7 +819,7 @@ pub const GenResult = union(enum) {
fn fail(
gpa: Allocator,
- src_loc: Module.LazySrcLoc,
+ src_loc: Zcu.LazySrcLoc,
comptime format: []const u8,
args: anytype,
) Allocator.Error!GenResult {
@@ -825,14 +830,15 @@ pub const GenResult = union(enum) {
fn genDeclRef(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
val: Value,
ptr_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
- log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu, null)});
+ log.debug("genDeclRef: val = {}", .{val.fmtValue(pt, null)});
const ptr_decl = zcu.declPtr(ptr_decl_index);
const namespace = zcu.namespacePtr(ptr_decl.src_namespace);
@@ -848,7 +854,7 @@ fn genDeclRef(
};
const decl = zcu.declPtr(decl_index);
- if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
const imm: u64 = switch (ptr_bytes) {
1 => 0xaa,
2 => 0xaaaa,
@@ -865,12 +871,12 @@ fn genDeclRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
- return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? });
+ return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? });
}
} else if (ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = ty.elemType2(zcu);
- if (!elem_ty.hasRuntimeBits(zcu)) {
- return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? });
+ if (!elem_ty.hasRuntimeBits(pt)) {
+ return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? });
}
}
@@ -931,15 +937,15 @@ fn genDeclRef(
fn genUnnamedConst(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
val: Value,
owner_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
- const zcu = lf.comp.module.?;
const gpa = lf.comp.gpa;
- log.debug("genUnnamedConst: val = {}", .{val.fmtValue(zcu, null)});
+ log.debug("genUnnamedConst: val = {}", .{val.fmtValue(pt, null)});
- const local_sym_index = lf.lowerUnnamedConst(val, owner_decl_index) catch |err| {
+ const local_sym_index = lf.lowerUnnamedConst(pt, val, owner_decl_index) catch |err| {
return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
};
switch (lf.tag) {
@@ -970,15 +976,16 @@ fn genUnnamedConst(
pub fn genTypedValue(
lf: *link.File,
- src_loc: Module.LazySrcLoc,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
val: Value,
owner_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
- const zcu = lf.comp.module.?;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
- log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu, null)});
+ log.debug("genTypedValue: val = {}", .{val.fmtValue(pt, null)});
if (val.isUndef(zcu))
return GenResult.mcv(.undef);
@@ -990,7 +997,7 @@ pub fn genTypedValue(
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
- .decl => |decl| return genDeclRef(lf, src_loc, val, decl),
+ .decl => |decl| return genDeclRef(lf, pt, src_loc, val, decl),
else => {},
},
else => {},
@@ -1007,7 +1014,7 @@ pub fn genTypedValue(
.none => {},
else => switch (ip.indexToKey(val.toIntern())) {
.int => {
- return GenResult.mcv(.{ .immediate = val.toUnsignedInt(zcu) });
+ return GenResult.mcv(.{ .immediate = val.toUnsignedInt(pt) });
},
else => {},
},
@@ -1017,8 +1024,8 @@ pub fn genTypedValue(
const info = ty.intInfo(zcu);
if (info.bits <= ptr_bits) {
const unsigned: u64 = switch (info.signedness) {
- .signed => @bitCast(val.toSignedInt(zcu)),
- .unsigned => val.toUnsignedInt(zcu),
+ .signed => @bitCast(val.toSignedInt(pt)),
+ .unsigned => val.toUnsignedInt(pt),
};
return GenResult.mcv(.{ .immediate = unsigned });
}
@@ -1030,11 +1037,12 @@ pub fn genTypedValue(
if (ty.isPtrLikeOptional(zcu)) {
return genTypedValue(
lf,
+ pt,
src_loc,
val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }),
owner_decl_index,
);
- } else if (ty.abiSize(zcu) == 1) {
+ } else if (ty.abiSize(pt) == 1) {
return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) });
}
},
@@ -1042,6 +1050,7 @@ pub fn genTypedValue(
const enum_tag = ip.indexToKey(val.toIntern()).enum_tag;
return genTypedValue(
lf,
+ pt,
src_loc,
Value.fromInterned(enum_tag.int),
owner_decl_index,
@@ -1055,14 +1064,15 @@ pub fn genTypedValue(
.ErrorUnion => {
const err_type = ty.errorUnionSet(zcu);
const payload_type = ty.errorUnionPayload(zcu);
- if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
// We use the error type directly as the type.
- const err_int_ty = try zcu.errorIntType();
+ const err_int_ty = try pt.errorIntType();
switch (ip.indexToKey(val.toIntern()).error_union.val) {
.err_name => |err_name| return genTypedValue(
lf,
+ pt,
src_loc,
- Value.fromInterned(try zcu.intern(.{ .err = .{
+ Value.fromInterned(try pt.intern(.{ .err = .{
.ty = err_type.toIntern(),
.name = err_name,
} })),
@@ -1070,8 +1080,9 @@ pub fn genTypedValue(
),
.payload => return genTypedValue(
lf,
+ pt,
src_loc,
- try zcu.intValue(err_int_ty, 0),
+ try pt.intValue(err_int_ty, 0),
owner_decl_index,
),
}
@@ -1090,26 +1101,26 @@ pub fn genTypedValue(
else => {},
}
- return genUnnamedConst(lf, src_loc, val, owner_decl_index);
+ return genUnnamedConst(lf, pt, src_loc, val, owner_decl_index);
}
-pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
- const payload_align = payload_ty.abiAlignment(mod);
- const error_align = Type.anyerror.abiAlignment(mod);
- if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
+ const payload_align = payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
return 0;
} else {
- return payload_align.forward(Type.anyerror.abiSize(mod));
+ return payload_align.forward(Type.anyerror.abiSize(pt));
}
}
-pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
- const payload_align = payload_ty.abiAlignment(mod);
- const error_align = Type.anyerror.abiAlignment(mod);
- if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return error_align.forward(payload_ty.abiSize(mod));
+pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
+ const payload_align = payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(pt);
+ if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ return error_align.forward(payload_ty.abiSize(pt));
} else {
return 0;
}
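
A standalone sketch (not part of this commit) of the offset rules above, over plain sizes and alignments: whichever of payload and error value has the larger alignment is placed first, and the other follows at the next suitably aligned offset. The real functions additionally special-case zero-bit payloads.

const std = @import("std");

fn payloadOffset(error_size: u64, payload_align: u64, error_align: u64) u64 {
    if (payload_align >= error_align) return 0;
    return std.mem.alignForward(u64, error_size, payload_align);
}

fn errorOffset(payload_size: u64, payload_align: u64, error_align: u64) u64 {
    if (payload_align >= error_align) return std.mem.alignForward(u64, payload_size, error_align);
    return 0;
}

pub fn main() void {
    // Payload of size 8 / align 8 next to a 2-byte, 2-aligned error value:
    // the payload goes first and the error code lands at offset 8.
    std.debug.print("payload at {d}, error at {d}\n", .{
        payloadOffset(2, 8, 2),
        errorOffset(8, 8, 2),
    });
}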
src/Compilation.zig
@@ -2146,6 +2146,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try comp.performAllTheWork(main_progress_node);
if (comp.module) |zcu| {
+ const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
+
if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
std.debug.print("intern pool stats for '{s}':\n", .{
comp.root_name,
@@ -2165,10 +2167,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
// The `test_functions` decl has been intentionally postponed until now,
// at which point we must populate it with the list of test functions that
// have been discovered and not filtered out.
- try zcu.populateTestFunctions(main_progress_node);
+ try pt.populateTestFunctions(main_progress_node);
}
- try zcu.processExports();
+ try pt.processExports();
}
if (comp.totalErrorCount() != 0) {
@@ -2247,7 +2249,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
}
- try flush(comp, arena, main_progress_node);
+ try flush(comp, arena, .main, main_progress_node);
if (comp.totalErrorCount() != 0) return;
// Failure here only means an unnecessary cache miss.
@@ -2264,16 +2266,16 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
whole.lock = man.toOwnedLock();
},
.incremental => {
- try flush(comp, arena, main_progress_node);
+ try flush(comp, arena, .main, main_progress_node);
if (comp.totalErrorCount() != 0) return;
},
}
}
-fn flush(comp: *Compilation, arena: Allocator, prog_node: std.Progress.Node) !void {
+fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
if (comp.bin_file) |lf| {
// This is needed before reading the error flags.
- lf.flush(arena, prog_node) catch |err| switch (err) {
+ lf.flush(arena, tid, prog_node) catch |err| switch (err) {
error.FlushFailure => {}, // error reported through link_error_flags
error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr
else => |e| return e,
@@ -3419,7 +3421,7 @@ pub fn performAllTheWork(
while (true) {
if (comp.work_queue.readItem()) |work_item| {
- try processOneJob(comp, work_item, main_progress_node);
+ try processOneJob(0, comp, work_item, main_progress_node);
continue;
}
if (comp.module) |zcu| {
@@ -3447,11 +3449,11 @@ pub fn performAllTheWork(
}
}
-fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
+fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
switch (job) {
.codegen_decl => |decl_index| {
- const zcu = comp.module.?;
- const decl = zcu.declPtr(decl_index);
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+ const decl = pt.zcu.declPtr(decl_index);
switch (decl.analysis) {
.unreferenced => unreachable,
@@ -3469,7 +3471,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
assert(decl.has_tv);
- try zcu.linkerUpdateDecl(decl_index);
+ try pt.linkerUpdateDecl(decl_index);
return;
},
}
@@ -3478,16 +3480,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
const named_frame = tracy.namedFrame("codegen_func");
defer named_frame.end();
- const zcu = comp.module.?;
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
// This call takes ownership of `func.air`.
- try zcu.linkerUpdateFunc(func.func, func.air);
+ try pt.linkerUpdateFunc(func.func, func.air);
},
.analyze_func => |func| {
const named_frame = tracy.namedFrame("analyze_func");
defer named_frame.end();
- const zcu = comp.module.?;
- zcu.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+ pt.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
@@ -3496,8 +3498,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++
"not decl analysis, which is too early to know about @export calls");
- const zcu = comp.module.?;
- const decl = zcu.declPtr(decl_index);
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+ const decl = pt.zcu.declPtr(decl_index);
switch (decl.analysis) {
.unreferenced => unreachable,
@@ -3515,7 +3517,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
defer named_frame.end();
const gpa = comp.gpa;
- const emit_h = zcu.emit_h.?;
+ const emit_h = pt.zcu.emit_h.?;
_ = try emit_h.decl_table.getOrPut(gpa, decl_index);
const decl_emit_h = emit_h.declPtr(decl_index);
const fwd_decl = &decl_emit_h.fwd_decl;
@@ -3523,11 +3525,11 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
var ctypes_arena = std.heap.ArenaAllocator.init(gpa);
defer ctypes_arena.deinit();
- const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu);
+ const file_scope = pt.zcu.namespacePtr(decl.src_namespace).fileScope(pt.zcu);
var dg: c_codegen.DeclGen = .{
.gpa = gpa,
- .zcu = zcu,
+ .pt = pt,
.mod = file_scope.mod,
.error_msg = null,
.pass = .{ .decl = decl_index },
@@ -3557,25 +3559,25 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
}
},
.analyze_decl => |decl_index| {
- const zcu = comp.module.?;
- zcu.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+ pt.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
- const decl = zcu.declPtr(decl_index);
+ const decl = pt.zcu.declPtr(decl_index);
if (decl.kind == .@"test" and comp.config.is_test) {
// Tests are always emitted in test binaries. The decl_refs are created by
// Zcu.populateTestFunctions, but this will not queue body analysis, so do
// that now.
- try zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
+ try pt.zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
}
},
.resolve_type_fully => |ty| {
const named_frame = tracy.namedFrame("resolve_type_fully");
defer named_frame.end();
- const zcu = comp.module.?;
- Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+ Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
@@ -3603,12 +3605,12 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
};
},
- .analyze_mod => |pkg| {
+ .analyze_mod => |mod| {
const named_frame = tracy.namedFrame("analyze_mod");
defer named_frame.end();
- const zcu = comp.module.?;
- zcu.semaPkg(pkg) catch |err| switch (err) {
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+ pt.semaPkg(mod) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
src/crash_report.zig
@@ -76,9 +76,9 @@ fn dumpStatusReport() !void {
const stderr = io.getStdErr().writer();
const block: *Sema.Block = anal.block;
- const mod = anal.sema.mod;
+ const zcu = anal.sema.pt.zcu;
- const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod);
+ const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu);
try stderr.writeAll("Analyzing ");
try writeFilePath(file, stderr);
@@ -104,7 +104,7 @@ fn dumpStatusReport() !void {
while (parent) |curr| {
fba.reset();
try stderr.writeAll(" in ");
- const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, mod);
+ const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu);
try writeFilePath(cur_block_file, stderr);
try stderr.writeAll("\n > ");
print_zir.renderSingleInstruction(
src/InternPool.zig
@@ -4548,17 +4548,14 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void {
// This inserts all the statically-known values into the intern pool in the
// order expected.
- for (static_keys[0..@intFromEnum(Index.empty_struct_type)]) |key| {
- _ = ip.get(gpa, key) catch unreachable;
- }
- _ = ip.getAnonStructType(gpa, .{
- .types = &.{},
- .names = &.{},
- .values = &.{},
- }) catch unreachable;
- for (static_keys[@intFromEnum(Index.empty_struct_type) + 1 ..]) |key| {
- _ = ip.get(gpa, key) catch unreachable;
- }
+ for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) {
+ .empty_struct_type => assert(try ip.getAnonStructType(gpa, .main, .{
+ .types = &.{},
+ .names = &.{},
+ .values = &.{},
+ }) == .empty_struct_type),
+ else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index),
+ };
if (std.debug.runtime_safety) {
// Sanity check.
@@ -5242,7 +5239,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key
} };
}
-pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
+pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
@@ -5266,8 +5263,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
_ = ip.map.pop();
var new_key = key;
new_key.ptr_type.flags.size = .Many;
- const ptr_type_index = try ip.get(gpa, new_key);
+ const ptr_type_index = try ip.get(gpa, tid, new_key);
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
+
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{
.tag = .type_slice,
@@ -5519,7 +5517,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
else => unreachable,
}
_ = ip.map.pop();
- const index_index = try ip.get(gpa, .{ .int = .{
+ const index_index = try ip.get(gpa, tid, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = base_index.index },
} });
@@ -5932,7 +5930,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const elem = switch (aggregate.storage) {
.bytes => |bytes| elem: {
_ = ip.map.pop();
- const elem = try ip.get(gpa, .{ .int = .{
+ const elem = try ip.get(gpa, tid, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes.at(0, ip) },
} });
@@ -6074,7 +6072,12 @@ pub const UnionTypeInit = struct {
},
};
-pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!WipNamespaceType.Result {
+pub fn getUnionType(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ ini: UnionTypeInit,
+) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
@@ -6221,6 +6224,7 @@ pub const StructTypeInit = struct {
pub fn getStructType(
ip: *InternPool,
gpa: Allocator,
+ _: Zcu.PerThread.Id,
ini: StructTypeInit,
) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
@@ -6396,7 +6400,12 @@ pub const AnonStructTypeInit = struct {
values: []const Index,
};
-pub fn getAnonStructType(ip: *InternPool, gpa: Allocator, ini: AnonStructTypeInit) Allocator.Error!Index {
+pub fn getAnonStructType(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ ini: AnonStructTypeInit,
+) Allocator.Error!Index {
assert(ini.types.len == ini.values.len);
for (ini.types) |elem| assert(elem != .none);
@@ -6450,7 +6459,12 @@ pub const GetFuncTypeKey = struct {
addrspace_is_generic: bool = false,
};
-pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index {
+pub fn getFuncType(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ key: GetFuncTypeKey,
+) Allocator.Error!Index {
// Validate input parameters.
assert(key.return_type != .none);
for (key.param_types) |param_type| assert(param_type != .none);
@@ -6503,7 +6517,12 @@ pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocat
return @enumFromInt(ip.items.len - 1);
}
-pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: Key.ExternFunc) Allocator.Error!Index {
+pub fn getExternFunc(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ key: Key.ExternFunc,
+) Allocator.Error!Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
@@ -6531,7 +6550,12 @@ pub const GetFuncDeclKey = struct {
is_noinline: bool,
};
-pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index {
+pub fn getFuncDecl(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ key: GetFuncDeclKey,
+) Allocator.Error!Index {
// The strategy here is to add the function type unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
// arrays. This is similar to what `getOrPutTrailingString` does.
@@ -6598,7 +6622,12 @@ pub const GetFuncDeclIesKey = struct {
rbrace_column: u32,
};
-pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index {
+pub fn getFuncDeclIes(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ key: GetFuncDeclIesKey,
+) Allocator.Error!Index {
// Validate input parameters.
assert(key.bare_return_type != .none);
for (key.param_types) |param_type| assert(param_type != .none);
@@ -6707,6 +6736,7 @@ pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) A
pub fn getErrorSetType(
ip: *InternPool,
gpa: Allocator,
+ _: Zcu.PerThread.Id,
names: []const NullTerminatedString,
) Allocator.Error!Index {
assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan));
@@ -6770,11 +6800,16 @@ pub const GetFuncInstanceKey = struct {
inferred_error_set: bool,
};
-pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index {
+pub fn getFuncInstance(
+ ip: *InternPool,
+ gpa: Allocator,
+ tid: Zcu.PerThread.Id,
+ arg: GetFuncInstanceKey,
+) Allocator.Error!Index {
if (arg.inferred_error_set)
- return getFuncInstanceIes(ip, gpa, arg);
+ return getFuncInstanceIes(ip, gpa, tid, arg);
- const func_ty = try ip.getFuncType(gpa, .{
+ const func_ty = try ip.getFuncType(gpa, tid, .{
.param_types = arg.param_types,
.return_type = arg.bare_return_type,
.noalias_bits = arg.noalias_bits,
@@ -6844,6 +6879,7 @@ pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey)
pub fn getFuncInstanceIes(
ip: *InternPool,
gpa: Allocator,
+ _: Zcu.PerThread.Id,
arg: GetFuncInstanceKey,
) Allocator.Error!Index {
// Validate input parameters.
@@ -6955,7 +6991,6 @@ pub fn getFuncInstanceIes(
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{
.func_type = extraFuncType(ip, func_type_extra_index),
}, adapter).found_existing);
-
return finishFuncInstance(
ip,
gpa,
@@ -7096,6 +7131,7 @@ pub const WipEnumType = struct {
pub fn getEnumType(
ip: *InternPool,
gpa: Allocator,
+ _: Zcu.PerThread.Id,
ini: EnumTypeInit,
) Allocator.Error!WipEnumType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
@@ -7172,7 +7208,7 @@ pub fn getEnumType(
break :m values_map.toOptional();
};
errdefer if (ini.has_values) {
- _ = ip.map.pop();
+ _ = ip.maps.pop();
};
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
@@ -7245,7 +7281,12 @@ const GeneratedTagEnumTypeInit = struct {
/// Creates an enum type which was automatically-generated as the tag type of a
/// `union` with no explicit tag type. Since this is only called once per union
/// type, it asserts that no matching type yet exists.
-pub fn getGeneratedTagEnumType(ip: *InternPool, gpa: Allocator, ini: GeneratedTagEnumTypeInit) Allocator.Error!Index {
+pub fn getGeneratedTagEnumType(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ ini: GeneratedTagEnumTypeInit,
+) Allocator.Error!Index {
assert(ip.isUnion(ini.owner_union_ty));
assert(ip.isIntegerType(ini.tag_ty));
for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty);
@@ -7342,7 +7383,12 @@ pub const OpaqueTypeInit = struct {
},
};
-pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeInit) Allocator.Error!WipNamespaceType.Result {
+pub fn getOpaqueType(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ ini: OpaqueTypeInit,
+) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
@@ -7680,23 +7726,23 @@ test "basic usage" {
var ip: InternPool = .{};
defer ip.deinit(gpa);
- const i32_type = try ip.get(gpa, .{ .int_type = .{
+ const i32_type = try ip.get(gpa, .main, .{ .int_type = .{
.signedness = .signed,
.bits = 32,
} });
- const array_i32 = try ip.get(gpa, .{ .array_type = .{
+ const array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
.len = 10,
.child = i32_type,
.sentinel = .none,
} });
- const another_i32_type = try ip.get(gpa, .{ .int_type = .{
+ const another_i32_type = try ip.get(gpa, .main, .{ .int_type = .{
.signedness = .signed,
.bits = 32,
} });
try std.testing.expect(another_i32_type == i32_type);
- const another_array_i32 = try ip.get(gpa, .{ .array_type = .{
+ const another_array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
.len = 10,
.child = i32_type,
.sentinel = .none,
@@ -7766,48 +7812,54 @@ pub fn sliceLen(ip: *const InternPool, i: Index) Index {
/// * payload => error union
/// * fn <=> fn
/// * aggregate <=> aggregate (where children can also be coerced)
-pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
+pub fn getCoerced(
+ ip: *InternPool,
+ gpa: Allocator,
+ tid: Zcu.PerThread.Id,
+ val: Index,
+ new_ty: Index,
+) Allocator.Error!Index {
const old_ty = ip.typeOf(val);
if (old_ty == new_ty) return val;
const tags = ip.items.items(.tag);
switch (val) {
- .undef => return ip.get(gpa, .{ .undef = new_ty }),
+ .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
.null_value => {
- if (ip.isOptionalType(new_ty)) return ip.get(gpa, .{ .opt = .{
+ if (ip.isOptionalType(new_ty)) return ip.get(gpa, tid, .{ .opt = .{
.ty = new_ty,
.val = .none,
} });
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
- .One, .Many, .C => return ip.get(gpa, .{ .ptr = .{
+ .One, .Many, .C => return ip.get(gpa, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
- .Slice => return ip.get(gpa, .{ .slice = .{
+ .Slice => return ip.get(gpa, tid, .{ .slice = .{
.ty = new_ty,
- .ptr = try ip.get(gpa, .{ .ptr = .{
+ .ptr = try ip.get(gpa, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
} }),
- .len = try ip.get(gpa, .{ .undef = .usize_type }),
+ .len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
} }),
};
},
else => switch (tags[@intFromEnum(val)]) {
- .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty),
- .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty),
+ .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
+ .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
.func_coerced => {
const extra_index = ip.items.items(.data)[@intFromEnum(val)];
const func: Index = @enumFromInt(
ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?],
);
switch (tags[@intFromEnum(func)]) {
- .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty),
- .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty),
+ .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
+ .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
else => unreachable,
}
},
@@ -7816,9 +7868,9 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
}
switch (ip.indexToKey(val)) {
- .undef => return ip.get(gpa, .{ .undef = new_ty }),
+ .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
.extern_func => |extern_func| if (ip.isFunctionType(new_ty))
- return ip.get(gpa, .{ .extern_func = .{
+ return ip.get(gpa, tid, .{ .extern_func = .{
.ty = new_ty,
.decl = extern_func.decl,
.lib_name = extern_func.lib_name,
@@ -7827,12 +7879,12 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.func => unreachable,
.int => |int| switch (ip.indexToKey(new_ty)) {
- .enum_type => return ip.get(gpa, .{ .enum_tag = .{
+ .enum_type => return ip.get(gpa, tid, .{ .enum_tag = .{
.ty = new_ty,
- .int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
+ .int = try ip.getCoerced(gpa, tid, val, ip.loadEnumType(new_ty).tag_ty),
} }),
.ptr_type => switch (int.storage) {
- inline .u64, .i64 => |int_val| return ip.get(gpa, .{ .ptr = .{
+ inline .u64, .i64 => |int_val| return ip.get(gpa, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = @intCast(int_val),
@@ -7841,7 +7893,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.lazy_align, .lazy_size => {},
},
else => if (ip.isIntegerType(new_ty))
- return getCoercedInts(ip, gpa, int, new_ty),
+ return ip.getCoercedInts(gpa, tid, int, new_ty),
},
.float => |float| switch (ip.indexToKey(new_ty)) {
.simple_type => |simple| switch (simple) {
@@ -7852,7 +7904,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.f128,
.c_longdouble,
.comptime_float,
- => return ip.get(gpa, .{ .float = .{
+ => return ip.get(gpa, tid, .{ .float = .{
.ty = new_ty,
.storage = float.storage,
} }),
@@ -7861,17 +7913,17 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
else => {},
},
.enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
- return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
+ return ip.getCoercedInts(gpa, tid, ip.indexToKey(enum_tag.int).int, new_ty),
.enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
.enum_type => {
const enum_type = ip.loadEnumType(new_ty);
const index = enum_type.nameIndex(ip, enum_literal).?;
- return ip.get(gpa, .{ .enum_tag = .{
+ return ip.get(gpa, tid, .{ .enum_tag = .{
.ty = new_ty,
.int = if (enum_type.values.len != 0)
enum_type.values.get(ip)[index]
else
- try ip.get(gpa, .{ .int = .{
+ try ip.get(gpa, tid, .{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = index },
} }),
@@ -7880,22 +7932,22 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
else => {},
},
.slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .Slice)
- return ip.get(gpa, .{ .slice = .{
+ return ip.get(gpa, tid, .{ .slice = .{
.ty = new_ty,
- .ptr = try ip.getCoerced(gpa, slice.ptr, ip.slicePtrType(new_ty)),
+ .ptr = try ip.getCoerced(gpa, tid, slice.ptr, ip.slicePtrType(new_ty)),
.len = slice.len,
} })
else if (ip.isIntegerType(new_ty))
- return ip.getCoerced(gpa, slice.ptr, new_ty),
+ return ip.getCoerced(gpa, tid, slice.ptr, new_ty),
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .Slice)
- return ip.get(gpa, .{ .ptr = .{
+ return ip.get(gpa, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} })
else if (ip.isIntegerType(new_ty))
switch (ptr.base_addr) {
- .int => return ip.get(gpa, .{ .int = .{
+ .int => return ip.get(gpa, tid, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = @intCast(ptr.byte_offset) },
} }),
@@ -7904,44 +7956,44 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.opt => |opt| switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| return switch (opt.val) {
.none => switch (ptr_type.flags.size) {
- .One, .Many, .C => try ip.get(gpa, .{ .ptr = .{
+ .One, .Many, .C => try ip.get(gpa, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
- .Slice => try ip.get(gpa, .{ .slice = .{
+ .Slice => try ip.get(gpa, tid, .{ .slice = .{
.ty = new_ty,
- .ptr = try ip.get(gpa, .{ .ptr = .{
+ .ptr = try ip.get(gpa, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
} }),
- .len = try ip.get(gpa, .{ .undef = .usize_type }),
+ .len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
} }),
},
- else => |payload| try ip.getCoerced(gpa, payload, new_ty),
+ else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
},
- .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{
+ .opt_type => |child_type| return try ip.get(gpa, tid, .{ .opt = .{
.ty = new_ty,
.val = switch (opt.val) {
.none => .none,
- else => try ip.getCoerced(gpa, opt.val, child_type),
+ else => try ip.getCoerced(gpa, tid, opt.val, child_type),
},
} }),
else => {},
},
.err => |err| if (ip.isErrorSetType(new_ty))
- return ip.get(gpa, .{ .err = .{
+ return ip.get(gpa, tid, .{ .err = .{
.ty = new_ty,
.name = err.name,
} })
else if (ip.isErrorUnionType(new_ty))
- return ip.get(gpa, .{ .error_union = .{
+ return ip.get(gpa, tid, .{ .error_union = .{
.ty = new_ty,
.val = .{ .err_name = err.name },
} }),
.error_union => |error_union| if (ip.isErrorUnionType(new_ty))
- return ip.get(gpa, .{ .error_union = .{
+ return ip.get(gpa, tid, .{ .error_union = .{
.ty = new_ty,
.val = error_union.val,
} }),
@@ -7960,20 +8012,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
};
if (old_ty_child != new_ty_child) break :direct;
switch (aggregate.storage) {
- .bytes => |bytes| return ip.get(gpa, .{ .aggregate = .{
+ .bytes => |bytes| return ip.get(gpa, tid, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .bytes = bytes },
} }),
.elems => |elems| {
const elems_copy = try gpa.dupe(Index, elems[0..new_len]);
defer gpa.free(elems_copy);
- return ip.get(gpa, .{ .aggregate = .{
+ return ip.get(gpa, tid, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .elems = elems_copy },
} });
},
.repeated_elem => |elem| {
- return ip.get(gpa, .{ .aggregate = .{
+ return ip.get(gpa, tid, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .repeated_elem = elem },
} });
@@ -7991,7 +8043,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
// We have to intern each value here, so unfortunately we can't easily avoid
// the repeated indexToKey calls.
for (agg_elems, 0..) |*elem, index| {
- elem.* = try ip.get(gpa, .{ .int = .{
+ elem.* = try ip.get(gpa, tid, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes.at(index, ip) },
} });
@@ -8008,27 +8060,27 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
else => unreachable,
};
- elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
+ elem.* = try ip.getCoerced(gpa, tid, elem.*, new_elem_ty);
}
- return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
+ return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
},
else => {},
}
switch (ip.indexToKey(new_ty)) {
.opt_type => |child_type| switch (val) {
- .null_value => return ip.get(gpa, .{ .opt = .{
+ .null_value => return ip.get(gpa, tid, .{ .opt = .{
.ty = new_ty,
.val = .none,
} }),
- else => return ip.get(gpa, .{ .opt = .{
+ else => return ip.get(gpa, tid, .{ .opt = .{
.ty = new_ty,
- .val = try ip.getCoerced(gpa, val, child_type),
+ .val = try ip.getCoerced(gpa, tid, val, child_type),
} }),
},
- .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{
+ .error_union_type => |error_union_type| return ip.get(gpa, tid, .{ .error_union = .{
.ty = new_ty,
- .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) },
+ .val = .{ .payload = try ip.getCoerced(gpa, tid, val, error_union_type.payload_type) },
} }),
else => {},
}
@@ -8042,27 +8094,45 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
unreachable;
}
-fn getCoercedFuncDecl(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
+fn getCoercedFuncDecl(
+ ip: *InternPool,
+ gpa: Allocator,
+ tid: Zcu.PerThread.Id,
+ val: Index,
+ new_ty: Index,
+) Allocator.Error!Index {
const datas = ip.items.items(.data);
const extra_index = datas[@intFromEnum(val)];
const prev_ty: Index = @enumFromInt(
ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?],
);
if (new_ty == prev_ty) return val;
- return getCoercedFunc(ip, gpa, val, new_ty);
+ return getCoercedFunc(ip, gpa, tid, val, new_ty);
}
-fn getCoercedFuncInstance(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
+fn getCoercedFuncInstance(
+ ip: *InternPool,
+ gpa: Allocator,
+ tid: Zcu.PerThread.Id,
+ val: Index,
+ new_ty: Index,
+) Allocator.Error!Index {
const datas = ip.items.items(.data);
const extra_index = datas[@intFromEnum(val)];
const prev_ty: Index = @enumFromInt(
ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?],
);
if (new_ty == prev_ty) return val;
- return getCoercedFunc(ip, gpa, val, new_ty);
+ return getCoercedFunc(ip, gpa, tid, val, new_ty);
}
-fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Allocator.Error!Index {
+fn getCoercedFunc(
+ ip: *InternPool,
+ gpa: Allocator,
+ _: Zcu.PerThread.Id,
+ func: Index,
+ ty: Index,
+) Allocator.Error!Index {
const prev_extra_len = ip.extra.items.len;
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len);
try ip.items.ensureUnusedCapacity(gpa, 1);
@@ -8092,7 +8162,7 @@ fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Alloc
/// Asserts `val` has an integer type.
/// Assumes `new_ty` is an integer type.
-pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index {
+pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index {
// The key cannot be passed directly to `get`, otherwise in the case of
// big_int storage, the limbs would be invalidated before they are read.
// Here we pre-reserve the limbs to ensure that the logic in `addInt` will
@@ -8111,7 +8181,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
} };
},
};
- return ip.get(gpa, .{ .int = .{
+ return ip.get(gpa, tid, .{ .int = .{
.ty = new_ty,
.storage = new_storage,
} });
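The recurring change in InternPool.zig is the extra `tid: Zcu.PerThread.Id` argument threaded through every interning entry point. A minimal hedged sketch of a call site under the new signatures, not part of this commit (assumes an already-initialized pool with `ip`, `gpa`, and `tid` in scope; `.main` is the id the updated test above uses for single-threaded callers):

// Hedged sketch, not from this commit: interning a u8 value and coercing it
// to usize with the new per-thread id argument.
const byte = try ip.get(gpa, tid, .{ .int = .{
    .ty = .u8_type,
    .storage = .{ .u64 = 42 },
} });
// The coercion helpers take the same id; here the u8 value is widened to usize.
const widened = try ip.getCoercedInts(gpa, tid, ip.indexToKey(byte).int, .usize_type);
assert(ip.typeOf(widened) == .usize_type);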
src/link.zig
@@ -15,8 +15,6 @@ const Compilation = @import("Compilation.zig");
const LibCInstallation = std.zig.LibCInstallation;
const Liveness = @import("Liveness.zig");
const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
@@ -367,14 +365,14 @@ pub const File = struct {
/// Called from within the CodeGen to lower a local variable instantiation as an unnamed
/// constant. Returns the symbol index of the lowered constant in the read-only section
/// of the final binary.
- pub fn lowerUnnamedConst(base: *File, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
+ pub fn lowerUnnamedConst(base: *File, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 {
if (build_options.only_c) @compileError("unreachable");
switch (base.tag) {
.spirv => unreachable,
.c => unreachable,
.nvptx => unreachable,
inline else => |t| {
- return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(val, decl_index);
+ return @as(*t.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(pt, val, decl_index);
},
}
}
@@ -399,13 +397,13 @@ pub const File = struct {
}
/// May be called before or after updateExports for any given Decl.
- pub fn updateDecl(base: *File, module: *Module, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
- const decl = module.declPtr(decl_index);
+ pub fn updateDecl(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
+ const decl = pt.zcu.declPtr(decl_index);
assert(decl.has_tv);
switch (base.tag) {
inline else => |tag| {
if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(module, decl_index);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(pt, decl_index);
},
}
}
@@ -413,7 +411,7 @@ pub const File = struct {
/// May be called before or after updateExports for any given Decl.
pub fn updateFunc(
base: *File,
- module: *Module,
+ pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
@@ -421,12 +419,12 @@ pub const File = struct {
switch (base.tag) {
inline else => |tag| {
if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(module, func_index, air, liveness);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateFunc(pt, func_index, air, liveness);
},
}
}
- pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
+ pub fn updateDeclLineNumber(base: *File, module: *Zcu, decl_index: InternPool.DeclIndex) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
assert(decl.has_tv);
switch (base.tag) {
@@ -537,7 +535,7 @@ pub const File = struct {
/// Commit pending changes and write headers. Takes into account final output mode
/// and `use_lld`, not only `effectiveOutputMode`.
/// `arena` has the lifetime of the call to `Compilation.update`.
- pub fn flush(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void {
+ pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
if (build_options.only_c) {
assert(base.tag == .c);
return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node);
@@ -563,27 +561,27 @@ pub const File = struct {
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
if (use_lld and output_mode == .Lib and link_mode == .static) {
- return base.linkAsArchive(arena, prog_node);
+ return base.linkAsArchive(arena, tid, prog_node);
}
switch (base.tag) {
inline else => |tag| {
- return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, prog_node);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).flush(arena, tid, prog_node);
},
}
}
/// Commit pending changes and write headers. Works based on `effectiveOutputMode`
/// rather than final output mode.
- pub fn flushModule(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void {
+ pub fn flushModule(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
switch (base.tag) {
inline else => |tag| {
if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, prog_node);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).flushModule(arena, tid, prog_node);
},
}
}
- /// Called when a Decl is deleted from the Module.
+ /// Called when a Decl is deleted from the Zcu.
pub fn freeDecl(base: *File, decl_index: InternPool.DeclIndex) void {
switch (base.tag) {
inline else => |tag| {
@@ -604,14 +602,14 @@ pub const File = struct {
/// May be called before or after updateDecl for any given Decl.
pub fn updateExports(
base: *File,
- module: *Module,
- exported: Module.Exported,
+ pt: Zcu.PerThread,
+ exported: Zcu.Exported,
export_indices: []const u32,
) UpdateExportsError!void {
switch (base.tag) {
inline else => |tag| {
if (tag != .c and build_options.only_c) unreachable;
- return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, export_indices);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(pt, exported, export_indices);
},
}
}
@@ -644,9 +642,10 @@ pub const File = struct {
pub fn lowerAnonDecl(
base: *File,
+ pt: Zcu.PerThread,
decl_val: InternPool.Index,
decl_align: InternPool.Alignment,
- src_loc: Module.LazySrcLoc,
+ src_loc: Zcu.LazySrcLoc,
) !LowerResult {
if (build_options.only_c) @compileError("unreachable");
switch (base.tag) {
@@ -654,7 +653,7 @@ pub const File = struct {
.spirv => unreachable,
.nvptx => unreachable,
inline else => |tag| {
- return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(decl_val, decl_align, src_loc);
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(pt, decl_val, decl_align, src_loc);
},
}
}
@@ -689,7 +688,7 @@ pub const File = struct {
}
}
- pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void {
+ pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -704,7 +703,7 @@ pub const File = struct {
// If there is no Zig code to compile, then we should skip flushing the output file
// because it will not be part of the linker line anyway.
const zcu_obj_path: ?[]const u8 = if (opt_zcu != null) blk: {
- try base.flushModule(arena, prog_node);
+ try base.flushModule(arena, tid, prog_node);
const dirname = fs.path.dirname(full_out_path_z) orelse ".";
break :blk try fs.path.join(arena, &.{ dirname, base.zcu_object_sub_path.? });
@@ -896,14 +895,14 @@ pub const File = struct {
kind: Kind,
ty: Type,
- pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Module) LazySymbol {
+ pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Zcu) LazySymbol {
return .{ .kind = kind, .ty = if (decl) |decl_index|
mod.declPtr(decl_index).val.toType()
else
Type.anyerror };
}
- pub fn getDecl(self: LazySymbol, mod: *Module) InternPool.OptionalDeclIndex {
+ pub fn getDecl(self: LazySymbol, mod: *Zcu) InternPool.OptionalDeclIndex {
return InternPool.OptionalDeclIndex.init(self.ty.getOwnerDeclOrNull(mod));
}
};
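Across the linker interface the changes follow one pattern: per-Decl callbacks that previously took `*Module`/`*Zcu` now take a `Zcu.PerThread` and reach the `Zcu` through `pt.zcu`. A hedged sketch of what a backend implementation looks like under the new convention (the backend type and body are hypothetical; only the signature shape comes from this diff):

// Hypothetical backend override; not part of this commit.
pub fn updateDecl(self: *MyBackend, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) link.File.UpdateDeclError!void {
    const zcu = pt.zcu;
    const decl = zcu.declPtr(decl_index);
    std.debug.assert(decl.has_tv);
    _ = self;
    // ... lower `decl`, forwarding `pt` to anything that needs to intern values ...
}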
src/main.zig
@@ -172,7 +172,7 @@ pub fn main() anyerror!void {
}
// We would prefer to use raw libc allocator here, but cannot
// use it if it won't support the alignment we need.
- if (@alignOf(std.c.max_align_t) < @alignOf(i128)) {
+ if (@alignOf(std.c.max_align_t) < @max(@alignOf(i128), std.atomic.cache_line)) {
break :gpa std.heap.c_allocator;
}
break :gpa std.heap.raw_c_allocator;
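The raw libc allocator is now only used when `max_align_t` also covers the cache-line size. An illustrative comptime check, not from the commit (on common 64-bit targets `@alignOf(std.c.max_align_t)` is typically 16 while `std.atomic.cache_line` is 64, so `c_allocator` is selected):

// Illustrative sketch only; the exact values depend on the target.
const std = @import("std");

test "raw libc allocator alignment threshold" {
    const needed = @max(@alignOf(i128), std.atomic.cache_line);
    const raw_libc_is_enough = @alignOf(std.c.max_align_t) >= needed;
    // On most 64-bit hosts this is false, so main() picks std.heap.c_allocator.
    _ = raw_libc_is_enough;
}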
@@ -3092,7 +3092,7 @@ fn buildOutputType(
defer emit_implib_resolved.deinit();
var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{ .allocator = gpa });
+ try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
defer thread_pool.deinit();
var cleanup_local_cache_dir: ?fs.Dir = null;
@@ -4895,7 +4895,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path;
var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{ .allocator = gpa });
+ try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
defer thread_pool.deinit();
// Dummy http client that is not actually used when only_core_functionality is enabled.
@@ -5329,7 +5329,7 @@ fn jitCmd(
defer global_cache_directory.handle.close();
var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{ .allocator = gpa });
+ try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
defer thread_pool.deinit();
var child_argv: std.ArrayListUnmanaged([]const u8) = .{};
src/mutable_value.zig
@@ -54,46 +54,44 @@ pub const MutableValue = union(enum) {
payload: *MutableValue,
};
- pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!Value {
- const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
+ pub fn intern(mv: MutableValue, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
return Value.fromInterned(switch (mv) {
.interned => |ip_index| ip_index,
- .eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{
+ .eu_payload => |sv| try pt.intern(.{ .error_union = .{
.ty = sv.ty,
- .val = .{ .payload = (try sv.child.intern(zcu, arena)).toIntern() },
+ .val = .{ .payload = (try sv.child.intern(pt, arena)).toIntern() },
} }),
- .opt_payload => |sv| try ip.get(gpa, .{ .opt = .{
+ .opt_payload => |sv| try pt.intern(.{ .opt = .{
.ty = sv.ty,
- .val = (try sv.child.intern(zcu, arena)).toIntern(),
+ .val = (try sv.child.intern(pt, arena)).toIntern(),
} }),
- .repeated => |sv| try ip.get(gpa, .{ .aggregate = .{
+ .repeated => |sv| try pt.intern(.{ .aggregate = .{
.ty = sv.ty,
- .storage = .{ .repeated_elem = (try sv.child.intern(zcu, arena)).toIntern() },
+ .storage = .{ .repeated_elem = (try sv.child.intern(pt, arena)).toIntern() },
} }),
- .bytes => |b| try ip.get(gpa, .{ .aggregate = .{
+ .bytes => |b| try pt.intern(.{ .aggregate = .{
.ty = b.ty,
- .storage = .{ .bytes = try ip.getOrPutString(gpa, b.data, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, b.data, .maybe_embedded_nulls) },
} }),
.aggregate => |a| {
const elems = try arena.alloc(InternPool.Index, a.elems.len);
for (a.elems, elems) |mut_elem, *interned_elem| {
- interned_elem.* = (try mut_elem.intern(zcu, arena)).toIntern();
+ interned_elem.* = (try mut_elem.intern(pt, arena)).toIntern();
}
- return Value.fromInterned(try ip.get(gpa, .{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = a.ty,
.storage = .{ .elems = elems },
} }));
},
- .slice => |s| try ip.get(gpa, .{ .slice = .{
+ .slice => |s| try pt.intern(.{ .slice = .{
.ty = s.ty,
- .ptr = (try s.ptr.intern(zcu, arena)).toIntern(),
- .len = (try s.len.intern(zcu, arena)).toIntern(),
+ .ptr = (try s.ptr.intern(pt, arena)).toIntern(),
+ .len = (try s.len.intern(pt, arena)).toIntern(),
} }),
- .un => |u| try ip.get(gpa, .{ .un = .{
+ .un => |u| try pt.intern(.{ .un = .{
.ty = u.ty,
.tag = u.tag,
- .val = (try u.payload.intern(zcu, arena)).toIntern(),
+ .val = (try u.payload.intern(pt, arena)).toIntern(),
} }),
});
}
@@ -108,13 +106,13 @@ pub const MutableValue = union(enum) {
/// If `!allow_repeated`, the `repeated` representation will not be used.
pub fn unintern(
mv: *MutableValue,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
allow_bytes: bool,
allow_repeated: bool,
) Allocator.Error!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
switch (mv.*) {
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.opt => |opt| if (opt.val != .none) {
@@ -170,7 +168,7 @@ pub const MutableValue = union(enum) {
} else {
const mut_elems = try arena.alloc(MutableValue, len);
for (bytes.toSlice(len, ip), mut_elems) |b, *mut_elem| {
- mut_elem.* = .{ .interned = try ip.get(gpa, .{ .int = .{
+ mut_elem.* = .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = b },
} }) };
@@ -221,12 +219,12 @@ pub const MutableValue = union(enum) {
switch (type_tag) {
.Array, .Vector => {
const elem_ty = ip.childType(ty_ip);
- const undef_elem = try ip.get(gpa, .{ .undef = elem_ty });
+ const undef_elem = try pt.intern(.{ .undef = elem_ty });
@memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
},
.Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
const field_ty = ty.structFieldType(i, zcu).toIntern();
- mut_elem.* = .{ .interned = try ip.get(gpa, .{ .undef = field_ty }) };
+ mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
},
else => unreachable,
}
@@ -238,7 +236,7 @@ pub const MutableValue = union(enum) {
} else {
const repeated_val = try arena.create(MutableValue);
repeated_val.* = .{
- .interned = try ip.get(gpa, .{ .undef = ip.childType(ty_ip) }),
+ .interned = try pt.intern(.{ .undef = ip.childType(ty_ip) }),
};
mv.* = .{ .repeated = .{
.ty = ty_ip,
@@ -248,11 +246,8 @@ pub const MutableValue = union(enum) {
},
.Union => {
const payload = try arena.create(MutableValue);
- const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(zcu);
- payload.* = .{ .interned = try ip.get(
- gpa,
- .{ .undef = backing_ty.toIntern() },
- ) };
+ const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(pt);
+ payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) };
mv.* = .{ .un = .{
.ty = ty_ip,
.tag = .none,
@@ -264,8 +259,8 @@ pub const MutableValue = union(enum) {
if (ptr_ty.flags.size != .Slice) return;
const ptr = try arena.create(MutableValue);
const len = try arena.create(MutableValue);
- ptr.* = .{ .interned = try ip.get(gpa, .{ .undef = ip.slicePtrType(ty_ip) }) };
- len.* = .{ .interned = try ip.get(gpa, .{ .undef = .usize_type }) };
+ ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
+ len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) };
mv.* = .{ .slice = .{
.ty = ty_ip,
.ptr = ptr,
@@ -279,7 +274,7 @@ pub const MutableValue = union(enum) {
.bytes => |bytes| if (!allow_bytes) {
const elems = try arena.alloc(MutableValue, bytes.data.len);
for (bytes.data, elems) |byte, *interned_byte| {
- interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
+ interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
} }) };
@@ -298,22 +293,22 @@ pub const MutableValue = union(enum) {
/// The returned pointer is valid until the representation of `mv` changes.
pub fn elem(
mv: *MutableValue,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
field_idx: usize,
) Allocator.Error!*MutableValue {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
// Convert to the `aggregate` representation.
switch (mv.*) {
.eu_payload, .opt_payload, .un => unreachable,
.interned => {
- try mv.unintern(zcu, arena, false, false);
+ try mv.unintern(pt, arena, false, false);
},
.bytes => |bytes| {
const elems = try arena.alloc(MutableValue, bytes.data.len);
for (bytes.data, elems) |byte, *interned_byte| {
- interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
+ interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
} }) };
@@ -351,14 +346,15 @@ pub const MutableValue = union(enum) {
/// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
pub fn setElem(
mv: *MutableValue,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
field_idx: usize,
field_val: MutableValue,
) Allocator.Error!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const is_trivial_int = field_val.isTrivialInt(zcu);
- try mv.unintern(zcu, arena, is_trivial_int, true);
+ try mv.unintern(pt, arena, is_trivial_int, true);
switch (mv.*) {
.interned,
.eu_payload,
@@ -373,7 +369,7 @@ pub const MutableValue = union(enum) {
.bytes => |b| {
assert(is_trivial_int);
assert(field_val.typeOf(zcu).toIntern() == .u8_type);
- b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
+ b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
},
.repeated => |r| {
if (field_val.eqlTrivial(r.child.*)) return;
@@ -386,9 +382,9 @@ pub const MutableValue = union(enum) {
{
// We can use the `bytes` representation.
const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
- const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
+ const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(pt);
@memset(bytes, @intCast(repeated_byte));
- bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
+ bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
mv.* = .{ .bytes = .{
.ty = r.ty,
.data = bytes,
@@ -435,7 +431,7 @@ pub const MutableValue = union(enum) {
} else {
const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| {
- b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
+ b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(pt));
}
mv.* = .{ .bytes = .{
.ty = a.ty,
@@ -451,7 +447,7 @@ pub const MutableValue = union(enum) {
/// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
pub fn getElem(
mv: MutableValue,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
field_idx: usize,
) Allocator.Error!MutableValue {
return switch (mv) {
@@ -459,16 +455,16 @@ pub const MutableValue = union(enum) {
.opt_payload,
=> unreachable,
.interned => |ip_index| {
- const ty = Type.fromInterned(zcu.intern_pool.typeOf(ip_index));
- switch (ty.zigTypeTag(zcu)) {
- .Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(zcu, field_idx)).toIntern() },
- .Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(zcu, field_idx)).toIntern() },
+ const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(ip_index));
+ switch (ty.zigTypeTag(pt.zcu)) {
+ .Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(pt, field_idx)).toIntern() },
+ .Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(pt, field_idx)).toIntern() },
.Pointer => {
- assert(ty.isSlice(zcu));
+ assert(ty.isSlice(pt.zcu));
return switch (field_idx) {
- Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(zcu).toIntern() },
- Value.slice_len_index => .{ .interned = switch (zcu.intern_pool.indexToKey(ip_index)) {
- .undef => try zcu.intern(.{ .undef = .usize_type }),
+ Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
+ Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
+ .undef => try pt.intern(.{ .undef = .usize_type }),
.slice => |s| s.len,
else => unreachable,
} },
@@ -487,7 +483,7 @@ pub const MutableValue = union(enum) {
Value.slice_len_index => s.len.*,
else => unreachable,
},
- .bytes => |b| .{ .interned = try zcu.intern(.{ .int = .{
+ .bytes => |b| .{ .interned = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = b.data[field_idx] },
} }) },
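Throughout this file, direct `ip.get(gpa, ...)` calls are replaced by `pt.intern(...)`. A hedged sketch of the forwarding wrapper this implies on `Zcu.PerThread` (the field names `zcu` and `tid` are assumptions; the real definition lives in another part of this commit):

// Assumed shape, for orientation only: the per-thread handle forwards its
// allocator and thread id so value code no longer passes them explicitly.
pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
    return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key);
}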
src/print_air.zig
@@ -9,7 +9,7 @@ const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
const InternPool = @import("InternPool.zig");
-pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void {
+pub fn write(stream: anytype, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
@@ -42,8 +42,8 @@ pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void
// zig fmt: on
var writer: Writer = .{
- .module = module,
- .gpa = module.gpa,
+ .pt = pt,
+ .gpa = pt.zcu.gpa,
.air = air,
.liveness = liveness,
.indent = 2,
@@ -55,13 +55,13 @@ pub fn write(stream: anytype, module: *Zcu, air: Air, liveness: ?Liveness) void
pub fn writeInst(
stream: anytype,
inst: Air.Inst.Index,
- module: *Zcu,
+ pt: Zcu.PerThread,
air: Air,
liveness: ?Liveness,
) void {
var writer: Writer = .{
- .module = module,
- .gpa = module.gpa,
+ .pt = pt,
+ .gpa = pt.zcu.gpa,
.air = air,
.liveness = liveness,
.indent = 2,
@@ -70,16 +70,16 @@ pub fn writeInst(
writer.writeInst(stream, inst) catch return;
}
-pub fn dump(module: *Zcu, air: Air, liveness: ?Liveness) void {
- write(std.io.getStdErr().writer(), module, air, liveness);
+pub fn dump(pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
+ write(std.io.getStdErr().writer(), pt, air, liveness);
}
-pub fn dumpInst(inst: Air.Inst.Index, module: *Zcu, air: Air, liveness: ?Liveness) void {
- writeInst(std.io.getStdErr().writer(), inst, module, air, liveness);
+pub fn dumpInst(inst: Air.Inst.Index, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
+ writeInst(std.io.getStdErr().writer(), inst, pt, air, liveness);
}
const Writer = struct {
- module: *Zcu,
+ pt: Zcu.PerThread,
gpa: Allocator,
air: Air,
liveness: ?Liveness,
@@ -345,7 +345,7 @@ const Writer = struct {
}
fn writeType(w: *Writer, s: anytype, ty: Type) !void {
- return ty.print(s, w.module);
+ return ty.print(s, w.pt);
}
fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@@ -424,7 +424,7 @@ const Writer = struct {
}
fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
- const mod = w.module;
+ const mod = w.pt.zcu;
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const vector_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(vector_ty.arrayLen(mod)));
@@ -504,7 +504,7 @@ const Writer = struct {
}
fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
- const mod = w.module;
+ const mod = w.pt.zcu;
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
@@ -947,11 +947,11 @@ const Writer = struct {
if (@intFromEnum(operand) < InternPool.static_len) {
return s.print("@{}", .{operand});
} else if (operand.toInterned()) |ip_index| {
- const mod = w.module;
- const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf());
+ const pt = w.pt;
+ const ty = Type.fromInterned(pt.zcu.intern_pool.indexToKey(ip_index).typeOf());
try s.print("<{}, {}>", .{
- ty.fmt(mod),
- Value.fromInterned(ip_index).fmtValue(mod, null),
+ ty.fmt(pt),
+ Value.fromInterned(ip_index).fmtValue(pt, null),
});
} else {
return w.writeInstIndex(s, operand.toIndex().?, dies);
@@ -970,7 +970,7 @@ const Writer = struct {
}
fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type {
- const mod = w.module;
+ const mod = w.pt.zcu;
return w.air.typeOfIndex(inst, &mod.intern_pool);
}
};
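The AIR printer's entry points now take a `Zcu.PerThread` instead of a `*Zcu`. A short hedged example of a debug call site under the new signatures (`pt`, `air`, and `liveness` are assumed to be in scope):

// Sketch of a debug call; matches the updated `dump`/`write` signatures above.
const print_air = @import("print_air.zig");
print_air.dump(pt, air, liveness); // stderr; types and values are formatted via pt
print_air.write(std.io.getStdErr().writer(), pt, air, null); // liveness is optional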
src/print_value.zig
@@ -5,8 +5,6 @@ const std = @import("std");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const Allocator = std.mem.Allocator;
@@ -17,7 +15,7 @@ const max_string_len = 256;
pub const FormatContext = struct {
val: Value,
- mod: *Module,
+ pt: Zcu.PerThread,
opt_sema: ?*Sema,
depth: u8,
};
@@ -30,7 +28,7 @@ pub fn format(
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
- return print(ctx.val, writer, ctx.depth, ctx.mod, ctx.opt_sema) catch |err| switch (err) {
+ return print(ctx.val, writer, ctx.depth, ctx.pt, ctx.opt_sema) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
error.ComptimeBreak, error.ComptimeReturn => unreachable,
error.AnalysisFail => unreachable, // TODO: re-evaluate when we use `opt_sema` more fully
@@ -42,10 +40,11 @@ pub fn print(
val: Value,
writer: anytype,
level: u8,
- mod: *Module,
+ pt: Zcu.PerThread,
/// If this `Sema` is provided, we will recurse through pointers where possible to provide friendly output.
opt_sema: ?*Sema,
-) (@TypeOf(writer).Error || Module.CompileError)!void {
+) (@TypeOf(writer).Error || Zcu.CompileError)!void {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(val.toIntern())) {
.int_type,
@@ -64,7 +63,7 @@ pub fn print(
.func_type,
.error_set_type,
.inferred_error_set_type,
- => try Type.print(val.toType(), writer, mod),
+ => try Type.print(val.toType(), writer, pt),
.undef => try writer.writeAll("undefined"),
.simple_value => |simple_value| switch (simple_value) {
.void => try writer.writeAll("{}"),
@@ -82,13 +81,13 @@ pub fn print(
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (opt_sema != null) {
- const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar;
+ const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, .sema)).scalar;
try writer.print("{}", .{a.toByteUnits() orelse 0});
- } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
+ } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}),
.lazy_size => |ty| if (opt_sema != null) {
- const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar;
+ const s = (try Type.fromInterned(ty).abiSizeAdvanced(pt, .sema)).scalar;
try writer.print("{}", .{s});
- } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}),
+ } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}),
},
.err => |err| try writer.print("error.{}", .{
err.name.fmt(ip),
@@ -97,7 +96,7 @@ pub fn print(
.err_name => |err_name| try writer.print("error.{}", .{
err_name.fmt(ip),
}),
- .payload => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
+ .payload => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
},
.enum_literal => |enum_literal| try writer.print(".{}", .{
enum_literal.fmt(ip),
@@ -111,7 +110,7 @@ pub fn print(
return writer.writeAll("@enumFromInt(...)");
}
try writer.writeAll("@enumFromInt(");
- try print(Value.fromInterned(enum_tag.int), writer, level - 1, mod, opt_sema);
+ try print(Value.fromInterned(enum_tag.int), writer, level - 1, pt, opt_sema);
try writer.writeAll(")");
},
.empty_enum_value => try writer.writeAll("(empty enum value)"),
@@ -128,12 +127,12 @@ pub fn print(
// TODO: eventually we want to load the slice as an array with `opt_sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
- try printPtr(Value.fromInterned(slice.ptr), writer, level, mod, opt_sema);
+ try printPtr(Value.fromInterned(slice.ptr), writer, level, pt, opt_sema);
try writer.writeAll("[0..");
if (level == 0) {
try writer.writeAll("(...)");
} else {
- try print(Value.fromInterned(slice.len), writer, level - 1, mod, opt_sema);
+ try print(Value.fromInterned(slice.len), writer, level - 1, pt, opt_sema);
}
try writer.writeAll("]");
},
@@ -147,28 +146,28 @@ pub fn print(
// TODO: eventually we want to load the pointer with `opt_sema`, but that's
// currently not possible without e.g. triggering compile errors.
}
- try printPtr(val, writer, level, mod, opt_sema);
+ try printPtr(val, writer, level, pt, opt_sema);
},
.opt => |opt| switch (opt.val) {
.none => try writer.writeAll("null"),
- else => |payload| try print(Value.fromInterned(payload), writer, level, mod, opt_sema),
+ else => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
},
- .aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, mod, opt_sema),
+ .aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, pt, opt_sema),
.un => |un| {
if (level == 0) {
try writer.writeAll(".{ ... }");
return;
}
if (un.tag == .none) {
- const backing_ty = try val.typeOf(mod).unionBackingType(mod);
- try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(mod)});
- try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema);
+ const backing_ty = try val.typeOf(mod).unionBackingType(pt);
+ try writer.print("@bitCast(@as({}, ", .{backing_ty.fmt(pt)});
+ try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll("))");
} else {
try writer.writeAll(".{ ");
- try print(Value.fromInterned(un.tag), writer, level - 1, mod, opt_sema);
+ try print(Value.fromInterned(un.tag), writer, level - 1, pt, opt_sema);
try writer.writeAll(" = ");
- try print(Value.fromInterned(un.val), writer, level - 1, mod, opt_sema);
+ try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll(" }");
}
},
@@ -182,13 +181,14 @@ fn printAggregate(
is_ref: bool,
writer: anytype,
level: u8,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
opt_sema: ?*Sema,
-) (@TypeOf(writer).Error || Module.CompileError)!void {
+) (@TypeOf(writer).Error || Zcu.CompileError)!void {
if (level == 0) {
if (is_ref) try writer.writeByte('&');
return writer.writeAll(".{ ... }");
}
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = Type.fromInterned(aggregate.ty);
switch (ty.zigTypeTag(zcu)) {
@@ -203,7 +203,7 @@ fn printAggregate(
if (i != 0) try writer.writeAll(", ");
const field_name = ty.structFieldName(@intCast(i), zcu).unwrap().?;
try writer.print(".{i} = ", .{field_name.fmt(ip)});
- try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema);
+ try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
try writer.writeAll(" }");
return;
@@ -230,7 +230,7 @@ fn printAggregate(
if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
if (elem_val.isUndef(zcu)) break :one_byte_str;
- const byte = elem_val.toUnsignedInt(zcu);
+ const byte = elem_val.toUnsignedInt(pt);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
if (!is_ref) try writer.writeAll(".*");
return;
@@ -253,7 +253,7 @@ fn printAggregate(
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
- try print(try val.fieldValue(zcu, i), writer, level - 1, zcu, opt_sema);
+ try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
@@ -261,8 +261,8 @@ fn printAggregate(
return writer.writeAll(" }");
}
-fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
- const ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
+fn printPtr(ptr_val: Value, writer: anytype, level: u8, pt: Zcu.PerThread, opt_sema: ?*Sema) (@TypeOf(writer).Error || Zcu.CompileError)!void {
+ const ptr = switch (pt.zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return writer.writeAll("undefined"),
.ptr => |ptr| ptr,
else => unreachable,
@@ -270,32 +270,33 @@ fn printPtr(ptr_val: Value, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*S
if (ptr.base_addr == .anon_decl) {
// If the value is an aggregate, we can potentially print it more nicely.
- switch (zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) {
+ switch (pt.zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) {
.aggregate => |agg| return printAggregate(
Value.fromInterned(ptr.base_addr.anon_decl.val),
agg,
true,
writer,
level,
- zcu,
+ pt,
opt_sema,
),
else => {},
}
}
- var arena = std.heap.ArenaAllocator.init(zcu.gpa);
+ var arena = std.heap.ArenaAllocator.init(pt.zcu.gpa);
defer arena.deinit();
- const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), zcu, opt_sema);
- try printPtrDerivation(derivation, writer, level, zcu, opt_sema);
+ const derivation = try ptr_val.pointerDerivationAdvanced(arena.allocator(), pt, opt_sema);
+ try printPtrDerivation(derivation, writer, level, pt, opt_sema);
}
/// Print `derivation` as an lvalue, i.e. such that writing `&` before this gives the pointer value.
-fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, zcu: *Zcu, opt_sema: ?*Sema) (@TypeOf(writer).Error || Module.CompileError)!void {
+fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, level: u8, pt: Zcu.PerThread, opt_sema: ?*Sema) (@TypeOf(writer).Error || Zcu.CompileError)!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (derivation) {
.int => |int| try writer.print("@as({}, @ptrFromInt({x})).*", .{
- int.ptr_ty.fmt(zcu),
+ int.ptr_ty.fmt(pt),
int.addr,
}),
.decl_ptr => |decl| {
@@ -303,33 +304,33 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve
},
.anon_decl_ptr => |anon| {
const ty = Value.fromInterned(anon.val).typeOf(zcu);
- try writer.print("@as({}, ", .{ty.fmt(zcu)});
- try print(Value.fromInterned(anon.val), writer, level - 1, zcu, opt_sema);
+ try writer.print("@as({}, ", .{ty.fmt(pt)});
+ try print(Value.fromInterned(anon.val), writer, level - 1, pt, opt_sema);
try writer.writeByte(')');
},
.comptime_alloc_ptr => |info| {
- try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(zcu)});
- try print(info.val, writer, level - 1, zcu, opt_sema);
+ try writer.print("@as({}, ", .{info.val.typeOf(zcu).fmt(pt)});
+ try print(info.val, writer, level - 1, pt, opt_sema);
try writer.writeByte(')');
},
.comptime_field_ptr => |val| {
const ty = val.typeOf(zcu);
- try writer.print("@as({}, ", .{ty.fmt(zcu)});
- try print(val, writer, level - 1, zcu, opt_sema);
+ try writer.print("@as({}, ", .{ty.fmt(pt)});
+ try print(val, writer, level - 1, pt, opt_sema);
try writer.writeByte(')');
},
.eu_payload_ptr => |info| {
try writer.writeByte('(');
- try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
+ try printPtrDerivation(info.parent.*, writer, level, pt, opt_sema);
try writer.writeAll(" catch unreachable)");
},
.opt_payload_ptr => |info| {
- try printPtrDerivation(info.parent.*, writer, level, zcu, opt_sema);
+ try printPtrDerivation(info.parent.*, writer, level, pt, opt_sema);
try writer.writeAll(".?");
},
.field_ptr => |field| {
- try printPtrDerivation(field.parent.*, writer, level, zcu, opt_sema);
- const agg_ty = (try field.parent.ptrType(zcu)).childType(zcu);
+ try printPtrDerivation(field.parent.*, writer, level, pt, opt_sema);
+ const agg_ty = (try field.parent.ptrType(pt)).childType(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
.Struct => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
try writer.print(".{i}", .{field_name.fmt(ip)});
@@ -350,16 +351,16 @@ fn printPtrDerivation(derivation: Value.PointerDeriveStep, writer: anytype, leve
}
},
.elem_ptr => |elem| {
- try printPtrDerivation(elem.parent.*, writer, level, zcu, opt_sema);
+ try printPtrDerivation(elem.parent.*, writer, level, pt, opt_sema);
try writer.print("[{d}]", .{elem.elem_idx});
},
.offset_and_cast => |oac| if (oac.byte_offset == 0) {
- try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(zcu)});
- try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
+ try writer.print("@as({}, @ptrCast(", .{oac.new_ptr_ty.fmt(pt)});
+ try printPtrDerivation(oac.parent.*, writer, level, pt, opt_sema);
try writer.writeAll("))");
} else {
- try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(zcu)});
- try printPtrDerivation(oac.parent.*, writer, level, zcu, opt_sema);
+ try writer.print("@as({}, @ptrFromInt(@intFromPtr(", .{oac.new_ptr_ty.fmt(pt)});
+ try printPtrDerivation(oac.parent.*, writer, level, pt, opt_sema);
try writer.print(") + {d}))", .{oac.byte_offset});
},
}
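Value and type formatting now take the `Zcu.PerThread` as their context. A hedged sketch of formatting a value for a diagnostic (`val: Value` and `pt` assumed in scope; the second argument of `fmtValue` is the optional `*Sema` used for friendlier pointer output):

// Sketch only; mirrors the call pattern used in print_air.zig above.
std.debug.print("constant: <{}, {}>\n", .{
    val.typeOf(pt.zcu).fmt(pt),
    val.fmtValue(pt, null),
});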
src/print_zir.zig
@@ -7,13 +7,12 @@ const InternPool = @import("InternPool.zig");
const Zir = std.zig.Zir;
const Zcu = @import("Zcu.zig");
-const Module = Zcu;
const LazySrcLoc = Zcu.LazySrcLoc;
/// Write human-readable, debug formatted ZIR code to a file.
pub fn renderAsTextToFile(
gpa: Allocator,
- scope_file: *Module.File,
+ scope_file: *Zcu.File,
fs_file: std.fs.File,
) !void {
var arena = std.heap.ArenaAllocator.init(gpa);
@@ -64,7 +63,7 @@ pub fn renderInstructionContext(
gpa: Allocator,
block: []const Zir.Inst.Index,
block_index: usize,
- scope_file: *Module.File,
+ scope_file: *Zcu.File,
parent_decl_node: Ast.Node.Index,
indent: u32,
stream: anytype,
@@ -96,7 +95,7 @@ pub fn renderInstructionContext(
pub fn renderSingleInstruction(
gpa: Allocator,
inst: Zir.Inst.Index,
- scope_file: *Module.File,
+ scope_file: *Zcu.File,
parent_decl_node: Ast.Node.Index,
indent: u32,
stream: anytype,
@@ -122,7 +121,7 @@ pub fn renderSingleInstruction(
const Writer = struct {
gpa: Allocator,
arena: Allocator,
- file: *Module.File,
+ file: *Zcu.File,
code: Zir,
indent: u32,
parent_decl_node: Ast.Node.Index,
src/RangeSet.zig
@@ -6,13 +6,11 @@ const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const RangeSet = @This();
const LazySrcLoc = Zcu.LazySrcLoc;
+pt: Zcu.PerThread,
ranges: std.ArrayList(Range),
-module: *Module,
pub const Range = struct {
first: InternPool.Index,
@@ -20,10 +18,10 @@ pub const Range = struct {
src: LazySrcLoc,
};
-pub fn init(allocator: std.mem.Allocator, module: *Module) RangeSet {
+pub fn init(allocator: std.mem.Allocator, pt: Zcu.PerThread) RangeSet {
return .{
+ .pt = pt,
.ranges = std.ArrayList(Range).init(allocator),
- .module = module,
};
}
@@ -37,8 +35,8 @@ pub fn add(
last: InternPool.Index,
src: LazySrcLoc,
) !?LazySrcLoc {
- const mod = self.module;
- const ip = &mod.intern_pool;
+ const pt = self.pt;
+ const ip = &pt.zcu.intern_pool;
const ty = ip.typeOf(first);
assert(ty == ip.typeOf(last));
@@ -47,8 +45,8 @@ pub fn add(
assert(ty == ip.typeOf(range.first));
assert(ty == ip.typeOf(range.last));
- if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), mod) and
- Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), mod))
+ if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), pt) and
+ Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), pt))
{
return range.src; // They overlap.
}
@@ -63,20 +61,20 @@ pub fn add(
}
/// Assumes a and b do not overlap
-fn lessThan(mod: *Module, a: Range, b: Range) bool {
- const ty = Type.fromInterned(mod.intern_pool.typeOf(a.first));
- return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, mod);
+fn lessThan(pt: Zcu.PerThread, a: Range, b: Range) bool {
+ const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(a.first));
+ return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, pt);
}
pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
- const mod = self.module;
- const ip = &mod.intern_pool;
+ const pt = self.pt;
+ const ip = &pt.zcu.intern_pool;
assert(ip.typeOf(first) == ip.typeOf(last));
if (self.ranges.items.len == 0)
return false;
- std.mem.sort(Range, self.ranges.items, mod, lessThan);
+ std.mem.sort(Range, self.ranges.items, pt, lessThan);
if (self.ranges.items[0].first != first or
self.ranges.items[self.ranges.items.len - 1].last != last)
@@ -95,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !
const prev = self.ranges.items[i];
// prev.last + 1 == cur.first
- try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, mod));
+ try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, pt));
try counter.addScalar(&counter, 1);
- const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, mod);
+ const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, pt);
if (!cur_start_int.eql(counter.toConst())) {
return false;
}
src/register_manager.zig
@@ -7,8 +7,6 @@ const Air = @import("Air.zig");
const StaticBitSet = std.bit_set.StaticBitSet;
const Type = @import("Type.zig");
const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
src/Sema.zig
@@ -5,7 +5,7 @@
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.
-mod: *Module,
+pt: Zcu.PerThread,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
@@ -146,7 +146,7 @@ const ComptimeAlloc = struct {
fn newComptimeAlloc(sema: *Sema, block: *Block, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
const idx = sema.comptime_allocs.items.len;
try sema.comptime_allocs.append(sema.gpa, .{
- .val = .{ .interned = try sema.mod.intern(.{ .undef = ty.toIntern() }) },
+ .val = .{ .interned = try sema.pt.intern(.{ .undef = ty.toIntern() }) },
.is_const = false,
.alignment = alignment,
.runtime_index = block.runtime_index,
@@ -433,7 +433,7 @@ pub const Block = struct {
fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
const parent = msg orelse return;
- const mod = sema.mod;
+ const pt = sema.pt;
const prefix = "expression is evaluated at comptime because ";
switch (cr) {
.c_import => |ci| {
@@ -451,7 +451,7 @@ pub const Block = struct {
ret_ty_src,
parent,
prefix ++ "the function returns a comptime-only type '{}'",
- .{rt.return_ty.fmt(mod)},
+ .{rt.return_ty.fmt(pt)},
);
try sema.explainWhyTypeIsComptime(parent, ret_ty_src, rt.return_ty);
},
@@ -538,7 +538,7 @@ pub const Block = struct {
}
pub fn wantSafety(block: *const Block) bool {
- return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
+ return block.want_safety orelse switch (block.sema.pt.zcu.optimizeMode()) {
.Debug => true,
.ReleaseSafe => true,
.ReleaseFast => false,
@@ -737,11 +737,12 @@ pub const Block = struct {
fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
const sema = block.sema;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
return block.addInst(.{
.tag = if (block.float_mode == .optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
- .ty = Air.internedToRef((try mod.vectorType(.{
+ .ty = Air.internedToRef((try pt.vectorType(.{
.len = sema.typeOf(lhs).vectorLen(mod),
.child = .bool_type,
})).toIntern()),
@@ -829,14 +830,14 @@ pub const Block = struct {
}
pub fn ownerModule(block: Block) *Package.Module {
- const zcu = block.sema.mod;
+ const zcu = block.sema.pt.zcu;
return zcu.namespacePtr(block.namespace).fileScope(zcu).mod;
}
fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index {
const sema = block.sema;
const gpa = sema.gpa;
- const zcu = sema.mod;
+ const zcu = sema.pt.zcu;
const ip = &zcu.intern_pool;
const file_index = block.getFileScopeIndex(zcu);
return ip.trackZir(gpa, file_index, inst);
@@ -992,7 +993,8 @@ fn analyzeBodyInner(
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const map = &sema.inst_map;
const tags = sema.code.instructions.items(.tag);
const datas = sema.code.instructions.items(.data);
@@ -1777,7 +1779,7 @@ fn analyzeBodyInner(
const err_union_ty = sema.typeOf(err_union);
if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
- err_union_ty.fmt(zcu),
+ err_union_ty.fmt(pt),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
@@ -1910,10 +1912,11 @@ pub fn toConstString(
air_inst: Air.Inst.Ref,
reason: NeededComptimeReason,
) ![]u8 {
+ const pt = sema.pt;
const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src);
const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
- return arr_val.toAllocatedBytes(arr_val.typeOf(sema.mod), sema.arena, sema.mod);
+ return arr_val.toAllocatedBytes(arr_val.typeOf(pt.zcu), sema.arena, pt);
}
pub fn resolveConstStringIntern(
@@ -1945,7 +1948,8 @@ fn resolveDestType(
strat: enum { remove_eu_opt, remove_eu, remove_opt },
builtin_name: []const u8,
) !Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const remove_eu = switch (strat) {
.remove_eu_opt, .remove_eu => true,
.remove_opt => false,
@@ -2062,7 +2066,8 @@ fn analyzeAsType(
}
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const comp = mod.comp;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -2076,16 +2081,16 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
// var addrs: [err_return_trace_addr_count]usize = undefined;
const err_return_trace_addr_count = 32;
- const addr_arr_ty = try mod.arrayType(.{
+ const addr_arr_ty = try pt.arrayType(.{
.len = err_return_trace_addr_count,
.child = .usize_type,
});
- const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));
+ const addrs_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(addr_arr_ty));
// var st: StackTrace = undefined;
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
- const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
+ const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty));
// st.instruction_addresses = &addrs;
const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses", .no_embedded_nulls);
@@ -2109,7 +2114,7 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
const val = (try sema.resolveValueAllowVariables(inst)) orelse return null;
if (val.isGenericPoison()) return error.GenericPoison;
- if (sema.mod.intern_pool.isVariable(val.toIntern())) return null;
+ if (sema.pt.zcu.intern_pool.isVariable(val.toIntern())) return null;
return val;
}
@@ -2133,7 +2138,8 @@ fn resolveDefinedValue(
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
) CompileError!?Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const val = try sema.resolveValue(air_ref) orelse return null;
if (val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, src);
@@ -2150,7 +2156,7 @@ fn resolveConstDefinedValue(
reason: NeededComptimeReason,
) CompileError!Value {
const val = try sema.resolveConstValue(block, src, air_ref, reason);
- if (val.isUndef(sema.mod)) return sema.failWithUseOfUndef(block, src);
+ if (val.isUndef(sema.pt.zcu)) return sema.failWithUseOfUndef(block, src);
return val;
}
@@ -2164,7 +2170,7 @@ fn resolveValueResolveLazy(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value
/// Lazy values are recursively resolved.
fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
const val = (try sema.resolveValue(inst)) orelse return null;
- if (sema.mod.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) {
+ if (sema.pt.zcu.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) {
.decl, .anon_decl, .comptime_alloc, .comptime_field => return null,
.int => {},
.eu_payload, .opt_payload, .arr_elem, .field => unreachable,
@@ -2174,6 +2180,7 @@ fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
/// Returns all InternPool keys representing values, including `variable`, `undef`, and `generic_poison`.
fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
+ const pt = sema.pt;
assert(inst != .none);
// First section of indexes correspond to a set number of constant values.
if (@intFromEnum(inst) < InternPool.static_len) {
@@ -2184,7 +2191,7 @@ fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Val
if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
if (inst.toInterned()) |ip_index| {
const val = Value.fromInterned(ip_index);
- if (val.getVariable(sema.mod) != null) return val;
+ if (val.getVariable(pt.zcu) != null) return val;
}
return opv;
}
@@ -2196,7 +2203,7 @@ fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Val
}
};
const val = Value.fromInterned(ip_index);
- if (val.isPtrToThreadLocal(sema.mod)) return null;
+ if (val.isPtrToThreadLocal(pt.zcu)) return null;
return val;
}
@@ -2225,7 +2232,7 @@ pub fn resolveFinalDeclValue(
});
};
if (val.isGenericPoison()) return error.GenericPoison;
- if (val.canMutateComptimeVarState(sema.mod)) {
+ if (val.canMutateComptimeVarState(sema.pt.zcu)) {
return sema.fail(block, src, "global variable contains reference to comptime var", .{});
}
return val;
@@ -2254,19 +2261,20 @@ fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileErro
}
fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
+ const pt = sema.pt;
return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{
- lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
+ lhs_ty.fmt(pt), rhs_ty.fmt(pt),
});
}
fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, non_optional_ty: Type) CompileError {
- const mod = sema.mod;
+ const pt = sema.pt;
const msg = msg: {
const msg = try sema.errMsg(src, "expected optional type, found '{}'", .{
- non_optional_ty.fmt(mod),
+ non_optional_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
- if (non_optional_ty.zigTypeTag(mod) == .ErrorUnion) {
+ if (non_optional_ty.zigTypeTag(pt.zcu) == .ErrorUnion) {
try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
try addDeclaredHereNote(sema, msg, non_optional_ty);
@@ -2276,14 +2284,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, non
}
fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
- const mod = sema.mod;
+ const pt = sema.pt;
const msg = msg: {
const msg = try sema.errMsg(src, "type '{}' does not support array initialization syntax", .{
- ty.fmt(mod),
+ ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
- if (ty.isSlice(mod)) {
- try sema.errNote(src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)});
+ if (ty.isSlice(pt.zcu)) {
+ try sema.errNote(src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(pt.zcu).fmt(pt)});
}
break :msg msg;
};
@@ -2291,8 +2299,9 @@ fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty
}
fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
+ const pt = sema.pt;
return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{
- ty.fmt(sema.mod),
+ ty.fmt(pt),
});
}
@@ -2303,17 +2312,19 @@ fn failWithErrorSetCodeMissing(
dest_err_set_ty: Type,
src_err_set_ty: Type,
) CompileError {
+ const pt = sema.pt;
return sema.fail(block, src, "expected type '{}', found type '{}'", .{
- dest_err_set_ty.fmt(sema.mod), src_err_set_ty.fmt(sema.mod),
+ dest_err_set_ty.fmt(pt), src_err_set_ty.fmt(pt),
});
}
fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
if (int_ty.zigTypeTag(zcu) == .Vector) {
const msg = msg: {
const msg = try sema.errMsg(src, "overflow of vector type '{}' with value '{}'", .{
- int_ty.fmt(zcu), val.fmtValue(zcu, sema),
+ int_ty.fmt(pt), val.fmtValue(pt, sema),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "when computing vector element at index '{d}'", .{vector_index});
@@ -2322,12 +2333,13 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty:
return sema.failWithOwnedErrorMsg(block, msg);
}
return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
- int_ty.fmt(zcu), val.fmtValue(zcu, sema),
+ int_ty.fmt(pt), val.fmtValue(pt, sema),
});
}
fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const msg = msg: {
const msg = try sema.errMsg(init_src, "value stored in comptime field does not match the default value of the field", .{});
errdefer msg.destroy(sema.gpa);
@@ -2358,14 +2370,15 @@ fn failWithInvalidFieldAccess(
object_ty: Type,
field_name: InternPool.NullTerminatedString,
) CompileError {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;
if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
const child_ty = inner_ty.optionalChild(mod);
if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
const msg = msg: {
- const msg = try sema.errMsg(src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "optional type '{}' does not support field access", .{object_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "consider using '.?', 'orelse', or 'if'", .{});
break :msg msg;
@@ -2375,14 +2388,14 @@ fn failWithInvalidFieldAccess(
const child_ty = inner_ty.errorUnionPayload(mod);
if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
const msg = msg: {
- const msg = try sema.errMsg(src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "error union type '{}' does not support field access", .{object_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
+ return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(pt)});
}
fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool {
@@ -2408,7 +2421,8 @@ fn failWithComptimeErrorRetTrace(
src: LazySrcLoc,
name: InternPool.NullTerminatedString,
) CompileError {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const msg = msg: {
const msg = try sema.errMsg(src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)});
errdefer msg.destroy(sema.gpa);
@@ -2430,7 +2444,7 @@ pub fn errNote(
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
- return sema.mod.errNote(src, parent, format, args);
+ return sema.pt.zcu.errNote(src, parent, format, args);
}
fn addFieldErrNote(
@@ -2442,8 +2456,7 @@ fn addFieldErrNote(
args: anytype,
) !void {
@setCold(true);
- const zcu = sema.mod;
- const type_src = container_ty.srcLocOrNull(zcu) orelse return;
+ const type_src = container_ty.srcLocOrNull(sema.pt.zcu) orelse return;
const field_src: LazySrcLoc = .{
.base_node_inst = type_src.base_node_inst,
.offset = .{ .container_field_name = @intCast(field_index) },
@@ -2480,7 +2493,7 @@ pub fn fail(
pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
const gpa = sema.gpa;
- const mod = sema.mod;
+ const mod = sema.pt.zcu;
const ip = &mod.intern_pool;
if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
@@ -2545,8 +2558,7 @@ fn reparentOwnedErrorMsg(
comptime format: []const u8,
args: anytype,
) !void {
- const mod = sema.mod;
- const msg_str = try std.fmt.allocPrint(mod.gpa, format, args);
+ const msg_str = try std.fmt.allocPrint(sema.gpa, format, args);
const orig_notes = msg.notes.len;
msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
@@ -2630,16 +2642,16 @@ fn analyzeAsInt(
dest_ty: Type,
reason: NeededComptimeReason,
) !u64 {
- const mod = sema.mod;
const coerced = try sema.coerce(block, dest_ty, air_ref, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
- return (try val.getUnsignedIntAdvanced(mod, .sema)).?;
+ return (try val.getUnsignedIntAdvanced(sema.pt, .sema)).?;
}
/// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
/// resolves this into a list of `InternPool.CaptureValue` allocated by `arena`.
fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const parent_captures: InternPool.CaptureValue.Slice = zcu.namespacePtr(block.namespace).getType(zcu).getCaptures(zcu);
@@ -2706,7 +2718,7 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) {
if (sema.builtin_type_target_index == .none) return wip_ty;
var new = wip_ty;
new.index = sema.builtin_type_target_index;
- sema.mod.intern_pool.resolveBuiltinType(new.index, wip_ty.index);
+ sema.pt.zcu.intern_pool.resolveBuiltinType(new.index, wip_ty.index);
return new;
}
@@ -2714,7 +2726,8 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) {
/// considered outdated on this update. If so, remove it from the pool
/// and return `true`.
fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
if (!zcu.comp.debug_incremental) return false;
@@ -2737,7 +2750,8 @@ fn zirStructDecl(
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
@@ -2796,10 +2810,10 @@ fn zirStructDecl(
.captures = captures,
} },
};
- const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, struct_init)) {
+ const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
- break :wip (try ip.getStructType(gpa, struct_init)).wip;
+ break :wip (try ip.getStructType(gpa, pt.tid, struct_init)).wip;
},
.wip => |wip| wip,
});
@@ -2815,7 +2829,7 @@ fn zirStructDecl(
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
- if (sema.mod.comp.debug_incremental) {
+ if (pt.zcu.comp.debug_incremental) {
try ip.addDependency(
sema.gpa,
AnalUnit.wrap(.{ .decl = new_decl_index }),
@@ -2836,7 +2850,7 @@ fn zirStructDecl(
try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index));
}
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
@@ -2850,7 +2864,8 @@ fn createAnonymousDeclTypeNamed(
anon_prefix: []const u8,
inst: ?Zir.Inst.Index,
) !InternPool.DeclIndex {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = sema.gpa;
const namespace = block.namespace;
@@ -2892,7 +2907,7 @@ fn createAnonymousDeclTypeNamed(
// some tooling may not support very long symbol names.
try writer.print("{}", .{Value.fmtValueFull(.{
.val = arg_val,
- .mod = zcu,
+ .pt = pt,
.opt_sema = sema,
.depth = 1,
})});
@@ -2953,7 +2968,8 @@ fn zirEnumDecl(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
@@ -3026,10 +3042,10 @@ fn zirEnumDecl(
.captures = captures,
} },
};
- const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, enum_init)) {
+ const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
- break :wip (try ip.getEnumType(gpa, enum_init)).wip;
+ break :wip (try ip.getEnumType(gpa, pt.tid, enum_init)).wip;
},
.wip => |wip| wip,
});
@@ -3051,7 +3067,7 @@ fn zirEnumDecl(
new_decl.owns_tv = true;
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
- if (sema.mod.comp.debug_incremental) {
+ if (pt.zcu.comp.debug_incremental) {
try mod.intern_pool.addDependency(
gpa,
AnalUnit.wrap(.{ .decl = new_decl_index }),
@@ -3118,21 +3134,21 @@ fn zirEnumDecl(
if (tag_type_ref != .none) {
const ty = try sema.resolveType(&enum_block, tag_ty_src, tag_type_ref);
if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
- return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
+ return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)});
}
break :ty ty;
} else if (fields_len == 0) {
- break :ty try mod.intType(.unsigned, 0);
+ break :ty try pt.intType(.unsigned, 0);
} else {
const bits = std.math.log2_int_ceil(usize, fields_len);
- break :ty try mod.intType(.unsigned, bits);
+ break :ty try pt.intType(.unsigned, bits);
}
};
wip_ty.setTagTy(ip, int_tag_ty.toIntern());
if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
- if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) {
+ if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) {
return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
}
}
@@ -3171,7 +3187,7 @@ fn zirEnumDecl(
.needed_comptime_reason = "enum tag value must be comptime-known",
});
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
- last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
+ last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
assert(conflict.kind == .value); // AstGen validated names are unique
const other_field_src: LazySrcLoc = .{
@@ -3179,7 +3195,7 @@ fn zirEnumDecl(
.offset = .{ .container_field_value = conflict.prev_field_idx },
};
const msg = msg: {
- const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod, sema)});
+ const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(pt, sema)});
errdefer msg.destroy(gpa);
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -3190,9 +3206,9 @@ fn zirEnumDecl(
} else if (any_values) overflow: {
var overflow: ?usize = null;
last_tag_val = if (last_tag_val) |val|
- try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
+ try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
else
- try mod.intValue(int_tag_ty, 0);
+ try pt.intValue(int_tag_ty, 0);
if (overflow != null) break :overflow true;
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
assert(conflict.kind == .value); // AstGen validated names are unique
@@ -3201,7 +3217,7 @@ fn zirEnumDecl(
.offset = .{ .container_field_value = conflict.prev_field_idx },
};
const msg = msg: {
- const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(sema.mod, sema)});
+ const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(pt, sema)});
errdefer msg.destroy(gpa);
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -3211,21 +3227,21 @@ fn zirEnumDecl(
break :overflow false;
} else overflow: {
assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null);
- last_tag_val = try mod.intValue(Type.comptime_int, field_i);
+ last_tag_val = try pt.intValue(Type.comptime_int, field_i);
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
- last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
+ last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
break :overflow false;
};
if (tag_overflow) {
const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{
- last_tag_val.?.fmtValue(mod, sema), int_tag_ty.fmt(mod),
+ last_tag_val.?.fmtValue(pt, sema), int_tag_ty.fmt(pt),
});
return sema.failWithOwnedErrorMsg(block, msg);
}
}
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.index);
}
@@ -3238,7 +3254,8 @@ fn zirUnionDecl(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
@@ -3298,10 +3315,10 @@ fn zirUnionDecl(
.captures = captures,
} },
};
- const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, union_init)) {
+ const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
- break :wip (try ip.getUnionType(gpa, union_init)).wip;
+ break :wip (try ip.getUnionType(gpa, pt.tid, union_init)).wip;
},
.wip => |wip| wip,
});
@@ -3317,7 +3334,7 @@ fn zirUnionDecl(
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
- if (sema.mod.comp.debug_incremental) {
+ if (pt.zcu.comp.debug_incremental) {
try mod.intern_pool.addDependency(
gpa,
AnalUnit.wrap(.{ .decl = new_decl_index }),
@@ -3338,7 +3355,7 @@ fn zirUnionDecl(
try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index));
}
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
@@ -3353,7 +3370,8 @@ fn zirOpaqueDecl(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -3387,10 +3405,10 @@ fn zirOpaqueDecl(
} },
};
// No `wrapWipTy` needed as no std.builtin types are opaque.
- const wip_ty = switch (try ip.getOpaqueType(gpa, opaque_init)) {
+ const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
- break :wip (try ip.getOpaqueType(gpa, opaque_init)).wip;
+ break :wip (try ip.getOpaqueType(gpa, pt.tid, opaque_init)).wip;
},
.wip => |wip| wip,
};
@@ -3406,7 +3424,7 @@ fn zirOpaqueDecl(
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
- if (sema.mod.comp.debug_incremental) {
+ if (pt.zcu.comp.debug_incremental) {
try ip.addDependency(
gpa,
AnalUnit.wrap(.{ .decl = new_decl_index }),
@@ -3426,7 +3444,7 @@ fn zirOpaqueDecl(
try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index));
}
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
}
@@ -3438,7 +3456,8 @@ fn zirErrorSetDecl(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
@@ -3457,20 +3476,22 @@ fn zirErrorSetDecl(
assert(!result.found_existing); // verified in AstGen
}
- return Air.internedToRef((try mod.errorSetFromUnsortedNames(names.keys())).toIntern());
+ return Air.internedToRef((try pt.errorSetFromUnsortedNames(names.keys())).toIntern());
}
fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
+ const pt = sema.pt;
+
if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
- try sema.fn_ret_ty.resolveFields(sema.mod);
+ try sema.fn_ret_ty.resolveFields(pt);
return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
}
- const target = sema.mod.getTarget();
- const ptr_type = try sema.mod.ptrTypeSema(.{
+ const target = pt.zcu.getTarget();
+ const ptr_type = try pt.ptrTypeSema(.{
.child = sema.fn_ret_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -3511,7 +3532,8 @@ fn ensureResultUsed(
ty: Type,
src: LazySrcLoc,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => return,
.ErrorSet => return sema.fail(block, src, "error set is ignored", .{}),
@@ -3526,7 +3548,7 @@ fn ensureResultUsed(
},
else => {
const msg = msg: {
- const msg = try sema.errMsg(src, "value of type '{}' ignored", .{ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "value of type '{}' ignored", .{ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "all non-void values must be used", .{});
try sema.errNote(src, msg, "to discard the value, assign it to '_'", .{});
@@ -3541,7 +3563,8 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const src = block.nodeOffset(inst_data.src_node);
@@ -3565,7 +3588,8 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
@@ -3604,7 +3628,8 @@ fn indexablePtrLen(
src: LazySrcLoc,
object: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const object_ty = sema.typeOf(object);
const is_pointer_to = object_ty.isSinglePointer(mod);
const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty;
@@ -3619,7 +3644,8 @@ fn indexablePtrLenOrNone(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
try checkMemOperand(sema, block, src, operand_ty);
if (operand_ty.ptrSize(mod) == .Many) return .none;
@@ -3632,6 +3658,7 @@ fn zirAllocExtended(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const pt = sema.pt;
const gpa = sema.gpa;
const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
const ty_src = block.src(.{ .node_offset_var_decl_ty = extra.data.src_node });
@@ -3673,9 +3700,9 @@ fn zirAllocExtended(
if (!small.is_const) {
try sema.validateVarType(block, ty_src, var_ty, false);
}
- const target = sema.mod.getTarget();
- try var_ty.resolveLayout(sema.mod);
- const ptr_type = try sema.mod.ptrTypeSema(.{
+ const target = pt.zcu.getTarget();
+ try var_ty.resolveLayout(pt);
+ const ptr_type = try sema.pt.ptrTypeSema(.{
.child = var_ty.toIntern(),
.flags = .{
.alignment = alignment,
@@ -3717,7 +3744,8 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
}
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const alloc = try sema.resolveInst(inst_data.operand);
const alloc_ty = sema.typeOf(alloc);
@@ -3749,7 +3777,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
assert(ptr.byte_offset == 0);
const alloc_index = ptr.base_addr.comptime_alloc;
const ct_alloc = sema.getComptimeAlloc(alloc_index);
- const interned = try ct_alloc.val.intern(mod, sema.arena);
+ const interned = try ct_alloc.val.intern(pt, sema.arena);
if (interned.canMutateComptimeVarState(mod)) {
// Preserve the comptime alloc, just make the pointer const.
ct_alloc.val = .{ .interned = interned.toIntern() };
@@ -3757,7 +3785,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
return sema.makePtrConst(block, alloc);
} else {
// Promote the constant to an anon decl.
- const new_mut_ptr = Air.internedToRef(try mod.intern(.{ .ptr = .{
+ const new_mut_ptr = Air.internedToRef(try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.base_addr = .{ .anon_decl = .{
.val = interned.toIntern(),
@@ -3778,7 +3806,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
// The value was initialized through RLS, so we didn't detect the runtime condition earlier.
// TODO: source location of runtime control flow
const init_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
- return sema.fail(block, init_src, "value with comptime-only type '{}' depends on runtime control flow", .{elem_ty.fmt(mod)});
+ return sema.fail(block, init_src, "value with comptime-only type '{}' depends on runtime control flow", .{elem_ty.fmt(pt)});
}
// This is a runtime value.
@@ -3788,7 +3816,8 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
/// If `alloc` is an inferred allocation, `resolved_inferred_ty` is taken to be its resolved
/// type. Otherwise, it may be `null`, and the type will be inferred from `alloc`.
fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(zcu);
@@ -3831,7 +3860,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const ct_alloc = try sema.newComptimeAlloc(block, elem_ty, ptr_info.flags.alignment);
- const alloc_ptr = try zcu.intern(.{ .ptr = .{
+ const alloc_ptr = try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.base_addr = .{ .comptime_alloc = ct_alloc },
.byte_offset = 0,
@@ -3909,7 +3938,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const idx_val = (try sema.resolveValue(data.rhs)).?;
break :blk .{
data.lhs,
- .{ .elem = try idx_val.toUnsignedIntSema(zcu) },
+ .{ .elem = try idx_val.toUnsignedIntSema(pt) },
};
},
.bitcast => .{
@@ -3935,32 +3964,32 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
};
const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &zcu.intern_pool).toIntern();
const new_ptr = switch (method) {
- .same_addr => try zcu.intern_pool.getCoerced(sema.gpa, decl_parent_ptr, new_ptr_ty),
+ .same_addr => try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, decl_parent_ptr, new_ptr_ty),
.opt_payload => ptr: {
// Set the optional to non-null at comptime.
// If the payload is OPV, we must use that value instead of undef.
const opt_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
const payload_ty = opt_ty.optionalChild(zcu);
- const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
- const opt_val = try zcu.intern(.{ .opt = .{
+ const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
+ const opt_val = try pt.intern(.{ .opt = .{
.ty = opt_ty.toIntern(),
.val = payload_val.toIntern(),
} });
try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty);
- break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern();
+ break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(pt)).toIntern();
},
.eu_payload => ptr: {
// Set the error union to non-error at comptime.
// If the payload is OPV, we must use that value instead of undef.
const eu_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
const payload_ty = eu_ty.errorUnionPayload(zcu);
- const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
- const eu_val = try zcu.intern(.{ .error_union = .{
+ const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
+ const eu_val = try pt.intern(.{ .error_union = .{
.ty = eu_ty.toIntern(),
.val = .{ .payload = payload_val.toIntern() },
} });
try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty);
- break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern();
+ break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(pt)).toIntern();
},
.field => |idx| ptr: {
const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
@@ -3969,14 +3998,14 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
// If the payload is OPV, there will not be a payload store, so we store that value.
// Otherwise, there will be a payload store to process later, so undef will suffice.
const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
- const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
- const tag_val = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx);
- const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val);
+ const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
+ const tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx);
+ const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val);
try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
}
- break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern();
+ break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, pt)).toIntern();
},
- .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(),
+ .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, pt)).toIntern(),
};
try ptr_mapping.put(air_ptr, new_ptr);
}
@@ -4020,7 +4049,8 @@ fn finishResolveComptimeKnownAllocPtr(
alloc_inst: Air.Inst.Index,
comptime_info: MaybeComptimeAlloc,
) CompileError!?InternPool.Index {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
// We're almost done - we have the resolved comptime value. We just need to
// eliminate the now-dead runtime instructions.
@@ -4041,19 +4071,19 @@ fn finishResolveComptimeKnownAllocPtr(
if (Value.fromInterned(result_val).canMutateComptimeVarState(zcu)) {
const alloc_index = existing_comptime_alloc orelse a: {
- const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(zcu));
+ const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(pt));
const alloc = sema.getComptimeAlloc(idx);
alloc.val = .{ .interned = result_val };
break :a idx;
};
sema.getComptimeAlloc(alloc_index).is_const = true;
- return try zcu.intern(.{ .ptr = .{
+ return try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.base_addr = .{ .comptime_alloc = alloc_index },
.byte_offset = 0,
} });
} else {
- return try zcu.intern(.{ .ptr = .{
+ return try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.base_addr = .{ .anon_decl = .{
.orig_ty = alloc_ty.toIntern(),
@@ -4065,9 +4095,9 @@ fn finishResolveComptimeKnownAllocPtr(
}
fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type {
- var ptr_info = ptr_ty.ptrInfo(sema.mod);
+ var ptr_info = ptr_ty.ptrInfo(sema.pt.zcu);
ptr_info.flags.is_const = true;
- return sema.mod.ptrTypeSema(ptr_info);
+ return sema.pt.ptrTypeSema(ptr_info);
}
fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
@@ -4076,7 +4106,7 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai
// Detect if a comptime value simply needs to have its type changed.
if (try sema.resolveValue(alloc)) |val| {
- return Air.internedToRef((try sema.mod.getCoerced(val, const_ptr_ty)).toIntern());
+ return Air.internedToRef((try sema.pt.getCoerced(val, const_ptr_ty)).toIntern());
}
return block.addBitCast(const_ptr_ty, alloc);
@@ -4103,14 +4133,16 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const tracy = trace(@src());
defer tracy.end();
+ const pt = sema.pt;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
return sema.analyzeComptimeAlloc(block, var_ty, .none);
}
- const target = sema.mod.getTarget();
- const ptr_type = try sema.mod.ptrTypeSema(.{
+ const target = pt.zcu.getTarget();
+ const ptr_type = try pt.ptrTypeSema(.{
.child = var_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -4125,6 +4157,8 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tracy = trace(@src());
defer tracy.end();
+ const pt = sema.pt;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
@@ -4132,8 +4166,8 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.analyzeComptimeAlloc(block, var_ty, .none);
}
try sema.validateVarType(block, ty_src, var_ty, false);
- const target = sema.mod.getTarget();
- const ptr_type = try sema.mod.ptrTypeSema(.{
+ const target = pt.zcu.getTarget();
+ const ptr_type = try pt.ptrTypeSema(.{
.child = var_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -4181,7 +4215,8 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
@@ -4206,7 +4241,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
.anon_decl => |a| a.val,
.comptime_alloc => |i| val: {
const alloc = sema.getComptimeAlloc(i);
- break :val (try alloc.val.intern(mod, sema.arena)).toIntern();
+ break :val (try alloc.val.intern(pt, sema.arena)).toIntern();
},
else => unreachable,
};
@@ -4232,7 +4267,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
}
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none);
- const final_ptr_ty = try mod.ptrTypeSema(.{
+ const final_ptr_ty = try pt.ptrTypeSema(.{
.child = final_elem_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
@@ -4244,7 +4279,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
try sema.validateVarType(block, ty_src, final_elem_ty, false);
} else if (try sema.resolveComptimeKnownAllocPtr(block, ptr, final_ptr_ty)) |ptr_val| {
const const_ptr_ty = try sema.makePtrTyConst(final_ptr_ty);
- const new_const_ptr = try mod.getCoerced(Value.fromInterned(ptr_val), const_ptr_ty);
+ const new_const_ptr = try pt.getCoerced(Value.fromInterned(ptr_val), const_ptr_ty);
// Remap the ZIR operand to the resolved pointer value
sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(new_const_ptr.toIntern()));
@@ -4252,7 +4287,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
// Unless the block is comptime, `alloc_inferred` always produces
// a runtime constant. The final inferred type needs to be
// fully resolved so it can be lowered in codegen.
- try final_elem_ty.resolveFully(mod);
+ try final_elem_ty.resolveFully(pt);
return;
}
@@ -4261,7 +4296,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
// The alloc wasn't comptime-known per the above logic, so the
// type cannot be comptime-only.
// TODO: source location of runtime control flow
- return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)});
+ return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(pt)});
}
// Change it to a normal alloc.
@@ -4318,7 +4353,8 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
}
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
@@ -4355,7 +4391,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (!object_ty.isIndexable(mod)) {
// Instead of using checkIndexable we customize this error.
const msg = msg: {
- const msg = try sema.errMsg(arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{});
@@ -4387,10 +4423,10 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.input_index = len_idx,
} });
try sema.errNote(a_src, msg, "length {} here", .{
- v.fmtValue(sema.mod, sema),
+ v.fmtValue(pt, sema),
});
try sema.errNote(arg_src, msg, "length {} here", .{
- arg_val.fmtValue(sema.mod, sema),
+ arg_val.fmtValue(pt, sema),
});
break :msg msg;
};
@@ -4427,7 +4463,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.input_index = i,
} });
try sema.errNote(arg_src, msg, "type '{}' has no upper bound", .{
- object_ty.fmt(sema.mod),
+ object_ty.fmt(pt),
});
}
break :msg msg;
@@ -4453,7 +4489,8 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
/// Given a `*E!?T`, returns a (valid) `*T`.
/// May invalidate already-stored payload data.
fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcLoc) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
var base_ptr = ptr;
while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
@@ -4471,7 +4508,8 @@ fn zirOptEuBasePtrInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
}
fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(pl_node.src_node);
const extra = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
@@ -4503,10 +4541,10 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
switch (val_ty.zigTypeTag(mod)) {
.Array, .Vector => {},
else => if (!val_ty.isTuple(mod)) {
- return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(mod), val_ty.fmt(mod) });
+ return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(pt), val_ty.fmt(pt) });
},
}
- const want_ty = try mod.arrayType(.{
+ const want_ty = try pt.arrayType(.{
.len = val_ty.arrayLen(mod),
.child = elem_ty.toIntern(),
.sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
@@ -4522,7 +4560,8 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
}
fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const un_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
const src = block.tokenOffset(un_tok.src_tok);
// In case of GenericPoison, we don't actually have a type, so this will be
@@ -4538,7 +4577,7 @@ fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
if (ty_operand.isGenericPoison()) return;
if (ty_operand.optEuBaseType(mod).zigTypeTag(mod) != .Pointer) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "expected type '{}', found pointer", .{ty_operand.fmt(mod)});
+ const msg = try sema.errMsg(src, "expected type '{}', found pointer", .{ty_operand.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "address-of operator always returns a pointer", .{});
break :msg msg;
@@ -4551,7 +4590,8 @@ fn zirValidateArrayInitRefTy(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(pl_node.src_node);
const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, pl_node.payload_index).data;
@@ -4565,7 +4605,7 @@ fn zirValidateArrayInitRefTy(
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice, .Many => {
// Use array of correct length
- const arr_ty = try mod.arrayType(.{
+ const arr_ty = try pt.arrayType(.{
.len = extra.elem_count,
.child = ptr_ty.childType(mod).toIntern(),
.sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
@@ -4593,7 +4633,8 @@ fn zirValidateArrayInitTy(
inst: Zir.Inst.Index,
is_result_ty: bool,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const ty_src: LazySrcLoc = if (is_result_ty) src else block.src(.{ .node_offset_init_ty = inst_data.src_node });
@@ -4615,7 +4656,8 @@ fn validateArrayInitTy(
init_count: u32,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Array => {
const array_len = ty.arrayLen(mod);
@@ -4636,7 +4678,7 @@ fn validateArrayInitTy(
return;
},
.Struct => if (ty.isTuple(mod)) {
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
const array_len = ty.arrayLen(mod);
if (init_count > array_len) {
return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
@@ -4656,7 +4698,8 @@ fn zirValidateStructInitTy(
inst: Zir.Inst.Index,
is_result_ty: bool,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ty = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) {
@@ -4681,7 +4724,8 @@ fn zirValidatePtrStructInit(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const init_src = block.nodeOffset(validate_inst.src_node);
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -4716,7 +4760,8 @@ fn validateUnionInit(
instrs: []const Zir.Inst.Index,
union_ptr: Air.Inst.Ref,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
if (instrs.len != 1) {
@@ -4814,7 +4859,7 @@ fn validateUnionInit(
}
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
- const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const field_type = union_ty.unionFieldType(tag_val, mod).?;
if (try sema.typeHasOnePossibleValue(field_type)) |field_only_value| {
@@ -4848,7 +4893,7 @@ fn validateUnionInit(
}
block.instructions.shrinkRetainingCapacity(block_index);
- const union_val = try mod.intern(.{ .un = .{
+ const union_val = try pt.intern(.{ .un = .{
.ty = union_ty.toIntern(),
.tag = tag_val.toIntern(),
.val = val.toIntern(),
@@ -4875,7 +4920,8 @@ fn validateStructInit(
init_src: LazySrcLoc,
instrs: []const Zir.Inst.Index,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -4914,7 +4960,7 @@ fn validateStructInit(
if (block.is_comptime and
(try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
{
- try struct_ty.resolveLayout(mod);
+ try struct_ty.resolveLayout(pt);
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
@@ -4922,7 +4968,7 @@ fn validateStructInit(
const i: u32 = @intCast(i_usize);
if (field_ptr != .none) continue;
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveStructFieldInits(pt);
const default_val = struct_ty.structFieldDefaultValue(i, mod);
if (default_val.toIntern() == .unreachable_value) {
const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
@@ -4971,7 +5017,7 @@ fn validateStructInit(
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveStructFieldInits(pt);
// We collect the comptime field values in case the struct initialization
// ends up being comptime-known.
@@ -5094,7 +5140,7 @@ fn validateStructInit(
for (block.instructions.items[first_block_index..]) |cur_inst| {
while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) {
const field_ty = struct_ty.structFieldType(field_indices[init_index], mod);
- if (try field_ty.onePossibleValue(mod)) |_| continue;
+ if (try field_ty.onePossibleValue(pt)) |_| continue;
field_ptr_ref = sema.inst_map.get(instrs[init_index]).?;
}
switch (air_tags[@intFromEnum(cur_inst)]) {
@@ -5122,7 +5168,7 @@ fn validateStructInit(
}
block.instructions.shrinkRetainingCapacity(block_index);
- const struct_val = try mod.intern(.{ .aggregate = .{
+ const struct_val = try pt.intern(.{ .aggregate = .{
.ty = struct_ty.toIntern(),
.storage = .{ .elems = field_values },
} });
@@ -5130,7 +5176,7 @@ fn validateStructInit(
try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
return;
}
- try struct_ty.resolveLayout(mod);
+ try struct_ty.resolveLayout(pt);
// Our task is to insert `store` instructions for all the default field values.
for (found_fields, 0..) |field_ptr, i| {
@@ -5152,7 +5198,8 @@ fn zirValidatePtrArrayInit(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const init_src = block.nodeOffset(validate_inst.src_node);
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -5175,7 +5222,7 @@ fn zirValidatePtrArrayInit(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- try array_ty.resolveStructFieldInits(mod);
+ try array_ty.resolveStructFieldInits(pt);
var i = instrs.len;
while (i < array_len) : (i += 1) {
const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
@@ -5218,7 +5265,7 @@ fn zirValidatePtrArrayInit(
// sentinel-terminated array, the sentinel will not have been populated by
// any ZIR instructions at comptime; we need to do that here.
if (array_ty.sentinel(mod)) |sentinel_val| {
- const array_len_ref = try mod.intRef(Type.usize, array_len);
+ const array_len_ref = try pt.intRef(Type.usize, array_len);
const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
const sentinel = Air.internedToRef(sentinel_val.toIntern());
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
@@ -5244,8 +5291,8 @@ fn zirValidatePtrArrayInit(
if (array_ty.isTuple(mod)) {
if (array_ty.structFieldIsComptime(i, mod))
- try array_ty.resolveStructFieldInits(mod);
- if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
+ try array_ty.resolveStructFieldInits(pt);
+ if (try array_ty.structFieldValueComptime(pt, i)) |opv| {
element_vals[i] = opv.toIntern();
continue;
}
@@ -5347,7 +5394,7 @@ fn zirValidatePtrArrayInit(
}
block.instructions.shrinkRetainingCapacity(block_index);
- const array_val = try mod.intern(.{ .aggregate = .{
+ const array_val = try pt.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .elems = element_vals },
} });
@@ -5357,18 +5404,19 @@ fn zirValidatePtrArrayInit(
}
fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
if (operand_ty.zigTypeTag(mod) != .Pointer) {
- return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(mod)});
+ return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(pt)});
} else switch (operand_ty.ptrSize(mod)) {
.One, .C => {},
- .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(mod)}),
- .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(mod)}),
+ .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(pt)}),
+ .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(pt)}),
}
if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) {
@@ -5386,7 +5434,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const msg = try sema.errMsg(
src,
"values of type '{}' must be comptime-known, but operand value is runtime-known",
- .{elem_ty.fmt(mod)},
+ .{elem_ty.fmt(pt)},
);
errdefer msg.destroy(sema.gpa);
@@ -5398,7 +5446,8 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
}
fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.ValidateDestructure, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -5414,7 +5463,7 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
if (!can_destructure) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "type '{}' cannot be destructured", .{operand_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "type '{}' cannot be destructured", .{operand_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(destructure_src, msg, "result destructured here", .{});
break :msg msg;
@@ -5441,7 +5490,8 @@ fn failWithBadMemberAccess(
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const kw_name = switch (agg_ty.zigTypeTag(mod)) {
.Union => "union",
.Struct => "struct",
@@ -5451,12 +5501,12 @@ fn failWithBadMemberAccess(
};
if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) {
return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{
- agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool),
+ agg_ty.fmt(pt), field_name.fmt(&mod.intern_pool),
});
};
return sema.fail(block, field_src, "{s} '{}' has no member named '{}'", .{
- kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool),
+ kw_name, agg_ty.fmt(pt), field_name.fmt(&mod.intern_pool),
});
}
@@ -5468,8 +5518,8 @@ fn failWithBadStructFieldAccess(
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
- const zcu = sema.mod;
- const gpa = sema.gpa;
+ const zcu = sema.pt.zcu;
+ const ip = &zcu.intern_pool;
const decl = zcu.declPtr(struct_type.decl.unwrap().?);
const fqn = try decl.fullyQualifiedName(zcu);
@@ -5477,9 +5527,9 @@ fn failWithBadStructFieldAccess(
const msg = try sema.errMsg(
field_src,
"no field named '{}' in struct '{}'",
- .{ field_name.fmt(&zcu.intern_pool), fqn.fmt(&zcu.intern_pool) },
+ .{ field_name.fmt(ip), fqn.fmt(ip) },
);
- errdefer msg.destroy(gpa);
+ errdefer msg.destroy(sema.gpa);
try sema.errNote(struct_ty.srcLoc(zcu), msg, "struct declared here", .{});
break :msg msg;
};
@@ -5494,7 +5544,8 @@ fn failWithBadUnionFieldAccess(
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
- const zcu = sema.mod;
+ const zcu = sema.pt.zcu;
+ const ip = &zcu.intern_pool;
const gpa = sema.gpa;
const decl = zcu.declPtr(union_obj.decl);
@@ -5504,7 +5555,7 @@ fn failWithBadUnionFieldAccess(
const msg = try sema.errMsg(
field_src,
"no field named '{}' in union '{}'",
- .{ field_name.fmt(&zcu.intern_pool), fqn.fmt(&zcu.intern_pool) },
+ .{ field_name.fmt(ip), fqn.fmt(ip) },
);
errdefer msg.destroy(gpa);
try sema.errNote(union_ty.srcLoc(zcu), msg, "union declared here", .{});
@@ -5514,9 +5565,9 @@ fn failWithBadUnionFieldAccess(
}
fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
- const mod = sema.mod;
- const src_loc = decl_ty.srcLocOrNull(mod) orelse return;
- const category = switch (decl_ty.zigTypeTag(mod)) {
+ const zcu = sema.pt.zcu;
+ const src_loc = decl_ty.srcLocOrNull(zcu) orelse return;
+ const category = switch (decl_ty.zigTypeTag(zcu)) {
.Union => "union",
.Struct => "struct",
.Enum => "enum",
@@ -5575,7 +5626,8 @@ fn storeToInferredAllocComptime(
operand: Air.Inst.Ref,
iac: *Air.Inst.Data.InferredAllocComptime,
) CompileError!void {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
// There will be only one store_to_inferred_ptr because we are running at comptime.
// The alloc will turn into a Decl or a ComptimeAlloc.
@@ -5584,7 +5636,7 @@ fn storeToInferredAllocComptime(
.needed_comptime_reason = "value being stored to a comptime variable must be comptime-known",
});
};
- const alloc_ty = try zcu.ptrTypeSema(.{
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = operand_ty.toIntern(),
.flags = .{
.alignment = iac.alignment,
@@ -5592,7 +5644,7 @@ fn storeToInferredAllocComptime(
},
});
if (iac.is_const and !operand_val.canMutateComptimeVarState(zcu)) {
- iac.ptr = try zcu.intern(.{ .ptr = .{
+ iac.ptr = try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.base_addr = .{ .anon_decl = .{
.val = operand_val.toIntern(),
@@ -5603,7 +5655,7 @@ fn storeToInferredAllocComptime(
} else {
const alloc_index = try sema.newComptimeAlloc(block, operand_ty, iac.alignment);
sema.getComptimeAlloc(alloc_index).val = .{ .interned = operand_val.toIntern() };
- iac.ptr = try zcu.intern(.{ .ptr = .{
+ iac.ptr = try pt.intern(.{ .ptr = .{
.ty = alloc_ty.toIntern(),
.base_addr = .{ .comptime_alloc = alloc_index },
.byte_offset = 0,
@@ -5624,7 +5676,8 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const zir_tags = sema.code.instructions.items(.tag);
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[@intFromEnum(inst)].pl_node;
@@ -5662,23 +5715,23 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code);
return sema.addStrLit(
- try sema.mod.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls),
+ try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls),
bytes.len,
);
}
fn addNullTerminatedStrLit(sema: *Sema, string: InternPool.NullTerminatedString) CompileError!Air.Inst.Ref {
- return sema.addStrLit(string.toString(), string.length(&sema.mod.intern_pool));
+ return sema.addStrLit(string.toString(), string.length(&sema.pt.zcu.intern_pool));
}
fn addStrLit(sema: *Sema, string: InternPool.String, len: u64) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
- const array_ty = try mod.arrayType(.{
+ const pt = sema.pt;
+ const array_ty = try pt.arrayType(.{
.len = len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const val = try mod.intern(.{ .aggregate = .{
+ const val = try pt.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .bytes = string },
} });
@@ -5690,16 +5743,16 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref {
}
fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index {
- const mod = sema.mod;
- const ptr_ty = (try mod.ptrTypeSema(.{
- .child = mod.intern_pool.typeOf(val),
+ const pt = sema.pt;
+ const ptr_ty = (try pt.ptrTypeSema(.{
+ .child = pt.zcu.intern_pool.typeOf(val),
.flags = .{
.alignment = .none,
.is_const = true,
.address_space = .generic,
},
})).toIntern();
- return mod.intern(.{ .ptr = .{
+ return pt.intern(.{ .ptr = .{
.ty = ptr_ty,
.base_addr = .{ .anon_decl = .{
.val = val,
@@ -5715,7 +5768,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
defer tracy.end();
const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].int;
- return sema.mod.intRef(Type.comptime_int, int);
+ return sema.pt.intRef(Type.comptime_int, int);
}
fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5723,7 +5776,6 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].str;
const byte_count = int.len * @sizeOf(std.math.big.Limb);
const limb_bytes = sema.code.string_bytes[@intFromEnum(int.start)..][0..byte_count];
@@ -5734,7 +5786,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const limbs = try sema.arena.alloc(std.math.big.Limb, int.len);
@memcpy(mem.sliceAsBytes(limbs), limb_bytes);
- return Air.internedToRef((try mod.intValue_big(Type.comptime_int, .{
+ return Air.internedToRef((try sema.pt.intValue_big(Type.comptime_int, .{
.limbs = limbs,
.positive = true,
})).toIntern());
@@ -5743,7 +5795,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const number = sema.code.instructions.items(.data)[@intFromEnum(inst)].float;
- return Air.internedToRef((try sema.mod.floatValue(
+ return Air.internedToRef((try sema.pt.floatValue(
Type.comptime_float,
number,
)).toIntern());
@@ -5754,7 +5806,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
const number = extra.get();
- return Air.internedToRef((try sema.mod.floatValue(Type.comptime_float, number)).toIntern());
+ return Air.internedToRef((try sema.pt.floatValue(Type.comptime_float, number)).toIntern());
}
fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -5775,10 +5827,11 @@ fn zirCompileLog(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
var managed = mod.compile_log_text.toManaged(sema.gpa);
- defer sema.mod.compile_log_text = managed.moveToUnmanaged();
+ defer pt.zcu.compile_log_text = managed.moveToUnmanaged();
const writer = managed.writer();
const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
@@ -5792,10 +5845,10 @@ fn zirCompileLog(
const arg_ty = sema.typeOf(arg);
if (try sema.resolveValueResolveLazy(arg)) |val| {
try writer.print("@as({}, {})", .{
- arg_ty.fmt(mod), val.fmtValue(mod, sema),
+ arg_ty.fmt(pt), val.fmtValue(pt, sema),
});
} else {
- try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)});
+ try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(pt)});
}
}
try writer.print("\n", .{});
@@ -5835,7 +5888,8 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = parent_block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
@@ -5906,7 +5960,8 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = sema.gpa;
const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
@@ -6005,7 +6060,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
zcu.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
- try zcu.ensureFileAnalyzed(result.file_index);
+ try pt.ensureFileAnalyzed(result.file_index);
const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?;
return sema.analyzeDeclVal(parent_block, src, file_root_decl_index);
}
@@ -6147,7 +6202,8 @@ fn resolveAnalyzedBlock(
defer tracy.end();
const gpa = sema.gpa;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
// Blocks must terminate with noreturn instruction.
assert(child_block.instructions.items.len != 0);
@@ -6258,7 +6314,7 @@ fn resolveAnalyzedBlock(
const type_src = src; // TODO: better source location
if (try sema.typeRequiresComptime(resolved_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(mod)});
+ const msg = try sema.errMsg(type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?;
@@ -6353,7 +6409,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -6388,7 +6445,8 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -6421,7 +6479,8 @@ pub fn analyzeExport(
exported_decl_index: InternPool.DeclIndex,
) !void {
const gpa = sema.gpa;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (options.linkage == .internal)
return;
@@ -6433,7 +6492,7 @@ pub fn analyzeExport(
if (!try sema.validateExternType(export_ty, .other)) {
const msg = msg: {
- const msg = try sema.errMsg(src, "unable to export type '{}'", .{export_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "unable to export type '{}'", .{export_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsNotExtern(msg, src, export_ty, .other);
@@ -6460,7 +6519,8 @@ pub fn analyzeExport(
}
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src = block.builtinCallArgSrc(extra.node, 0);
const src = block.nodeOffset(extra.node);
@@ -6502,7 +6562,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
}
fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src = block.builtinCallArgSrc(extra.node, 0);
@@ -6628,7 +6689,8 @@ fn addDbgVar(
) CompileError!void {
if (block.is_comptime or block.ownerModule().strip) return;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
const val_ty = switch (air_tag) {
.dbg_var_ptr => operand_ty.childType(mod),
@@ -6669,7 +6731,8 @@ fn addDbgVar(
}
fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const src = block.tokenOffset(inst_data.src_tok);
const decl_name = try mod.intern_pool.getOrPutString(
@@ -6682,7 +6745,8 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const src = block.tokenOffset(inst_data.src_tok);
const decl_name = try mod.intern_pool.getOrPutString(
@@ -6695,7 +6759,8 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.DeclIndex {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
var namespace = block.namespace;
while (true) {
if (try sema.lookupInNamespace(block, src, namespace.toOptional(), name, false)) |decl_index| {
@@ -6716,7 +6781,8 @@ fn lookupInNamespace(
ident_name: InternPool.NullTerminatedString,
observe_usingnamespace: bool,
) CompileError!?InternPool.DeclIndex {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const namespace_index = opt_namespace_index.unwrap() orelse return null;
const namespace = mod.namespacePtr(namespace_index);
@@ -6811,7 +6877,8 @@ fn lookupInNamespace(
}
fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const func_val = (try sema.resolveValue(func_inst)) orelse return null;
if (func_val.isUndef(mod)) return null;
const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
@@ -6827,18 +6894,19 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
}
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
if (block.is_comptime or block.is_typeof) {
- const index_val = try mod.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len);
+ const index_val = try pt.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len);
return Air.internedToRef(index_val.toIntern());
}
if (!block.ownerModule().error_tracing) return .none;
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) {
error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"),
@@ -6864,7 +6932,8 @@ fn popErrorReturnTrace(
operand: Air.Inst.Ref,
saved_error_trace_index: Air.Inst.Ref,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
var is_non_error: ?bool = null;
var is_non_error_inst: Air.Inst.Ref = undefined;
@@ -6878,9 +6947,9 @@ fn popErrorReturnTrace(
// AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
// the result is comptime-known to be a non-error. Either way, pop unconditionally.
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
- const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
+ const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true);
@@ -6904,9 +6973,9 @@ fn popErrorReturnTrace(
defer then_block.instructions.deinit(gpa);
// If non-error, then pop the error return trace by restoring the index.
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
- const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
+ const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true);
@@ -6947,7 +7016,8 @@ fn zirCall(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const callee_src = block.src(.{ .node_offset_call_func = inst_data.src_node });
const call_src = block.nodeOffset(inst_data.src_node);
@@ -7031,8 +7101,8 @@ fn zirCall(
// If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
// need to clean-up our own trace if we were passed to a non-error-handling expression.
if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls);
const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);
@@ -7065,7 +7135,8 @@ fn checkCallArgumentCount(
total_args: usize,
member_fn: bool,
) !Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
@@ -7082,7 +7153,7 @@ fn checkCallArgumentCount(
{
const msg = msg: {
const msg = try sema.errMsg(func_src, "cannot call optional type '{}'", .{
- callee_ty.fmt(mod),
+ callee_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(func_src, msg, "consider using '.?', 'orelse' or 'if'", .{});
@@ -7093,7 +7164,7 @@ fn checkCallArgumentCount(
},
else => {},
}
- return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)});
+ return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(pt)});
};
const func_ty_info = mod.typeToFunc(func_ty).?;
@@ -7142,7 +7213,8 @@ fn callBuiltin(
args: []const Air.Inst.Ref,
operation: CallOperation,
) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const callee_ty = sema.typeOf(builtin_fn);
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag(mod)) {
@@ -7155,7 +7227,7 @@ fn callBuiltin(
},
else => {},
}
- std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)});
+ std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(pt)});
};
const func_ty_info = mod.typeToFunc(func_ty).?;
@@ -7261,7 +7333,8 @@ const CallArgsInfo = union(enum) {
func_ty_info: InternPool.Key.FuncType,
func_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const param_count = func_ty_info.param_types.len;
const uncoerced_arg: Air.Inst.Ref = switch (cai) {
inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
@@ -7438,7 +7511,8 @@ fn analyzeCall(
call_dbg_node: ?Zir.Inst.Index,
operation: CallOperation,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const callee_ty = sema.typeOf(func);
@@ -7741,10 +7815,10 @@ fn analyzeCall(
const ies = try sema.arena.create(InferredErrorSet);
ies.* = .{ .func = .none };
sema.fn_ret_ty_ies = ies;
- sema.fn_ret_ty = Type.fromInterned((try ip.get(gpa, .{ .error_union_type = .{
+ sema.fn_ret_ty = Type.fromInterned(try pt.intern(.{ .error_union_type = .{
.error_set_type = .adhoc_inferred_error_set_type,
.payload_type = sema.fn_ret_ty.toIntern(),
- } })));
+ } }));
}
// This `res2` is here instead of directly breaking from `res` due to a stage1
@@ -7816,7 +7890,7 @@ fn analyzeCall(
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(mod)) {
- _ = try mod.intern(.{ .memoized_call = .{
+ _ = try pt.intern(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = result_transformed,
@@ -7921,7 +7995,8 @@ fn analyzeCall(
}
fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const target = mod.getTarget();
const backend = mod.comp.getZigBackend();
if (!target_util.supportsTailCall(target, backend)) {
@@ -7932,7 +8007,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ
const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index);
if (!func_ty.eql(func_decl.typeOf(mod), mod)) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
- func_ty.fmt(mod), func_decl.typeOf(mod).fmt(mod),
+ func_ty.fmt(pt), func_decl.typeOf(mod).fmt(pt),
});
}
_ = try block.addUnOp(.ret, result);
@@ -7954,7 +8029,7 @@ fn analyzeInlineCallArg(
func_ty_info: InternPool.Key.FuncType,
func_inst: Air.Inst.Ref,
) !?Air.Inst.Ref {
- const mod = ics.sema.mod;
+ const mod = ics.sema.pt.zcu;
const ip = &mod.intern_pool;
const zir_tags = ics.callee().code.instructions.items(.tag);
switch (zir_tags[@intFromEnum(inst)]) {
@@ -8084,7 +8159,8 @@ fn instantiateGenericCall(
call_tag: Air.Inst.Tag,
call_dbg_node: ?Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const gpa = sema.gpa;
const ip = &zcu.intern_pool;
@@ -8127,7 +8203,7 @@ fn instantiateGenericCall(
// `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
// new, monomorphized function, with the comptime parameters elided.
var child_sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = sema.arena,
.code = fn_zir,
@@ -8358,7 +8434,8 @@ fn instantiateGenericCall(
}
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const tuple = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| tuple,
@@ -8373,9 +8450,8 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
}
fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
const int_type = sema.code.instructions.items(.data)[@intFromEnum(inst)].int_type;
- const ty = try mod.intType(int_type.signedness, int_type.bit_count);
+ const ty = try sema.pt.intType(int_type.signedness, int_type.bit_count);
return Air.internedToRef(ty.toIntern());
}
@@ -8383,22 +8459,24 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
if (child_type.zigTypeTag(mod) == .Opaque) {
- return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)});
+ return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(pt)});
} else if (child_type.zigTypeTag(mod) == .Null) {
- return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)});
+ return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(pt)});
}
- const opt_type = try mod.optionalType(child_type.toIntern());
+ const opt_type = try pt.optionalType(child_type.toIntern());
return Air.internedToRef(opt_type.toIntern());
}
fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
const maybe_wrapped_indexable_ty = sema.resolveType(block, LazySrcLoc.unneeded, bin.lhs) catch |err| switch (err) {
// Since this is a ZIR instruction that returns a type, encountering
@@ -8409,7 +8487,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
else => |e| return e,
};
const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod);
- try indexable_ty.resolveFields(mod);
+ try indexable_ty.resolveFields(pt);
assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
if (indexable_ty.zigTypeTag(mod) == .Struct) {
const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod);
@@ -8421,7 +8499,8 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const maybe_wrapped_ptr_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) {
error.GenericPoison => return .generic_poison_type,
@@ -8439,7 +8518,8 @@ fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(un_node.src_node);
const ptr_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) {
@@ -8455,7 +8535,8 @@ fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
}
fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const vec_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) {
// Since this is a ZIR instruction that returns a type, encountering
@@ -8466,13 +8547,12 @@ fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
else => |e| return e,
};
if (!vec_ty.isVector(mod)) {
- return sema.fail(block, block.nodeOffset(un_node.src_node), "expected vector type, found '{}'", .{vec_ty.fmt(mod)});
+ return sema.fail(block, block.nodeOffset(un_node.src_node), "expected vector type, found '{}'", .{vec_ty.fmt(pt)});
}
return Air.internedToRef(vec_ty.childType(mod).toIntern());
}
fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const len_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const elem_type_src = block.builtinCallArgSrc(inst_data.src_node, 1);
@@ -8482,7 +8562,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}));
const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
try sema.checkVectorElemType(block, elem_type_src, elem_type);
- const vector_type = try mod.vectorType(.{
+ const vector_type = try sema.pt.vectorType(.{
.len = len,
.child = elem_type.toIntern(),
});
@@ -8502,7 +8582,7 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
});
const elem_type = try sema.resolveType(block, elem_src, extra.rhs);
try sema.validateArrayElemType(block, elem_type, elem_src);
- const array_ty = try sema.mod.arrayType(.{
+ const array_ty = try sema.pt.arrayType(.{
.len = len,
.child = elem_type.toIntern(),
});
@@ -8529,7 +8609,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const sentinel_val = try sema.resolveConstDefinedValue(block, sentinel_src, sentinel, .{
.needed_comptime_reason = "array sentinel value must be comptime-known",
});
- const array_ty = try sema.mod.arrayType(.{
+ const array_ty = try sema.pt.arrayType(.{
.len = len,
.sentinel = sentinel_val.toIntern(),
.child = elem_type.toIntern(),
@@ -8539,9 +8619,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (elem_type.zigTypeTag(mod) == .Opaque) {
- return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)});
+ return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(pt)});
} else if (elem_type.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
}
@@ -8567,7 +8648,8 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -8577,40 +8659,41 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
if (error_set.zigTypeTag(mod) != .ErrorSet) {
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
- error_set.fmt(mod),
+ error_set.fmt(pt),
});
}
try sema.validateErrorUnionPayloadType(block, payload, rhs_src);
- const err_union_ty = try mod.errorUnionType(error_set, payload);
+ const err_union_ty = try pt.errorUnionType(error_set, payload);
return Air.internedToRef(err_union_ty.toIntern());
}
fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (payload_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
- payload_ty.fmt(mod),
+ payload_ty.fmt(pt),
});
} else if (payload_ty.zigTypeTag(mod) == .ErrorSet) {
return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
- payload_ty.fmt(mod),
+ payload_ty.fmt(pt),
});
}
}
fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
- const mod = sema.mod;
+ const pt = sema.pt;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
- const name = try mod.intern_pool.getOrPutString(
+ const name = try pt.zcu.intern_pool.getOrPutString(
sema.gpa,
inst_data.get(sema.code),
.no_embedded_nulls,
);
- _ = try mod.getErrorValue(name);
+ _ = try pt.zcu.getErrorValue(name);
// Create an error set type with only this error value, and return the value.
- const error_set_type = try mod.singleErrorSetType(name);
- return Air.internedToRef((try mod.intern(.{ .err = .{
+ const error_set_type = try pt.singleErrorSetType(name);
+ return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = error_set_type.toIntern(),
.name = name,
} })));
@@ -8620,21 +8703,22 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
const operand_src = block.builtinCallArgSrc(extra.node, 0);
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try pt.errorIntType();
if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(mod)) {
- return mod.undefRef(err_int_ty);
+ return pt.undefRef(err_int_ty);
}
const err_name = ip.indexToKey(val.toIntern()).err.name;
- return Air.internedToRef((try mod.intValue(
+ return Air.internedToRef((try pt.intValue(
err_int_ty,
try mod.getErrorValue(err_name),
)).toIntern());
@@ -8646,10 +8730,10 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
else => |err_set_ty_index| {
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
switch (names.len) {
- 0 => return Air.internedToRef((try mod.intValue(err_int_ty, 0)).toIntern()),
+ 0 => return Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()),
1 => {
const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
- return mod.intRef(err_int_ty, int);
+ return pt.intRef(err_int_ty, int);
},
else => {},
}
@@ -8664,19 +8748,20 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
const operand_src = block.builtinCallArgSrc(extra.node, 0);
const uncasted_operand = try sema.resolveInst(extra.operand);
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try pt.errorIntType();
const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
- const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod));
+ const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt));
if (int > mod.global_error_set.count() or int == 0)
return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
- return Air.internedToRef((try mod.intern(.{ .err = .{
+ return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = .anyerror_type,
.name = mod.global_error_set.keys()[int],
} })));
@@ -8684,7 +8769,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety()) {
const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand);
- const zero_val = Air.internedToRef((try mod.intValue(err_int_ty, 0)).toIntern());
+ const zero_val = Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern());
const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val);
const ok = try block.addBinOp(.bool_and, is_lt_len, is_non_zero);
try sema.addSafetyCheck(block, src, ok, .invalid_error_code);
@@ -8702,7 +8787,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -8723,9 +8809,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
- return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)});
+ return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(pt)});
if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
- return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)});
+ return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(pt)});
// Anything merged with anyerror is anyerror.
if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) {
@@ -8758,16 +8844,18 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const name = inst_data.get(sema.code);
- return Air.internedToRef((try mod.intern(.{
+ return Air.internedToRef((try pt.intern(.{
.enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name, .no_embedded_nulls),
})));
}
fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -8777,7 +8865,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
.Enum => operand,
.Union => blk: {
- try operand_ty.resolveFields(mod);
+ try operand_ty.resolveFields(pt);
const tag_ty = operand_ty.unionTagType(mod) orelse {
return sema.fail(
block,
@@ -8791,7 +8879,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
},
else => {
return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
});
},
};
@@ -8802,20 +8890,20 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
// https://github.com/ziglang/zig/issues/15909
if (enum_tag_ty.enumFieldCount(mod) == 0 and !enum_tag_ty.isNonexhaustiveEnum(mod)) {
return sema.fail(block, operand_src, "cannot use @intFromEnum on empty enum '{}'", .{
- enum_tag_ty.fmt(mod),
+ enum_tag_ty.fmt(pt),
});
}
if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
- return Air.internedToRef((try mod.getCoerced(opv, int_tag_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(opv, int_tag_ty)).toIntern());
}
if (try sema.resolveValue(enum_tag)) |enum_tag_val| {
if (enum_tag_val.isUndef(mod)) {
- return mod.undefRef(int_tag_ty);
+ return pt.undefRef(int_tag_ty);
}
- const val = try enum_tag_val.intFromEnum(enum_tag_ty, mod);
+ const val = try enum_tag_val.intFromEnum(enum_tag_ty, pt);
return Air.internedToRef(val.toIntern());
}
@@ -8824,7 +8912,8 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -8833,7 +8922,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const operand = try sema.resolveInst(extra.rhs);
if (dest_ty.zigTypeTag(mod) != .Enum) {
- return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(mod)});
+ return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(pt)});
}
_ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
@@ -8841,10 +8930,10 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (dest_ty.isNonexhaustiveEnum(mod)) {
const int_tag_ty = dest_ty.intTagType(mod);
if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
- return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern());
}
return sema.fail(block, src, "int value '{}' out of range of non-exhaustive enum '{}'", .{
- int_val.fmtValue(mod, sema), dest_ty.fmt(mod),
+ int_val.fmtValue(pt, sema), dest_ty.fmt(pt),
});
}
if (int_val.isUndef(mod)) {
@@ -8852,10 +8941,10 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
if (!(try sema.enumHasInt(dest_ty, int_val))) {
return sema.fail(block, src, "enum '{}' has no tag with value '{}'", .{
- dest_ty.fmt(mod), int_val.fmtValue(mod, sema),
+ dest_ty.fmt(pt), int_val.fmtValue(pt, sema),
});
}
- return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern());
}
if (dest_ty.intTagType(mod).zigTypeTag(mod) == .ComptimeInt) {
@@ -8909,7 +8998,8 @@ fn analyzeOptionalPayloadPtr(
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const optional_ptr_ty = sema.typeOf(optional_ptr);
assert(optional_ptr_ty.zigTypeTag(zcu) == .Pointer);
@@ -8919,7 +9009,7 @@ fn analyzeOptionalPayloadPtr(
}
const child_type = opt_type.optionalChild(zcu);
- const child_pointer = try zcu.ptrTypeSema(.{
+ const child_pointer = try pt.ptrTypeSema(.{
.child = child_type.toIntern(),
.flags = .{
.is_const = optional_ptr_ty.isConstPtr(zcu),
@@ -8932,8 +9022,8 @@ fn analyzeOptionalPayloadPtr(
if (sema.isComptimeMutablePtr(ptr_val)) {
// Set the optional to non-null at comptime.
// If the payload is OPV, we must use that value instead of undef.
- const payload_val = try sema.typeHasOnePossibleValue(child_type) orelse try zcu.undefValue(child_type);
- const opt_val = try zcu.intern(.{ .opt = .{
+ const payload_val = try sema.typeHasOnePossibleValue(child_type) orelse try pt.undefValue(child_type);
+ const opt_val = try pt.intern(.{ .opt = .{
.ty = opt_type.toIntern(),
.val = payload_val.toIntern(),
} });
@@ -8943,13 +9033,13 @@ fn analyzeOptionalPayloadPtr(
const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr);
}
- return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
+ return Air.internedToRef((try ptr_val.ptrOptPayload(pt)).toIntern());
}
if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
if (val.isNull(zcu)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
- return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
+ return Air.internedToRef((try ptr_val.ptrOptPayload(pt)).toIntern());
}
}
@@ -8978,7 +9068,8 @@ fn zirOptionalPayload(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
@@ -8992,7 +9083,7 @@ fn zirOptionalPayload(
// TODO https://github.com/ziglang/zig/issues/6597
if (true) break :t operand_ty;
const ptr_info = operand_ty.ptrInfo(mod);
- break :t try mod.ptrTypeSema(.{
+ break :t try pt.ptrTypeSema(.{
.child = ptr_info.child,
.flags = .{
.alignment = ptr_info.flags.alignment,
@@ -9030,7 +9121,8 @@ fn zirErrUnionPayload(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
@@ -9038,7 +9130,7 @@ fn zirErrUnionPayload(
const err_union_ty = sema.typeOf(operand);
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
- err_union_ty.fmt(mod),
+ err_union_ty.fmt(pt),
});
}
return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false);
@@ -9053,7 +9145,8 @@ fn analyzeErrUnionPayload(
operand_src: LazySrcLoc,
safety_check: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const payload_ty = err_union_ty.errorUnionPayload(mod);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
if (val.getErrorName(mod).unwrap()) |name| {
@@ -9098,19 +9191,20 @@ fn analyzeErrUnionPayloadPtr(
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(zcu) == .Pointer);
if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
- operand_ty.childType(zcu).fmt(zcu),
+ operand_ty.childType(zcu).fmt(pt),
});
}
const err_union_ty = operand_ty.childType(zcu);
const payload_ty = err_union_ty.errorUnionPayload(zcu);
- const operand_pointer_ty = try zcu.ptrTypeSema(.{
+ const operand_pointer_ty = try pt.ptrTypeSema(.{
.child = payload_ty.toIntern(),
.flags = .{
.is_const = operand_ty.isConstPtr(zcu),
@@ -9123,8 +9217,8 @@ fn analyzeErrUnionPayloadPtr(
if (sema.isComptimeMutablePtr(ptr_val)) {
// Set the error union to non-error at comptime.
// If the payload is OPV, we must use that value instead of undef.
- const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try zcu.undefValue(payload_ty);
- const eu_val = try zcu.intern(.{ .error_union = .{
+ const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
+ const eu_val = try pt.intern(.{ .error_union = .{
.ty = err_union_ty.toIntern(),
.val = .{ .payload = payload_val.toIntern() },
} });
@@ -9135,13 +9229,13 @@ fn analyzeErrUnionPayloadPtr(
const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr);
}
- return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
+ return Air.internedToRef((try ptr_val.ptrEuPayload(pt)).toIntern());
}
if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
if (val.getErrorName(zcu).unwrap()) |name| {
return sema.failWithComptimeErrorRetTrace(block, src, name);
}
- return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
+ return Air.internedToRef((try ptr_val.ptrEuPayload(pt)).toIntern());
}
}
@@ -9175,18 +9269,19 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
});
}
const result_ty = operand_ty.errorUnionSet(mod);
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
- return Air.internedToRef((try mod.intern(.{ .err = .{
+ return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = result_ty.toIntern(),
.name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
} })));
@@ -9208,13 +9303,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
}
fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
- operand_ty.childType(mod).fmt(mod),
+ operand_ty.childType(mod).fmt(pt),
});
}
@@ -9223,7 +9319,7 @@ fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand:
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getErrorName(mod) != .none);
- return Air.internedToRef((try mod.intern(.{ .err = .{
+ return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = result_ty.toIntern(),
.name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
} })));
@@ -9240,10 +9336,11 @@ fn zirFunc(
inst: Zir.Inst.Index,
inferred_error_set: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = inst_data.src_node });
var extra_index = extra.end;
@@ -9372,7 +9469,8 @@ fn handleExternLibName(
lib_name: []const u8,
) CompileError!void {
blk: {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const comp = mod.comp;
const target = mod.getTarget();
log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
@@ -9485,7 +9583,8 @@ fn funcCommon(
noalias_bits: u32,
is_noinline: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const target = mod.getTarget();
const ip = &mod.intern_pool;
@@ -9539,13 +9638,13 @@ fn funcCommon(
if (!param_ty.isValidParamType(mod)) {
const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{
- opaque_str, param_ty.fmt(mod),
+ opaque_str, param_ty.fmt(pt),
});
}
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
- param_ty.fmt(mod), @tagName(cc_resolved),
+ param_ty.fmt(pt), @tagName(cc_resolved),
});
errdefer msg.destroy(sema.gpa);
@@ -9559,7 +9658,7 @@ fn funcCommon(
if (is_source_decl and requires_comptime and !param_is_comptime and has_body and !block.is_comptime) {
const msg = msg: {
const msg = try sema.errMsg(param_src, "parameter of type '{}' must be declared comptime", .{
- param_ty.fmt(mod),
+ param_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
@@ -9580,7 +9679,7 @@ fn funcCommon(
const err_code_size = target.ptrBitWidth();
switch (i) {
0 => if (param_ty.zigTypeTag(mod) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
- 1 => if (param_ty.bitSize(mod) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
+ 1 => if (param_ty.bitSize(pt) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}),
}
} else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}),
@@ -9606,7 +9705,7 @@ fn funcCommon(
if (inferred_error_set) {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
}
- const func_index = try ip.getFuncInstance(gpa, .{
+ const func_index = try ip.getFuncInstance(gpa, pt.tid, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.bare_return_type = bare_return_type.toIntern(),
@@ -9655,7 +9754,7 @@ fn funcCommon(
assert(has_body);
if (!ret_poison)
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
- const func_index = try ip.getFuncDeclIes(gpa, .{
+ const func_index = try ip.getFuncDeclIes(gpa, pt.tid, .{
.owner_decl = sema.owner_decl_index,
.param_types = param_types,
@@ -9695,7 +9794,7 @@ fn funcCommon(
);
}
- const func_ty = try ip.getFuncType(gpa, .{
+ const func_ty = try ip.getFuncType(gpa, pt.tid, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
@@ -9718,7 +9817,7 @@ fn funcCommon(
if (opt_lib_name) |lib_name| try sema.handleExternLibName(block, block.src(.{
.node_offset_lib_name = src_node_offset,
}), lib_name);
- const func_index = try ip.getExternFunc(gpa, .{
+ const func_index = try ip.getExternFunc(gpa, pt.tid, .{
.ty = func_ty,
.decl = sema.owner_decl_index,
.lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, opt_lib_name, .no_embedded_nulls),
@@ -9743,7 +9842,7 @@ fn funcCommon(
}
if (has_body) {
- const func_index = try ip.getFuncDecl(gpa, .{
+ const func_index = try ip.getFuncDecl(gpa, pt.tid, .{
.owner_decl = sema.owner_decl_index,
.ty = func_ty,
.cc = cc,
@@ -9809,7 +9908,8 @@ fn finishFunc(
is_generic: bool,
final_is_generic: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const gpa = sema.gpa;
const target = mod.getTarget();
@@ -9822,7 +9922,7 @@ fn finishFunc(
if (!return_type.isValidReturnType(mod)) {
const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
return sema.fail(block, ret_ty_src, "{s}return type '{}' not allowed", .{
- opaque_str, return_type.fmt(mod),
+ opaque_str, return_type.fmt(pt),
});
}
if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
@@ -9830,7 +9930,7 @@ fn finishFunc(
{
const msg = msg: {
const msg = try sema.errMsg(ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
- return_type.fmt(mod), @tagName(cc_resolved),
+ return_type.fmt(pt), @tagName(cc_resolved),
});
errdefer msg.destroy(gpa);
@@ -9852,7 +9952,7 @@ fn finishFunc(
const msg = try sema.errMsg(
ret_ty_src,
"function with comptime-only return type '{}' requires all parameters to be comptime",
- .{return_type.fmt(mod)},
+ .{return_type.fmt(pt)},
);
try sema.explainWhyTypeIsComptime(msg, ret_ty_src, return_type);
@@ -9938,8 +10038,8 @@ fn finishFunc(
if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
// Make sure that StackTrace's fields are resolved so that the backend can
// lower this fn type.
- const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try unresolved_stack_trace_ty.resolveFields(mod);
+ const unresolved_stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try unresolved_stack_trace_ty.resolveFields(pt);
}
return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
@@ -10068,7 +10168,8 @@ fn analyzeAs(
zir_operand: Zir.Inst.Ref,
no_cast_to_comptime_int: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand = try sema.resolveInst(zir_operand);
const operand_air_inst = sema.resolveInst(zir_dest_type) catch |err| switch (err) {
error.GenericPoison => return operand,
@@ -10098,7 +10199,8 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const tracy = trace(@src());
defer tracy.end();
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const operand = try sema.resolveInst(inst_data.operand);
@@ -10106,12 +10208,12 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const ptr_ty = operand_ty.scalarType(zcu);
const is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
if (!ptr_ty.isPtrAtRuntime(zcu)) {
- return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(zcu)});
+ return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)});
}
const pointee_ty = ptr_ty.childType(zcu);
if (try sema.typeRequiresComptime(ptr_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(zcu)});
+ const msg = try sema.errMsg(ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsComptime(msg, ptr_src, pointee_ty);
break :msg msg;
@@ -10121,32 +10223,32 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveValueIntable(operand)) |operand_val| ct: {
if (!is_vector) {
if (operand_val.isUndef(zcu)) {
- return Air.internedToRef((try zcu.undefValue(Type.usize)).toIntern());
+ return Air.internedToRef((try pt.undefValue(Type.usize)).toIntern());
}
- return Air.internedToRef((try zcu.intValue(
+ return Air.internedToRef((try pt.intValue(
Type.usize,
- (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?,
+ (try operand_val.getUnsignedIntAdvanced(pt, .sema)).?,
)).toIntern());
}
const len = operand_ty.vectorLen(zcu);
- const dest_ty = try zcu.vectorType(.{ .child = .usize_type, .len = len });
+ const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len });
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
- const ptr_val = try operand_val.elemValue(zcu, i);
+ const ptr_val = try operand_val.elemValue(pt, i);
if (ptr_val.isUndef(zcu)) {
- new_elem.* = (try zcu.undefValue(Type.usize)).toIntern();
+ new_elem.* = (try pt.undefValue(Type.usize)).toIntern();
continue;
}
- const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse {
+ const addr = try ptr_val.getUnsignedIntAdvanced(pt, .sema) orelse {
// A vector element wasn't an integer pointer. This is a runtime operation.
break :ct;
};
- new_elem.* = (try zcu.intValue(
+ new_elem.* = (try pt.intValue(
Type.usize,
addr,
)).toIntern();
}
- return Air.internedToRef(try zcu.intern(.{ .aggregate = .{
+ return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = new_elems },
} }));
@@ -10157,10 +10259,10 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return block.addUnOp(.int_from_ptr, operand);
}
const len = operand_ty.vectorLen(zcu);
- const dest_ty = try zcu.vectorType(.{ .child = .usize_type, .len = len });
+ const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len });
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try zcu.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addUnOp(.int_from_ptr, old_elem);
}
@@ -10171,7 +10273,8 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node });
@@ -10189,7 +10292,8 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node });
@@ -10207,7 +10311,8 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.src(.{ .node_offset_field_name_init = inst_data.src_node });
@@ -10284,7 +10389,8 @@ fn intCast(
operand_src: LazySrcLoc,
runtime_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
@@ -10307,7 +10413,7 @@ fn intCast(
if (wanted_bits == 0) {
const ok = if (is_vector) ok: {
- const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0));
+ const zeros = try sema.splat(operand_ty, try pt.intValue(operand_scalar_ty, 0));
const zero_inst = Air.internedToRef(zeros.toIntern());
const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
const all_in_range = try block.addInst(.{
@@ -10316,7 +10422,7 @@ fn intCast(
});
break :ok all_in_range;
} else ok: {
- const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern());
+ const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern());
const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst);
break :ok is_in_range;
};
@@ -10339,7 +10445,7 @@ fn intCast(
// range shrinkage
// requirement: int value fits into target type
if (wanted_value_bits < actual_value_bits) {
- const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty);
+ const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(pt, operand_scalar_ty);
const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
const dest_max = Air.internedToRef(dest_max_val.toIntern());
@@ -10348,8 +10454,8 @@ fn intCast(
// Reinterpret the sign-bit as part of the value. This will make
// negative differences (`operand` > `dest_max`) appear too big.
- const unsigned_scalar_operand_ty = try mod.intType(.unsigned, actual_bits);
- const unsigned_operand_ty = if (is_vector) try mod.vectorType(.{
+ const unsigned_scalar_operand_ty = try pt.intType(.unsigned, actual_bits);
+ const unsigned_operand_ty = if (is_vector) try pt.vectorType(.{
.len = dest_ty.vectorLen(mod),
.child = unsigned_scalar_operand_ty.toIntern(),
}) else unsigned_scalar_operand_ty;
@@ -10358,14 +10464,14 @@ fn intCast(
// If the destination type is signed, then we need to double its
// range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
- const one_scalar = try mod.intValue(unsigned_scalar_operand_ty, 1);
- const one = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ const one_scalar = try pt.intValue(unsigned_scalar_operand_ty, 1);
+ const one = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = unsigned_operand_ty.toIntern(),
.storage = .{ .repeated_elem = one_scalar.toIntern() },
- } }))) else one_scalar;
- const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod);
+ } })) else one_scalar;
+ const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, pt);
break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined);
- } else try mod.getCoerced(dest_max_val, unsigned_operand_ty);
+ } else try pt.getCoerced(dest_max_val, unsigned_operand_ty);
const dest_range = Air.internedToRef(dest_range_val.toIntern());
const ok = if (is_vector) ok: {
@@ -10405,7 +10511,7 @@ fn intCast(
// no shrinkage, yes sign loss
// requirement: signed to unsigned >= 0
const ok = if (is_vector) ok: {
- const scalar_zero = try mod.intValue(operand_scalar_ty, 0);
+ const scalar_zero = try pt.intValue(operand_scalar_ty, 0);
const zero_val = try sema.splat(operand_ty, scalar_zero);
const zero_inst = Air.internedToRef(zero_val.toIntern());
const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
@@ -10418,7 +10524,7 @@ fn intCast(
});
break :ok all_in_range;
} else ok: {
- const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern());
+ const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern());
const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst);
break :ok is_in_range;
};
@@ -10432,7 +10538,8 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -10457,14 +10564,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Type,
.Undefined,
.Void,
- => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}),
+ => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}),
.Enum => {
const msg = msg: {
- const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (operand_ty.zigTypeTag(mod)) {
- .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}),
+ .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
else => {},
}
@@ -10475,11 +10582,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Pointer => {
const msg = msg: {
- const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (operand_ty.zigTypeTag(mod)) {
- .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}),
- .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}),
+ .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
+ .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(pt)}),
else => {},
}
@@ -10494,7 +10601,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
else => unreachable,
};
return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{
- dest_ty.fmt(mod), container,
+ dest_ty.fmt(pt), container,
});
},
@@ -10521,14 +10628,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Type,
.Undefined,
.Void,
- => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}),
+ => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}),
.Enum => {
const msg = msg: {
- const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)});
+ const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (dest_ty.zigTypeTag(mod)) {
- .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(mod)}),
+ .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(pt)}),
else => {},
}
@@ -10538,11 +10645,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
},
.Pointer => {
const msg = msg: {
- const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)});
+ const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (dest_ty.zigTypeTag(mod)) {
- .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(mod)}),
- .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}),
+ .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(pt)}),
+ .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(pt)}),
else => {},
}
@@ -10557,7 +10664,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
else => unreachable,
};
return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{
- operand_ty.fmt(mod), container,
+ operand_ty.fmt(pt), container,
});
},
@@ -10575,7 +10682,8 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -10599,7 +10707,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
block,
src,
"expected float or vector type, found '{}'",
- .{dest_ty.fmt(mod)},
+ .{dest_ty.fmt(pt)},
),
};
@@ -10609,21 +10717,21 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
block,
operand_src,
"expected float or vector type, found '{}'",
- .{operand_ty.fmt(mod)},
+ .{operand_ty.fmt(pt)},
),
}
if (try sema.resolveValue(operand)) |operand_val| {
if (!is_vector) {
- return Air.internedToRef((try operand_val.floatCast(dest_ty, mod)).toIntern());
+ return Air.internedToRef((try operand_val.floatCast(dest_ty, pt)).toIntern());
}
const vec_len = operand_ty.vectorLen(mod);
const new_elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (new_elems, 0..) |*new_elem, i| {
- const old_elem = try operand_val.elemValue(mod, i);
- new_elem.* = (try old_elem.floatCast(dest_scalar_ty, mod)).toIntern();
+ const old_elem = try operand_val.elemValue(pt, i);
+ new_elem.* = (try old_elem.floatCast(dest_scalar_ty, pt)).toIntern();
}
- return Air.internedToRef(try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = new_elems },
} }));
@@ -10644,7 +10752,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const vec_len = operand_ty.vectorLen(mod);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem);
}
@@ -10681,10 +10789,9 @@ fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm;
const array = try sema.resolveInst(inst_data.operand);
- const elem_index = try mod.intRef(Type.usize, inst_data.idx);
+ const elem_index = try sema.pt.intRef(Type.usize, inst_data.idx);
return sema.elemVal(block, LazySrcLoc.unneeded, array, elem_index, LazySrcLoc.unneeded, false);
}
@@ -10692,7 +10799,8 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -10703,7 +10811,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const capture_src = block.src(.{ .for_capture_from_input = inst_data.src_node });
const msg = msg: {
const msg = try sema.errMsg(capture_src, "pointer capture of non pointer type '{}'", .{
- indexable_ty.fmt(mod),
+ indexable_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
if (indexable_ty.isIndexable(mod)) {
@@ -10734,12 +10842,13 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
- const elem_index = try sema.mod.intRef(Type.usize, extra.index);
+ const elem_index = try pt.intRef(Type.usize, extra.index);
const array_ty = sema.typeOf(array_ptr).childType(mod);
switch (array_ty.zigTypeTag(mod)) {
.Array, .Vector => {},
@@ -10892,7 +11001,7 @@ const SwitchProngAnalysis = struct {
inline_case_capture,
);
- if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) {
+ if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) {
// This prong should be unreachable!
return .unreachable_value;
}
@@ -10948,7 +11057,7 @@ const SwitchProngAnalysis = struct {
inline_case_capture,
);
- if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) {
+ if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) {
// No need to analyze any further, the prong is unreachable
return;
}
@@ -10968,7 +11077,8 @@ const SwitchProngAnalysis = struct {
inline_case_capture: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const sema = spa.sema;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(spa.operand);
if (operand_ty.zigTypeTag(mod) != .Union) {
const tag_capture_src: LazySrcLoc = .{
@@ -10976,7 +11086,7 @@ const SwitchProngAnalysis = struct {
.offset = .{ .switch_tag_capture = capture_src.offset.switch_capture },
};
return sema.fail(block, tag_capture_src, "cannot capture tag of non-union type '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
});
}
assert(inline_case_capture != .none);
@@ -10993,7 +11103,8 @@ const SwitchProngAnalysis = struct {
inline_case_capture: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const sema = spa.sema;
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const zir_datas = sema.code.instructions.items(.data);
@@ -11010,7 +11121,7 @@ const SwitchProngAnalysis = struct {
const union_obj = zcu.typeToUnion(operand_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_byref) {
- const ptr_field_ty = try zcu.ptrTypeSema(.{
+ const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !operand_ptr_ty.ptrIsMutable(zcu),
@@ -11019,7 +11130,7 @@ const SwitchProngAnalysis = struct {
},
});
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
- return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern());
+ return Air.internedToRef((try union_ptr.ptrField(field_index, pt)).toIntern());
}
return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
} else {
@@ -11078,7 +11189,7 @@ const SwitchProngAnalysis = struct {
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
- dummy.* = try zcu.undefRef(field_ty);
+ dummy.* = try pt.undefRef(field_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
@@ -11113,7 +11224,7 @@ const SwitchProngAnalysis = struct {
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (field_indices, dummy_captures) |field_idx, *dummy| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
- const field_ptr_ty = try zcu.ptrTypeSema(.{
+ const field_ptr_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = operand_ptr_info.flags.is_const,
@@ -11122,7 +11233,7 @@ const SwitchProngAnalysis = struct {
.alignment = union_obj.fieldAlign(ip, field_idx),
},
});
- dummy.* = try zcu.undefRef(field_ptr_ty);
+ dummy.* = try pt.undefRef(field_ptr_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
for (case_srcs, 0..) |*case_src, i| {
@@ -11148,9 +11259,9 @@ const SwitchProngAnalysis = struct {
};
if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
- if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty);
- const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu);
- return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
+ if (op_ptr_val.isUndef(zcu)) return pt.undefRef(capture_ptr_ty);
+ const field_ptr_val = try op_ptr_val.ptrField(first_field_index, pt);
+ return Air.internedToRef((try pt.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
}
try sema.requireRuntimeBlock(block, operand_src, null);
@@ -11158,9 +11269,9 @@ const SwitchProngAnalysis = struct {
}
if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| {
- if (operand_val.isUndef(zcu)) return zcu.undefRef(capture_ty);
+ if (operand_val.isUndef(zcu)) return pt.undefRef(capture_ty);
const union_val = ip.indexToKey(operand_val.toIntern()).un;
- if (Value.fromInterned(union_val.tag).isUndef(zcu)) return zcu.undefRef(capture_ty);
+ if (Value.fromInterned(union_val.tag).isUndef(zcu)) return pt.undefRef(capture_ty);
const uncoerced = Air.internedToRef(union_val.val);
return sema.coerce(block, capture_ty, uncoerced, operand_src);
}
@@ -11304,7 +11415,7 @@ const SwitchProngAnalysis = struct {
if (case_vals.len == 1) {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable;
- const item_ty = try zcu.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?);
+ const item_ty = try pt.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?);
return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
}
@@ -11314,7 +11425,7 @@ const SwitchProngAnalysis = struct {
const err_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, err, undefined) catch unreachable;
names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {});
}
- const error_ty = try zcu.errorSetFromUnsortedNames(names.keys());
+ const error_ty = try pt.errorSetFromUnsortedNames(names.keys());
return sema.bitCast(block, error_ty, spa.operand, operand_src, null);
},
else => {
@@ -11336,7 +11447,8 @@ fn switchCond(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.Type,
@@ -11353,7 +11465,7 @@ fn switchCond(
.Enum,
=> {
if (operand_ty.isSlice(mod)) {
- return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)});
+ return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)});
}
if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| {
return Air.internedToRef(opv.toIntern());
@@ -11362,7 +11474,7 @@ fn switchCond(
},
.Union => {
- try operand_ty.resolveFields(mod);
+ try operand_ty.resolveFields(pt);
const enum_ty = operand_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(src, "switch on union with no attached enum", .{});
@@ -11388,7 +11500,7 @@ fn switchCond(
.Vector,
.Frame,
.AnyFrame,
- => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}),
+ => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)}),
}
}
@@ -11398,7 +11510,8 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const switch_src = block.nodeOffset(inst_data.src_node);
@@ -11489,7 +11602,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
if (operand_err_set.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, switch_src, "expected error union type, found '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
});
}
@@ -11571,7 +11684,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
if (operand_val.errorUnionIsPayload(mod)) {
return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
} else {
- const err_val = Value.fromInterned(try mod.intern(.{
+ const err_val = Value.fromInterned(try pt.intern(.{
.err = .{
.ty = operand_err_set_ty.toIntern(),
.name = operand_val.getErrorName(mod).unwrap().?,
@@ -11708,7 +11821,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
@@ -11783,7 +11897,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
// Duplicate checking variables later also used for `inline else`.
var seen_enum_fields: []?LazySrcLoc = &.{};
var seen_errors = SwitchErrorSet.init(gpa);
- var range_set = RangeSet.init(gpa, mod);
+ var range_set = RangeSet.init(gpa, pt);
var true_count: u8 = 0;
var false_count: u8 = 0;
@@ -11924,7 +12038,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
operand_ty.srcLoc(mod),
msg,
"enum '{}' declared here",
- .{operand_ty.fmt(mod)},
+ .{operand_ty.fmt(pt)},
);
break :msg msg;
};
@@ -12030,8 +12144,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
check_range: {
if (operand_ty.zigTypeTag(mod) == .Int) {
- const min_int = try operand_ty.minInt(mod, operand_ty);
- const max_int = try operand_ty.maxInt(mod, operand_ty);
+ const min_int = try operand_ty.minInt(pt, operand_ty);
+ const max_int = try operand_ty.maxInt(pt, operand_ty);
if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
if (special_prong == .@"else") {
return sema.fail(
@@ -12136,7 +12250,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
block,
src,
"else prong required when switching on type '{}'",
- .{operand_ty.fmt(mod)},
+ .{operand_ty.fmt(pt)},
);
}
@@ -12212,7 +12326,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
.ComptimeFloat,
.Float,
=> return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
}),
}
@@ -12386,7 +12500,8 @@ fn analyzeSwitchRuntimeBlock(
cond_dbg_node_index: Zir.Inst.Index,
allow_err_code_unwrap: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -12496,9 +12611,9 @@ fn analyzeSwitchRuntimeBlock(
var item = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_first_ref, undefined) catch unreachable;
const item_last = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_last_ref, undefined) catch unreachable;
- while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
+ while (item.compareScalar(.lte, item_last, operand_ty, pt)) : ({
// Previous validation has resolved any possible lazy values.
- item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
+ item = sema.intAddScalar(item, try pt.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
error.Overflow => unreachable,
else => |e| return e,
};
@@ -12537,7 +12652,7 @@ fn analyzeSwitchRuntimeBlock(
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
- if (item.compareScalar(.eq, item_last, operand_ty, mod)) break;
+ if (item.compareScalar(.eq, item_last, operand_ty, pt)) break;
}
}
@@ -12744,14 +12859,14 @@ fn analyzeSwitchRuntimeBlock(
.Enum => {
if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
});
}
for (seen_enum_fields, 0..) |f, i| {
if (f != null) continue;
cases_len += 1;
- const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(i));
+ const item_val = try pt.enumValueFieldIndex(operand_ty, @intCast(i));
const item_ref = Air.internedToRef(item_val.toIntern());
case_block.instructions.shrinkRetainingCapacity(0);
@@ -12793,7 +12908,7 @@ fn analyzeSwitchRuntimeBlock(
.ErrorSet => {
if (operand_ty.isAnyError(mod)) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
});
}
const error_names = operand_ty.errorSetNames(mod);
@@ -12802,7 +12917,7 @@ fn analyzeSwitchRuntimeBlock(
if (seen_errors.contains(error_name)) continue;
cases_len += 1;
- const item_val = try mod.intern(.{ .err = .{
+ const item_val = try pt.intern(.{ .err = .{
.ty = operand_ty.toIntern(),
.name = error_name,
} });
@@ -12930,7 +13045,7 @@ fn analyzeSwitchRuntimeBlock(
}
},
else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
}),
};
@@ -13051,7 +13166,7 @@ fn resolveSwitchComptime(
const item = case_vals.items[scalar_i];
const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
- if (operand_val.eql(item_val, operand_ty, sema.mod)) {
+ if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) {
if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
return spa.resolveProngComptime(
child_block,
@@ -13088,7 +13203,7 @@ fn resolveSwitchComptime(
for (items) |item| {
// Validation above ensured these will succeed.
const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
- if (operand_val.eql(item_val, operand_ty, sema.mod)) {
+ if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) {
if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
return spa.resolveProngComptime(
child_block,
@@ -13162,7 +13277,7 @@ fn resolveSwitchComptime(
}
const RangeSetUnhandledIterator = struct {
- mod: *Module,
+ pt: Zcu.PerThread,
cur: ?InternPool.Index,
max: InternPool.Index,
range_i: usize,
@@ -13172,13 +13287,13 @@ const RangeSetUnhandledIterator = struct {
const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128);
fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
- const mod = sema.mod;
- const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type;
+ const pt = sema.pt;
+ const int_type = pt.zcu.intern_pool.indexToKey(ty.toIntern()).int_type;
const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits);
return .{
- .mod = mod,
- .cur = (try ty.minInt(mod, ty)).toIntern(),
- .max = (try ty.maxInt(mod, ty)).toIntern(),
+ .pt = pt,
+ .cur = (try ty.minInt(pt, ty)).toIntern(),
+ .max = (try ty.maxInt(pt, ty)).toIntern(),
.range_i = 0,
.ranges = range_set.ranges.items,
.limbs = if (needed_limbs > preallocated_limbs)
@@ -13190,13 +13305,13 @@ const RangeSetUnhandledIterator = struct {
fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index {
if (val == it.max) return null;
- const int = it.mod.intern_pool.indexToKey(val).int;
+ const int = it.pt.zcu.intern_pool.indexToKey(val).int;
switch (int.storage) {
inline .u64, .i64 => |val_int| {
const next_int = @addWithOverflow(val_int, 1);
if (next_int[1] == 0)
- return (try it.mod.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern();
+ return (try it.pt.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern();
},
.big_int => {},
.lazy_align, .lazy_size => unreachable,
@@ -13212,7 +13327,7 @@ const RangeSetUnhandledIterator = struct {
);
result_bigint.addScalar(val_bigint, 1);
- return (try it.mod.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern();
+ return (try it.pt.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern();
}
fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
@@ -13274,7 +13389,8 @@ fn validateErrSetSwitch(
has_else: bool,
) CompileError!?Type {
const gpa = sema.gpa;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const src_node_offset = inst_data.src_node;
@@ -13426,7 +13542,7 @@ fn validateErrSetSwitch(
}
// No need to keep the hash map metadata correct; here we
// extract the (sorted) keys only.
- return try mod.errorSetFromUnsortedNames(names.keys());
+ return try pt.errorSetFromUnsortedNames(names.keys());
},
}
return null;
@@ -13441,7 +13557,6 @@ fn validateSwitchRange(
operand_ty: Type,
item_src: LazySrcLoc,
) CompileError![2]Air.Inst.Ref {
- const mod = sema.mod;
const first_src: LazySrcLoc = .{
.base_node_inst = item_src.base_node_inst,
.offset = .{ .switch_case_item_range_first = item_src.offset.switch_case_item },
@@ -13452,7 +13567,7 @@ fn validateSwitchRange(
};
const first = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, first_src);
const last = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, last_src);
- if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, mod)) {
+ if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, sema.pt)) {
return sema.fail(block, item_src, "range start value is greater than the end value", .{});
}
const maybe_prev_src = try range_set.add(first.val, last.val, item_src);
@@ -13483,7 +13598,7 @@ fn validateSwitchItemEnum(
operand_ty: Type,
item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const ip = &sema.mod.intern_pool;
+ const ip = &sema.pt.zcu.intern_pool;
const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src);
const int = ip.indexToKey(item.val).enum_tag.int;
const field_index = ip.loadEnumType(ip.typeOf(item.val)).tagValueIndex(ip, int) orelse {
@@ -13505,9 +13620,8 @@ fn validateSwitchItemError(
operand_ty: Type,
item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const ip = &sema.mod.intern_pool;
const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src);
- const error_name = ip.indexToKey(item.val).err.name;
+ const error_name = sema.pt.zcu.intern_pool.indexToKey(item.val).err.name;
const maybe_prev_src = if (try seen_errors.fetchPut(error_name, item_src)) |prev|
prev.value
else
@@ -13593,7 +13707,7 @@ fn validateSwitchNoRange(
const msg = try sema.errMsg(
operand_src,
"ranges not allowed when switching on type '{}'",
- .{operand_ty.fmt(sema.mod)},
+ .{operand_ty.fmt(sema.pt)},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
@@ -13615,7 +13729,8 @@ fn maybeErrorUnwrap(
operand_src: LazySrcLoc,
allow_err_code_inst: bool,
) !bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;
const tags = sema.code.instructions.items(.tag);
@@ -13654,7 +13769,7 @@ fn maybeErrorUnwrap(
return true;
}
- const panic_fn = try mod.getBuiltin("panicUnwrapError");
+ const panic_fn = try pt.getBuiltin("panicUnwrapError");
const err_return_trace = try sema.getErrorReturnTrace(block);
const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13664,7 +13779,7 @@ fn maybeErrorUnwrap(
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const msg_inst = try sema.resolveInst(inst_data.operand);
- const panic_fn = try mod.getBuiltin("panic");
+ const panic_fn = try pt.getBuiltin("panic");
const err_return_trace = try sema.getErrorReturnTrace(block);
const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13680,7 +13795,8 @@ fn maybeErrorUnwrap(
}

fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const index = cond.toIndex() orelse return;
if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) return;
@@ -13713,14 +13829,15 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
const src = block.nodeOffset(inst_data.src_node);
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
- if (val.getErrorName(sema.mod).unwrap()) |name| {
+ if (val.getErrorName(sema.pt.zcu).unwrap()) |name| {
return sema.failWithComptimeErrorRetTrace(block, src, name);
}
}
}

fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -13729,7 +13846,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{
.needed_comptime_reason = "field name must be comptime-known",
});
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
const ip = &mod.intern_pool;
const has_field = hf: {
@@ -13764,14 +13881,15 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else => {},
}
return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
- ty.fmt(mod),
+ ty.fmt(pt),
});
};
return if (has_field) .bool_true else .bool_false;
}

fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -13804,7 +13922,8 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const operand_src = block.tokenOffset(inst_data.src_tok);
const operand = inst_data.get(sema.code);
@@ -13824,7 +13943,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
},
};
- try zcu.ensureFileAnalyzed(result.file_index);
+ try pt.ensureFileAnalyzed(result.file_index);
const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?;
return sema.analyzeDeclVal(block, operand_src, file_root_decl_index);
}
@@ -13833,7 +13952,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const name = try sema.resolveConstString(block, operand_src, inst_data.operand, .{
@@ -13844,7 +13963,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
return sema.fail(block, operand_src, "file path name cannot be empty", .{});
}
- const val = mod.embedFile(block.getFileScope(mod), name, operand_src) catch |err| switch (err) {
+ const val = pt.embedFile(block.getFileScope(pt.zcu), name, operand_src) catch |err| switch (err) {
error.ImportOutsideModulePath => {
return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
},
@@ -13859,7 +13978,8 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}

fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const name = try mod.intern_pool.getOrPutString(
sema.gpa,
@@ -13867,8 +13987,8 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R
.no_embedded_nulls,
);
_ = try mod.getErrorValue(name);
- const error_set_type = try mod.singleErrorSetType(name);
- return Air.internedToRef((try mod.intern(.{ .err = .{
+ const error_set_type = try pt.singleErrorSetType(name);
+ return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = error_set_type.toIntern(),
.name = name,
} })));
@@ -13883,7 +14003,8 @@ fn zirShl(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -13906,53 +14027,53 @@ fn zirShl(
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(sema.typeOf(lhs));
+ return pt.undefRef(sema.typeOf(lhs));
}
// If rhs is 0, return lhs without doing any calculations.
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return lhs;
}
if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
- const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
+ const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
- const rhs_elem = try rhs_val.elemValue(mod, i);
- if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
+ const rhs_elem = try rhs_val.elemValue(pt, i);
+ if (rhs_elem.compareHetero(.gte, bit_value, pt)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
- rhs_elem.fmtValue(mod, sema),
+ rhs_elem.fmtValue(pt, sema),
i,
- scalar_ty.fmt(mod),
+ scalar_ty.fmt(pt),
});
}
}
- } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
+ } else if (rhs_val.compareHetero(.gte, bit_value, pt)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
- rhs_val.fmtValue(mod, sema),
- scalar_ty.fmt(mod),
+ rhs_val.fmtValue(pt, sema),
+ scalar_ty.fmt(pt),
});
}
}
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
- const rhs_elem = try rhs_val.elemValue(mod, i);
- if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
+ const rhs_elem = try rhs_val.elemValue(pt, i);
+ if (rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), pt)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
- rhs_elem.fmtValue(mod, sema),
+ rhs_elem.fmtValue(pt, sema),
i,
});
}
}
- } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
+ } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
- rhs_val.fmtValue(mod, sema),
+ rhs_val.fmtValue(pt, sema),
});
}
}
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
- if (lhs_val.isUndef(mod)) return mod.undefRef(lhs_ty);
+ if (lhs_val.isUndef(mod)) return pt.undefRef(lhs_ty);
const rhs_val = maybe_rhs_val orelse {
if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
@@ -13960,17 +14081,17 @@ fn zirShl(
break :rs rhs_src;
};
const val = if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
- try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
+ try lhs_val.shl(rhs_val, lhs_ty, sema.arena, pt)
else switch (air_tag) {
.shl_exact => val: {
- const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod);
- if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) {
+ const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, pt);
+ if (shifted.overflow_bit.compareAllWithZero(.eq, pt)) {
break :val shifted.wrapped_result;
}
return sema.fail(block, src, "operation caused overflow", .{});
},
- .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod),
- .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod),
+ .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, pt),
+ .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, pt),
else => unreachable,
};
return Air.internedToRef(val.toIntern());
@@ -13981,7 +14102,7 @@ fn zirShl(
if (rhs_is_comptime_int or
scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
{
- const max_int = Air.internedToRef((try lhs_ty.maxInt(mod, lhs_ty)).toIntern());
+ const max_int = Air.internedToRef((try lhs_ty.maxInt(pt, lhs_ty)).toIntern());
const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
} else {
@@ -13993,7 +14114,7 @@ fn zirShl(
if (block.wantSafety()) {
const bit_count = scalar_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
- const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
+ const bit_count_val = try pt.intValue(scalar_rhs_ty, bit_count);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
@@ -14034,7 +14155,7 @@ fn zirShl(
})
else
ov_bit;
- const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
+ const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
try sema.addSafetyCheck(block, src, no_ov, .shl_overflow);
@@ -14053,7 +14174,8 @@ fn zirShr(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -14071,61 +14193,61 @@ fn zirShr(
const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(lhs_ty);
+ return pt.undefRef(lhs_ty);
}
// If rhs is 0, return lhs without doing any calculations.
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return lhs;
}
if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
- const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
+ const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
- const rhs_elem = try rhs_val.elemValue(mod, i);
- if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
+ const rhs_elem = try rhs_val.elemValue(pt, i);
+ if (rhs_elem.compareHetero(.gte, bit_value, pt)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
- rhs_elem.fmtValue(mod, sema),
+ rhs_elem.fmtValue(pt, sema),
i,
- scalar_ty.fmt(mod),
+ scalar_ty.fmt(pt),
});
}
}
- } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
+ } else if (rhs_val.compareHetero(.gte, bit_value, pt)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
- rhs_val.fmtValue(mod, sema),
- scalar_ty.fmt(mod),
+ rhs_val.fmtValue(pt, sema),
+ scalar_ty.fmt(pt),
});
}
}
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
- const rhs_elem = try rhs_val.elemValue(mod, i);
- if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
+ const rhs_elem = try rhs_val.elemValue(pt, i);
+ if (rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(mod), 0), pt)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
- rhs_elem.fmtValue(mod, sema),
+ rhs_elem.fmtValue(pt, sema),
i,
});
}
}
- } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
+ } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
- rhs_val.fmtValue(mod, sema),
+ rhs_val.fmtValue(pt, sema),
});
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(lhs_ty);
+ return pt.undefRef(lhs_ty);
}
if (air_tag == .shr_exact) {
// Detect if any ones would be shifted out.
- const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
- if (!(try truncated.compareAllWithZeroSema(.eq, mod))) {
+ const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, pt);
+ if (!(try truncated.compareAllWithZeroSema(.eq, pt))) {
return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
}
}
- const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod);
+ const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, pt);
return Air.internedToRef(val.toIntern());
} else {
break :rs lhs_src;
@@ -14141,7 +14263,7 @@ fn zirShr(
if (block.wantSafety()) {
const bit_count = scalar_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
- const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count);
+ const bit_count_val = try pt.intValue(rhs_ty.scalarType(mod), bit_count);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
@@ -14188,7 +14310,8 @@ fn zirBitwise(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -14220,9 +14343,9 @@ fn zirBitwise(
if (try sema.resolveValueIntable(casted_lhs)) |lhs_val| {
if (try sema.resolveValueIntable(casted_rhs)) |rhs_val| {
const result_val = switch (air_tag) {
- .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod),
- .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod),
- .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod),
+ .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, pt),
+ .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, pt),
+ .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, pt),
else => unreachable,
};
return Air.internedToRef(result_val.toIntern());
@@ -14242,7 +14365,8 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
@@ -14253,26 +14377,26 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (scalar_type.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
- operand_type.fmt(mod),
+ operand_type.fmt(pt),
});
}
if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(mod)) {
- return mod.undefRef(operand_type);
+ return pt.undefRef(operand_type);
} else if (operand_type.zigTypeTag(mod) == .Vector) {
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
- const elem_val = try val.elemValue(mod, i);
- elem.* = (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ elem.* = (try elem_val.bitwiseNot(scalar_type, sema.arena, pt)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = operand_type.toIntern(),
.storage = .{ .elems = elems },
} })));
} else {
- const result_val = try val.bitwiseNot(operand_type, sema.arena, mod);
+ const result_val = try val.bitwiseNot(operand_type, sema.arena, pt);
return Air.internedToRef(result_val.toIntern());
}
}
@@ -14288,7 +14412,8 @@ fn analyzeTupleCat(
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const src = block.nodeOffset(src_node);
@@ -14344,14 +14469,14 @@ fn analyzeTupleCat(
break :rs runtime_src;
};
- const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{
+ const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{
.types = types,
.values = values,
.names = &.{},
});
const runtime_src = opt_runtime_src orelse {
- const tuple_val = try mod.intern(.{ .aggregate = .{
+ const tuple_val = try pt.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
@@ -14386,7 +14511,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
@@ -14406,11 +14532,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
- return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
+ return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)});
};
const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
assert(!rhs_is_tuple);
- return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
+ return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(pt)});
};
const resolved_elem_ty = t: {
@@ -14472,7 +14598,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
),
};
- const result_ty = try mod.arrayType(.{
+ const result_ty = try pt.arrayType(.{
.len = result_len,
.sentinel = if (res_sent_val) |v| v.toIntern() else .none,
.child = resolved_elem_ty.toIntern(),
@@ -14512,7 +14638,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
while (elem_i < lhs_len) : (elem_i += 1) {
const lhs_elem_i = elem_i;
const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
- const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
+ const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(pt, lhs_elem_i) else elem_default_val;
const elem_val_inst = Air.internedToRef(elem_val.toIntern());
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14525,7 +14651,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
- const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
+ const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(pt, rhs_elem_i) else elem_default_val;
const elem_val_inst = Air.internedToRef(elem_val.toIntern());
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14535,7 +14661,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined);
element_vals[elem_i] = coerced_elem_val.toIntern();
}
- return sema.addConstantMaybeRef(try mod.intern(.{ .aggregate = .{
+ return sema.addConstantMaybeRef(try pt.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .elems = element_vals },
} }), ptr_addrspace != null);
@@ -14545,19 +14671,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.requireRuntimeBlock(block, src, runtime_src);
if (ptr_addrspace) |ptr_as| {
- const alloc_ty = try mod.ptrTypeSema(.{
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = ptr_as },
});
const alloc = try block.addTy(.alloc, alloc_ty);
- const elem_ptr_ty = try mod.ptrTypeSema(.{
+ const elem_ptr_ty = try pt.ptrTypeSema(.{
.child = resolved_elem_ty.toIntern(),
.flags = .{ .address_space = ptr_as },
});
var elem_i: u32 = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const elem_index = try mod.intRef(Type.usize, elem_i);
+ const elem_index = try pt.intRef(Type.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14568,8 +14694,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
- const elem_index = try mod.intRef(Type.usize, elem_i);
- const rhs_index = try mod.intRef(Type.usize, rhs_elem_i);
+ const elem_index = try pt.intRef(Type.usize, elem_i);
+ const rhs_index = try pt.intRef(Type.usize, rhs_elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14579,9 +14705,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store);
}
if (res_sent_val) |sent_val| {
- const elem_index = try mod.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(Type.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = Air.internedToRef((try mod.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
+ const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
@@ -14592,7 +14718,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
{
var elem_i: u32 = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const index = try mod.intRef(Type.usize, elem_i);
+ const index = try pt.intRef(Type.usize, elem_i);
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
.elem_index = elem_i,
@@ -14602,7 +14728,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
- const index = try mod.intRef(Type.usize, rhs_elem_i);
+ const index = try pt.intRef(Type.usize, rhs_elem_i);
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
.elem_index = @intCast(rhs_elem_i),
@@ -14616,7 +14742,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
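zirArrayCat lowers the `++` operator; when both operands are comptime-known it interns the whole result as one aggregate, roughly as in this sketch:

```zig
const std = @import("std");

test "comptime array concatenation" {
    const a = [_]u8{ 1, 2 } ++ [_]u8{3}; // [3]u8{ 1, 2, 3 }, fully comptime-known
    const s = "ab" ++ "cd"; // *const [4:0]u8, sentinel carried over
    try std.testing.expectEqual(@as(usize, 3), a.len);
    try std.testing.expectEqualStrings("abcd", s);
}
```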
fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.Array => return operand_ty.arrayInfo(mod),
@@ -14633,7 +14760,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
.none => null,
else => Value.fromInterned(ptr_info.sentinel),
},
- .len = try val.sliceLen(mod),
+ .len = try val.sliceLen(pt),
};
},
.One => {
@@ -14666,7 +14793,8 @@ fn analyzeTupleMul(
operand: Air.Inst.Ref,
factor: usize,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
const src = block.nodeOffset(src_node);
const len_src = block.src(.{ .node_offset_bin_rhs = src_node });
@@ -14702,14 +14830,14 @@ fn analyzeTupleMul(
break :rs runtime_src;
};
- const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{
+ const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{
.types = types,
.values = values,
.names = &.{},
});
const runtime_src = opt_runtime_src orelse {
- const tuple_val = try mod.intern(.{ .aggregate = .{
+ const tuple_val = try pt.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
@@ -14739,7 +14867,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.ArrayMul, inst_data.payload_index).data;
const uncoerced_lhs = try sema.resolveInst(extra.lhs);
@@ -14762,12 +14891,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_len = uncoerced_lhs_ty.structFieldCount(mod);
const lhs_dest_ty = switch (res_ty.zigTypeTag(mod)) {
else => break :no_coerce,
- .Array => try mod.arrayType(.{
+ .Array => try pt.arrayType(.{
.child = res_ty.childType(mod).toIntern(),
.len = lhs_len,
.sentinel = if (res_ty.sentinel(mod)) |s| s.toIntern() else .none,
}),
- .Vector => try mod.vectorType(.{
+ .Vector => try pt.vectorType(.{
.child = res_ty.childType(mod).toIntern(),
.len = lhs_len,
}),
@@ -14796,7 +14925,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Analyze the lhs first, to catch the case that someone tried to do exponentiation
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
const msg = msg: {
- const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
+ const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (lhs_ty.zigTypeTag(mod)) {
.Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
@@ -14818,7 +14947,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.fail(block, rhs_src, "operation results in overflow", .{});
const result_len = try sema.usizeCast(block, src, result_len_u64);
- const result_ty = try mod.arrayType(.{
+ const result_ty = try pt.arrayType(.{
.len = result_len,
.sentinel = if (lhs_info.sentinel) |s| s.toIntern() else .none,
.child = lhs_info.elem_type.toIntern(),
@@ -14839,8 +14968,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Optimization for the common pattern of a single element repeated N times, such
// as zero-filling a byte array.
if (lhs_len == 1 and lhs_info.sentinel == null) {
- const elem_val = try lhs_sub_val.elemValue(mod, 0);
- break :v try mod.intern(.{ .aggregate = .{
+ const elem_val = try lhs_sub_val.elemValue(pt, 0);
+ break :v try pt.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .repeated_elem = elem_val.toIntern() },
} });
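The single-element fast path above interns one repeated element instead of `result_len` separate values; the patterns it distinguishes look roughly like:

```zig
const std = @import("std");

test "array multiplication" {
    // One-element operand: stored as a single repeated element.
    const zeroes = [1]u8{0} ** 4096;
    // Multi-element operand: expanded element by element.
    const abab = [_]u8{ 0xa, 0xb } ** 2; // [4]u8{ 0xa, 0xb, 0xa, 0xb }
    try std.testing.expectEqual(@as(u8, 0), zeroes[4095]);
    try std.testing.expectEqual(@as(u8, 0xb), abab[3]);
}
```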
@@ -14851,12 +14980,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
while (elem_i < result_len) {
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
- const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
+ const elem_val = try lhs_sub_val.elemValue(pt, lhs_i);
element_vals[elem_i] = elem_val.toIntern();
elem_i += 1;
}
}
- break :v try mod.intern(.{ .aggregate = .{
+ break :v try pt.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .elems = element_vals },
} });
@@ -14870,17 +14999,17 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// to get the same elem values.
const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len);
for (lhs_vals, 0..) |*lhs_val, idx| {
- const idx_ref = try mod.intRef(Type.usize, idx);
+ const idx_ref = try pt.intRef(Type.usize, idx);
lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false);
}
if (ptr_addrspace) |ptr_as| {
- const alloc_ty = try mod.ptrTypeSema(.{
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = ptr_as },
});
const alloc = try block.addTy(.alloc, alloc_ty);
- const elem_ptr_ty = try mod.ptrTypeSema(.{
+ const elem_ptr_ty = try pt.ptrTypeSema(.{
.child = lhs_info.elem_type.toIntern(),
.flags = .{ .address_space = ptr_as },
});
@@ -14888,14 +15017,14 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: usize = 0;
while (elem_i < result_len) {
for (lhs_vals) |lhs_val| {
- const elem_index = try mod.intRef(Type.usize, elem_i);
+ const elem_index = try pt.intRef(Type.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store);
elem_i += 1;
}
}
if (lhs_info.sentinel) |sent_val| {
- const elem_index = try mod.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(Type.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef(sent_val.toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -14912,7 +15041,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const lhs_src = src;
@@ -14926,25 +15056,26 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.Int, .ComptimeInt, .Float, .ComptimeFloat => false,
else => true,
}) {
- return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)});
+ return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)});
}
if (rhs_scalar_ty.isAnyFloat()) {
// We handle float negation here to ensure negative zero is represented in the bits.
if (try sema.resolveValue(rhs)) |rhs_val| {
- if (rhs_val.isUndef(mod)) return mod.undefRef(rhs_ty);
- return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, mod)).toIntern());
+ if (rhs_val.isUndef(mod)) return pt.undefRef(rhs_ty);
+ return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, pt)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(if (block.float_mode == .optimized) .neg_optimized else .neg, rhs);
}
- const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
+ const lhs = Air.internedToRef((try sema.splat(rhs_ty, try pt.intValue(rhs_scalar_ty, 0))).toIntern());
return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
}
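Float negation is folded directly (rather than lowered to `0 - rhs` like the integer case in the same function) so the sign bit of zero survives; a minimal sketch of the difference:

```zig
const std = @import("std");

test "negation keeps the sign of zero" {
    const z: f64 = 0.0;
    try std.testing.expect(std.math.signbit(-z)); // -0.0
    try std.testing.expect(!std.math.signbit(0.0 - z)); // +0.0 under default rounding
}
```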
fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const lhs_src = src;
@@ -14956,10 +15087,10 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
switch (rhs_scalar_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt, .Float, .ComptimeFloat => {},
- else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}),
+ else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}),
}
- const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
+ const lhs = Air.internedToRef((try sema.splat(rhs_ty, try pt.intValue(rhs_scalar_ty, 0))).toIntern());
return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
}
@@ -14985,7 +15116,8 @@ fn zirArithmetic(
}
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15026,13 +15158,13 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If lhs % rhs is 0, it doesn't matter.
const lhs_val = maybe_lhs_val orelse unreachable;
const rhs_val = maybe_rhs_val orelse unreachable;
- const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
- if (!rem.compareAllWithZero(.eq, mod)) {
+ const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt) catch unreachable;
+ if (!rem.compareAllWithZero(.eq, pt)) {
return sema.fail(
block,
src,
"ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
- .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(mod, sema) },
+ .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt), rem.fmtValue(pt, sema) },
);
}
}
@@ -15068,10 +15200,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
.Int, .ComptimeInt, .ComptimeFloat => {
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
- .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
+ .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15083,7 +15215,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
// TODO: if the RHS is one, return the LHS directly
@@ -15097,25 +15229,25 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
- if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
- return mod.undefRef(resolved_type);
+ if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) {
+ return pt.undefRef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, rhs_src);
}
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
var overflow_idx: ?usize = null;
- const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt);
if (overflow_idx) |vec_idx| {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
}
return Air.internedToRef(res.toIntern());
} else {
- return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else {
break :rs rhs_src;
@@ -15138,7 +15270,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
block,
src,
"division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact",
- .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) },
+ .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt) },
);
}
break :blk Air.Inst.Tag.div_trunc;
@@ -15150,7 +15282,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
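The runtime branch above rejects plain `/` for signed integers because truncating and flooring division disagree on negative operands; a sketch of the builtins the error points to, plus the overflow case the later addDivIntOverflowSafety guards:

```zig
const std = @import("std");

test "signed division picks an explicit rounding mode" {
    const a: i32 = -7;
    const b: i32 = 2;
    try std.testing.expectEqual(@as(i32, -3), @divTrunc(a, b)); // toward zero
    try std.testing.expectEqual(@as(i32, -4), @divFloor(a, b)); // toward -inf
    // @divExact(a, b) would be illegal behavior: -7 is not divisible by 2.
    // @divTrunc(@as(i8, -128), -1) would overflow i8 -- the minInt / -1 case.
}
```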
fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15204,10 +15337,10 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
- .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
+ .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15219,7 +15352,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
// TODO: if the RHS is one, return the LHS directly
@@ -15227,22 +15360,22 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
- if (!(modulus_val.compareAllWithZero(.eq, mod))) {
+ const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt);
+ if (!(modulus_val.compareAllWithZero(.eq, pt))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
var overflow_idx: ?usize = null;
- const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt);
if (overflow_idx) |vec_idx| {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
}
return Air.internedToRef(res.toIntern());
} else {
- const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
- if (!(modulus_val.compareAllWithZero(.eq, mod))) {
+ const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt);
+ if (!(modulus_val.compareAllWithZero(.eq, pt))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
- return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -15286,8 +15419,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
- .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
+ .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
if (resolved_type.zigTypeTag(mod) == .Vector) {
@@ -15315,7 +15448,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15371,10 +15505,10 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
- .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
+ .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15386,7 +15520,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
// TODO: if the RHS is one, return the LHS directly
@@ -15395,20 +15529,20 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
- if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
- return mod.undefRef(resolved_type);
+ if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) {
+ return pt.undefRef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, rhs_src);
}
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, pt)).toIntern());
} else {
- return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -15425,7 +15559,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15481,10 +15616,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
- .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
+ .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15496,7 +15631,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
@@ -15504,25 +15639,25 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
- if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
- return mod.undefRef(resolved_type);
+ if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) {
+ return pt.undefRef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, rhs_src);
}
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
var overflow_idx: ?usize = null;
- const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+ const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt);
if (overflow_idx) |vec_idx| {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
}
return Air.internedToRef(res.toIntern());
} else {
- return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else break :rs rhs_src;
} else break :rs lhs_src;
@@ -15550,7 +15685,8 @@ fn addDivIntOverflowSafety(
casted_rhs: Air.Inst.Ref,
is_int: bool,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (!is_int) return;
// If the LHS is unsigned, it cannot cause overflow.
@@ -15561,19 +15697,19 @@ fn addDivIntOverflowSafety(
return;
}
- const min_int = try resolved_type.minInt(mod, resolved_type);
- const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
+ const min_int = try resolved_type.minInt(pt, resolved_type);
+ const neg_one_scalar = try pt.intValue(lhs_scalar_ty, -1);
const neg_one = try sema.splat(resolved_type, neg_one_scalar);
// If the LHS is comptime-known to be not equal to the min int,
// no overflow is possible.
if (maybe_lhs_val) |lhs_val| {
- if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
+ if (try lhs_val.compareAll(.neq, min_int, resolved_type, pt)) return;
}
// If the RHS is comptime-known to not be equal to -1, no overflow is possible.
if (maybe_rhs_val) |rhs_val| {
- if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
+ if (try rhs_val.compareAll(.neq, neg_one, resolved_type, pt)) return;
}
var ok: Air.Inst.Ref = .none;
@@ -15634,11 +15770,12 @@ fn addDivByZeroSafety(
// emitted above.
if (maybe_rhs_val != null) return;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const scalar_zero = if (is_int)
- try mod.intValue(resolved_type.scalarType(mod), 0)
+ try pt.intValue(resolved_type.scalarType(mod), 0)
else
- try mod.floatValue(resolved_type.scalarType(mod), 0.0);
+ try pt.floatValue(resolved_type.scalarType(mod), 0.0);
const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
const zero_val = try sema.splat(resolved_type, scalar_zero);
const zero = Air.internedToRef(zero_val.toIntern());
@@ -15666,7 +15803,8 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
}
fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15721,16 +15859,16 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
- .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
+ .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ const zero_val = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = resolved_type.toIntern(),
.storage = .{ .repeated_elem = scalar_zero.toIntern() },
- } }))) else scalar_zero;
+ } })) else scalar_zero;
return Air.internedToRef(zero_val.toIntern());
}
} else if (lhs_scalar_ty.isSignedInt(mod)) {
@@ -15740,18 +15878,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.gte, pt))) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
// If this answer could possibly be different by doing `intMod`,
// we must emit a compile error. Otherwise, it's OK.
- if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and
- !(try rem_result.compareAllWithZeroSema(.eq, mod)))
+ if (!(try lhs_val.compareAllWithZeroSema(.gte, pt)) and
+ !(try rem_result.compareAllWithZeroSema(.eq, pt)))
{
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
@@ -15769,17 +15907,17 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.gte, pt))) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) {
+ if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, pt))) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
- return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern());
} else {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
@@ -15804,31 +15942,32 @@ fn intRem(
lhs: Value,
rhs: Value,
) CompileError!Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
scalar.* = (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
return sema.intRemScalar(lhs, rhs, ty);
}
fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
- const mod = sema.mod;
+ const pt = sema.pt;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
const limbs_q = try sema.arena.alloc(
math.big.Limb,
lhs_bigint.limbs.len,
@@ -15846,11 +15985,12 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr
var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
- return mod.intValue_big(scalar_ty, result_r.toConst());
+ return pt.intValue_big(scalar_ty, result_r.toConst());
}
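intRemScalar divides with big-int divTrunc and keeps the remainder, which is the `@rem` convention (sign follows the numerator); `@mod` follows the divisor instead. The `%` operator is only accepted where the two agree, roughly:

```zig
const std = @import("std");

test "@rem follows the numerator, @mod the divisor" {
    try std.testing.expectEqual(@as(i32, -2), @rem(@as(i32, -5), 3));
    try std.testing.expectEqual(@as(i32, 1), @mod(@as(i32, -5), 3));
    // With a possibly-negative signed or float operand, `%` is rejected and
    // an explicit @rem or @mod is required, as zirModRem above enforces.
}
```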
fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15904,11 +16044,11 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
- return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
break :rs lhs_src;
} else {
@@ -15920,16 +16060,16 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
- return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt)).toIntern());
} else break :rs rhs_src;
} else break :rs lhs_src;
};
@@ -15945,7 +16085,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15999,7 +16140,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
@@ -16015,16 +16156,16 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
- if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
+ if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
- return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern());
} else break :rs rhs_src;
} else break :rs lhs_src;
};
@@ -16059,7 +16200,8 @@ fn zirOverflowArithmetic(
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
@@ -16081,7 +16223,7 @@ fn zirOverflowArithmetic(
const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
- return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
+ return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(pt)});
}
const maybe_lhs_val = try sema.resolveValue(lhs);
@@ -16095,19 +16237,19 @@ fn zirOverflowArithmetic(
wrapped: Value = Value.@"unreachable",
overflow_bit: Value,
} = result: {
- const zero_bit = try mod.intValue(Type.u1, 0);
+ const zero_bit = try pt.intValue(Type.u1, 0);
switch (zir_tag) {
.add_with_overflow => {
// If either of the arguments is zero, `false` is returned and the other is stored
// to the result, even if it is undefined.
// Otherwise, if either of the arguments is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
}
}
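The shortcut above folds `x + 0` to `x` with a zero overflow bit, even when `x` is undefined; what the builtin produces at the language level, as a quick check:

```zig
const std = @import("std");

test "@addWithOverflow returns a result/overflow-bit pair" {
    const r = @addWithOverflow(@as(u8, 250), @as(u8, 10));
    try std.testing.expectEqual(@as(u8, 4), r[0]); // wrapped result
    try std.testing.expectEqual(@as(u1, 1), r[1]); // overflow happened
    const s = @addWithOverflow(@as(u8, 250), @as(u8, 0));
    try std.testing.expectEqual(@as(u8, 250), s[0]);
    try std.testing.expectEqual(@as(u1, 0), s[1]); // adding zero never overflows
}
```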
@@ -16128,7 +16270,7 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
- } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ } else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
@@ -16144,10 +16286,10 @@ fn zirOverflowArithmetic(
// If either of the arguments is zero, the result is zero and no overflow occurred.
// If either of the arguments is one, the result is the other and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
- const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
+ const scalar_one = try pt.intValue(dest_ty.scalarType(mod), 1);
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
} else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
@@ -16157,7 +16299,7 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod)) {
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
} else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
@@ -16171,7 +16313,7 @@ fn zirOverflowArithmetic(
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
- const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod);
+ const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, pt);
break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
@@ -16181,12 +16323,12 @@ fn zirOverflowArithmetic(
// If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
}
}
@@ -16196,7 +16338,7 @@ fn zirOverflowArithmetic(
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
- const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod);
+ const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, pt);
break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
@@ -16235,7 +16377,7 @@ fn zirOverflowArithmetic(
}
if (result.inst == .none) {
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = tuple_ty.toIntern(),
.storage = .{ .elems = &.{
result.wrapped.toIntern(),
@@ -16251,9 +16393,10 @@ fn zirOverflowArithmetic(
}
fn splat(sema: *Sema, ty: Type, val: Value) !Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) != .Vector) return val;
- const repeated = try mod.intern(.{ .aggregate = .{
+ const repeated = try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = val.toIntern() },
} });
@@ -16261,16 +16404,17 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value {
}
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
+ const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try pt.vectorType(.{
.len = ty.vectorLen(mod),
.child = .u1_type,
}) else Type.u1;
const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
const values = [2]InternPool.Index{ .none, .none };
- const tuple_ty = try ip.getAnonStructType(mod.gpa, .{
+ const tuple_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{
.types = &types,
.values = &values,
.names = &.{},
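For vector operands the second tuple field built here is a vector of u1, one overflow bit per lane; a rough sketch of the resulting shape:

```zig
const std = @import("std");

test "overflow builtins vectorize the overflow bit" {
    const v: @Vector(4, u8) = .{ 255, 1, 2, 3 };
    const ones: @Vector(4, u8) = @splat(1);
    const r = @addWithOverflow(v, ones);
    // r[0]: @Vector(4, u8) wrapped sums; r[1]: @Vector(4, u1) per-lane bits.
    try std.testing.expectEqual(@as(u1, 1), r[1][0]); // 255 + 1 wrapped
    try std.testing.expectEqual(@as(u1, 0), r[1][1]); // 1 + 1 did not
}
```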
@@ -16290,7 +16434,8 @@ fn analyzeArithmetic(
rhs_src: LazySrcLoc,
want_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
@@ -16337,7 +16482,7 @@ fn analyzeArithmetic(
// overflow (max_int), causing illegal behavior.
// For floats: either operand being undef makes the result undef.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
return casted_rhs;
}
}
@@ -16346,10 +16491,10 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return casted_lhs;
}
}
@@ -16359,7 +16504,7 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
}
if (maybe_rhs_val) |rhs_val| {
@@ -16371,7 +16516,7 @@ fn analyzeArithmetic(
}
return Air.internedToRef(sum.toIntern());
} else {
- return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else break :rs .{ rhs_src, air_tag, .add_safe };
} else break :rs .{ lhs_src, air_tag, .add_safe };
@@ -16381,15 +16526,15 @@ fn analyzeArithmetic(
// If either of the operands are zero, the other operand is returned.
// If either of the operands are undefined, the result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
return casted_rhs;
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
@@ -16402,26 +16547,26 @@ fn analyzeArithmetic(
// If either of the operands are zero, then the other operand is returned.
// If either of the operands are undefined, the result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
return casted_rhs;
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
const val = if (scalar_tag == .ComptimeInt)
try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined)
else
- try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);
+ try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, pt);
return Air.internedToRef(val.toIntern());
} else break :rs .{
@@ -16448,10 +16593,10 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return casted_lhs;
}
}
@@ -16461,7 +16606,7 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
}
if (maybe_rhs_val) |rhs_val| {
@@ -16473,7 +16618,7 @@ fn analyzeArithmetic(
}
return Air.internedToRef(diff.toIntern());
} else {
- return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else break :rs .{ rhs_src, air_tag, .sub_safe };
} else break :rs .{ lhs_src, air_tag, .sub_safe };
@@ -16484,15 +16629,15 @@ fn analyzeArithmetic(
// If either of the operands are undefined, the result is undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return casted_lhs;
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return Air.internedToRef((try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern());
@@ -16505,21 +16650,21 @@ fn analyzeArithmetic(
// If either of the operands are undefined, the result is undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return casted_lhs;
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
const val = if (scalar_tag == .ComptimeInt)
try sema.intSub(lhs_val, rhs_val, resolved_type, undefined)
else
- try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);
+ try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, pt);
return Air.internedToRef(val.toIntern());
} else break :rs .{ rhs_src, .sub_sat, .sub_sat };
@@ -16540,13 +16685,13 @@ fn analyzeArithmetic(
// the result is nan.
// If either of the operands are nan, the result is nan.
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
- .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
+ .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0),
+ .ComptimeInt, .Int => try pt.intValue(scalar_type, 0),
else => unreachable,
};
const scalar_one = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
- .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
+ .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0),
+ .ComptimeInt, .Int => try pt.intValue(scalar_type, 1),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
@@ -16554,13 +16699,13 @@ fn analyzeArithmetic(
if (lhs_val.isNan(mod)) {
return Air.internedToRef(lhs_val.toIntern());
}
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) lz: {
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isNan(mod)) {
return Air.internedToRef(rhs_val.toIntern());
}
if (rhs_val.isInf(mod)) {
- return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
+ return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern());
}
} else if (resolved_type.isAnyFloat()) {
break :lz;
@@ -16579,16 +16724,16 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
}
if (rhs_val.isNan(mod)) {
return Air.internedToRef(rhs_val.toIntern());
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) rz: {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isInf(mod)) {
- return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
+ return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern());
}
} else if (resolved_type.isAnyFloat()) {
break :rz;
@@ -16604,18 +16749,18 @@ fn analyzeArithmetic(
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
}
if (is_int) {
var overflow_idx: ?usize = null;
- const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+ const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, pt);
if (overflow_idx) |vec_idx| {
return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx);
}
return Air.internedToRef(product.toIntern());
} else {
- return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, pt)).toIntern());
}
} else break :rs .{ lhs_src, air_tag, .mul_safe };
} else break :rs .{ rhs_src, air_tag, .mul_safe };
@@ -16626,18 +16771,18 @@ fn analyzeArithmetic(
// If either of the operands are one, result is the other operand.
// If either of the operands are undefined, result is undefined.
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
- .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
+ .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0),
+ .ComptimeInt, .Int => try pt.intValue(scalar_type, 0),
else => unreachable,
};
const scalar_one = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
- .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
+ .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0),
+ .ComptimeInt, .Int => try pt.intValue(scalar_type, 1),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const zero_val = try sema.splat(resolved_type, scalar_zero);
return Air.internedToRef(zero_val.toIntern());
}
@@ -16648,9 +16793,9 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
const zero_val = try sema.splat(resolved_type, scalar_zero);
return Air.internedToRef(zero_val.toIntern());
}
@@ -16659,9 +16804,9 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod)).toIntern());
+ return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, pt)).toIntern());
} else break :rs .{ lhs_src, .mul_wrap, .mul_wrap };
} else break :rs .{ rhs_src, .mul_wrap, .mul_wrap };
},
@@ -16671,18 +16816,18 @@ fn analyzeArithmetic(
// If either of the operands are one, result is the other operand.
// If either of the operands are undefined, result is undefined.
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
- .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
+ .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0),
+ .ComptimeInt, .Int => try pt.intValue(scalar_type, 0),
else => unreachable,
};
const scalar_one = switch (scalar_tag) {
- .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
- .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
+ .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0),
+ .ComptimeInt, .Int => try pt.intValue(scalar_type, 1),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
- if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const zero_val = try sema.splat(resolved_type, scalar_zero);
return Air.internedToRef(zero_val.toIntern());
}
@@ -16693,9 +16838,9 @@ fn analyzeArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
- if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
+ if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
const zero_val = try sema.splat(resolved_type, scalar_zero);
return Air.internedToRef(zero_val.toIntern());
}
@@ -16704,13 +16849,13 @@ fn analyzeArithmetic(
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
- return mod.undefRef(resolved_type);
+ return pt.undefRef(resolved_type);
}
const val = if (scalar_tag == .ComptimeInt)
- try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod)
+ try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, pt)
else
- try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod);
+ try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, pt);
return Air.internedToRef(val.toIntern());
} else break :rs .{ lhs_src, .mul_sat, .mul_sat };
@@ -16758,7 +16903,7 @@ fn analyzeArithmetic(
})
else
ov_bit;
- const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
+ const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
try sema.addSafetyCheck(block, src, no_ov, .integer_overflow);
@@ -16782,7 +16927,8 @@ fn analyzePtrArithmetic(
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const opt_ptr_val = try sema.resolveValue(ptr);
const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
const ptr_ty = sema.typeOf(ptr);
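The change running through all of these hunks is the same: Sema now reaches the compilation unit through a per-thread handle, so each site swaps `const mod = sema.mod;` for `const pt = sema.pt; const mod = pt.zcu;`, moves value and type construction (`intValue`, `undefRef`, `intern`, `ptrTypeSema`, `getBuiltinType`, and friends) onto `pt`, and keeps read-only queries (`isUndef`, `zigTypeTag`, intern-pool lookups) on `mod`. A minimal sketch of the resulting shape, using a hypothetical helper rather than any function from this diff:

    // Illustrative only; mirrors the calls visible in the surrounding hunks.
    fn example(sema: *Sema, resolved_type: Type, rhs_val: Value) !Air.Inst.Ref {
        const pt = sema.pt; // per-thread handle (previously: const mod = sema.mod;)
        const mod = pt.zcu; // the module/Zcu is still used for read-only queries
        if (rhs_val.isUndef(mod)) return pt.undefRef(resolved_type);
        const zero = try pt.intValue(resolved_type, 0); // construction goes through pt
        return Air.internedToRef(zero.toIntern());
    }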
@@ -16800,7 +16946,7 @@ fn analyzePtrArithmetic(
// it being a multiple of the type size.
const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
const addend = if (opt_off_val) |off_val| a: {
- const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod));
+ const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(pt));
break :a elem_size * off_int;
} else elem_size;
@@ -16813,7 +16959,7 @@ fn analyzePtrArithmetic(
));
assert(new_align != .none);
- break :t try mod.ptrTypeSema(.{
+ break :t try pt.ptrTypeSema(.{
.child = ptr_info.child,
.sentinel = ptr_info.sentinel,
.flags = .{
@@ -16830,16 +16976,16 @@ fn analyzePtrArithmetic(
const runtime_src = rs: {
if (opt_ptr_val) |ptr_val| {
if (opt_off_val) |offset_val| {
- if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty);
+ if (ptr_val.isUndef(mod)) return pt.undefRef(new_ptr_ty);
- const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod));
+ const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(pt));
if (offset_int == 0) return ptr;
if (air_tag == .ptr_sub) {
const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty);
return Air.internedToRef(new_ptr_val.toIntern());
} else {
- const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty);
+ const new_ptr_val = try pt.getCoerced(try ptr_val.ptrElem(offset_int, pt), new_ptr_ty);
return Air.internedToRef(new_ptr_val.toIntern());
}
} else break :rs offset_src;
@@ -16879,6 +17025,8 @@ fn zirAsm(
const tracy = trace(@src());
defer tracy.end();
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
const src = block.nodeOffset(extra.data.src_node);
const ret_ty_src = block.src(.{ .node_offset_asm_ret_ty = extra.data.src_node });
@@ -16910,7 +17058,7 @@ fn zirAsm(
if (is_volatile) {
return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{});
}
- try sema.mod.addGlobalAssembly(sema.owner_decl_index, asm_source);
+ try mod.addGlobalAssembly(sema.owner_decl_index, asm_source);
return .void_value;
}
@@ -16959,7 +17107,6 @@ fn zirAsm(
const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
- const mod = sema.mod;
for (args, 0..) |*arg, arg_i| {
const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
@@ -17049,7 +17196,8 @@ fn zirCmpEq(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = block.nodeOffset(inst_data.src_node);
@@ -17077,7 +17225,7 @@ fn zirCmpEq(
if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
- return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)});
+ return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(pt)});
}
if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) {
@@ -17092,7 +17240,7 @@ fn zirCmpEq(
if (try sema.resolveValue(lhs)) |lval| {
if (try sema.resolveValue(rhs)) |rval| {
if (lval.isUndef(mod) or rval.isUndef(mod)) {
- return mod.undefRef(Type.bool);
+ return pt.undefRef(Type.bool);
}
const lkey = mod.intern_pool.indexToKey(lval.toIntern());
const rkey = mod.intern_pool.indexToKey(rval.toIntern());
@@ -17128,14 +17276,15 @@ fn analyzeCmpUnionTag(
tag_src: LazySrcLoc,
op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const union_ty = sema.typeOf(un);
- try union_ty.resolveFields(mod);
+ try union_ty.resolveFields(pt);
const union_tag_ty = union_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(union_ty.srcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)});
+ try sema.errNote(union_ty.srcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(pt)});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -17146,7 +17295,7 @@ fn analyzeCmpUnionTag(
const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
if (try sema.resolveValue(coerced_tag)) |enum_val| {
- if (enum_val.isUndef(mod)) return mod.undefRef(Type.bool);
+ if (enum_val.isUndef(mod)) return pt.undefRef(Type.bool);
const field_ty = union_ty.unionFieldType(enum_val, mod).?;
if (field_ty.zigTypeTag(mod) == .NoReturn) {
return .bool_false;
@@ -17187,7 +17336,8 @@ fn analyzeCmp(
rhs_src: LazySrcLoc,
is_equality_cmp: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
@@ -17215,7 +17365,7 @@ fn analyzeCmp(
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
- compareOperatorName(op), resolved_type.fmt(mod),
+ compareOperatorName(op), resolved_type.fmt(pt),
});
}
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
@@ -17244,13 +17394,14 @@ fn cmpSelf(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const resolved_type = sema.typeOf(casted_lhs);
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveValue(casted_lhs)) |lhs_val| {
- if (lhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
+ if (lhs_val.isUndef(mod)) return pt.undefRef(Type.bool);
if (try sema.resolveValue(casted_rhs)) |rhs_val| {
- if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
+ if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool);
if (resolved_type.zigTypeTag(mod) == .Vector) {
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
@@ -17273,7 +17424,7 @@ fn cmpSelf(
// bool eq/neq more efficiently.
if (resolved_type.zigTypeTag(mod) == .Bool) {
if (try sema.resolveValue(casted_rhs)) |rhs_val| {
- if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
+ if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool);
return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
}
}
@@ -17310,24 +17461,24 @@ fn runtimeBoolCmp(
}
fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(pt.zcu)) {
.Fn,
.NoReturn,
.Undefined,
.Null,
.Opaque,
- => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}),
+ => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(pt)}),
.Type,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Void,
- => return mod.intRef(Type.comptime_int, 0),
+ => return pt.intRef(Type.comptime_int, 0),
.Bool,
.Int,
@@ -17345,12 +17496,13 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.AnyFrame,
=> {},
}
- const val = try ty.lazyAbiSize(mod);
+ const val = try ty.lazyAbiSize(pt);
return Air.internedToRef(val.toIntern());
}
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
@@ -17360,14 +17512,14 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Undefined,
.Null,
.Opaque,
- => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}),
+ => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(pt)}),
.Type,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Void,
- => return mod.intRef(Type.comptime_int, 0),
+ => return pt.intRef(Type.comptime_int, 0),
.Bool,
.Int,
@@ -17385,8 +17537,8 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.AnyFrame,
=> {},
}
- const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema);
- return mod.intRef(Type.comptime_int, bit_size);
+ const bit_size = try operand_ty.bitSizeAdvanced(pt, .sema);
+ return pt.intRef(Type.comptime_int, bit_size);
}
fn zirThis(
@@ -17394,14 +17546,16 @@ fn zirThis(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const this_decl_index = mod.namespacePtr(block.namespace).decl_index;
const src = block.nodeOffset(@bitCast(extended.operand));
return sema.analyzeDeclVal(block, src, this_decl_index);
}
fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const captures = mod.namespacePtr(block.namespace).getType(mod).getCaptures(mod);
@@ -17489,7 +17643,7 @@ fn zirRetAddr(
_ = extended;
if (block.is_comptime) {
// TODO: we could give a meaningful lazy value here. #14938
- return sema.mod.intRef(Type.usize, 0);
+ return sema.pt.intRef(Type.usize, 0);
} else {
return block.addNoOp(.ret_addr);
}
@@ -17514,7 +17668,8 @@ fn zirBuiltinSrc(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
const ip = &mod.intern_pool;
@@ -17522,43 +17677,43 @@ fn zirBuiltinSrc(
const func_name_val = v: {
const func_name_len = fn_owner_decl.name.length(ip);
- const array_ty = try ip.get(gpa, .{ .array_type = .{
+ const array_ty = try pt.intern(.{ .array_type = .{
.len = func_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
} });
- break :v try ip.get(gpa, .{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try ip.get(gpa, .{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.orig_ty = .slice_const_u8_sentinel_0_type,
- .val = try ip.get(gpa, .{ .aggregate = .{
+ .val = try pt.intern(.{ .aggregate = .{
.ty = array_ty,
.storage = .{ .bytes = fn_owner_decl.name.toString() },
} }),
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, func_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, func_name_len)).toIntern(),
} });
};
const file_name_val = v: {
// The compiler must not call realpath anywhere.
const file_name = try fn_owner_decl.getFileScope(mod).fullPath(sema.arena);
- const array_ty = try ip.get(gpa, .{ .array_type = .{
+ const array_ty = try pt.intern(.{ .array_type = .{
.len = file_name.len,
.sentinel = .zero_u8,
.child = .u8_type,
} });
- break :v try ip.get(gpa, .{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try ip.get(gpa, .{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.orig_ty = .slice_const_u8_sentinel_0_type,
- .val = try ip.get(gpa, .{ .aggregate = .{
+ .val = try pt.intern(.{ .aggregate = .{
.ty = array_ty,
.storage = .{
.bytes = try ip.getOrPutString(gpa, file_name, .maybe_embedded_nulls),
@@ -17567,35 +17722,36 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, file_name.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, file_name.len)).toIntern(),
} });
};
- const src_loc_ty = try mod.getBuiltinType("SourceLocation");
+ const src_loc_ty = try pt.getBuiltinType("SourceLocation");
const fields = .{
// file: [:0]const u8,
file_name_val,
// fn_name: [:0]const u8,
func_name_val,
// line: u32,
- (try mod.intValue(Type.u32, extra.line + 1)).toIntern(),
+ (try pt.intValue(Type.u32, extra.line + 1)).toIntern(),
// column: u32,
- (try mod.intValue(Type.u32, extra.column + 1)).toIntern(),
+ (try pt.intValue(Type.u32, extra.column + 1)).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = src_loc_ty.toIntern(),
.storage = .{ .elems = &fields },
} })));
}
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ty = try sema.resolveType(block, src, inst_data.operand);
- const type_info_ty = try mod.getBuiltinType("Type");
+ const type_info_ty = try pt.getBuiltinType("Type");
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
if (ty.typeDeclInst(mod)) |type_decl_inst| {
@@ -17612,9 +17768,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Undefined,
.Null,
.EnumLiteral,
- => |type_info_tag| return Air.internedToRef((try mod.intern(.{ .un = .{
+ => |type_info_tag| return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(),
.val = .void_value,
} }))),
.Fn => {
@@ -17643,8 +17799,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
for (param_vals, 0..) |*param_val, i| {
const param_ty = func_ty_info.param_types.get(ip)[i];
const is_generic = param_ty == .generic_poison_type;
- const param_ty_val = try ip.get(gpa, .{ .opt = .{
- .ty = try ip.get(gpa, .{ .opt_type = .type_type }),
+ const param_ty_val = try pt.intern(.{ .opt = .{
+ .ty = try pt.intern(.{ .opt_type = .type_type }),
.val = if (is_generic) .none else param_ty,
} });
@@ -17661,22 +17817,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: ?type,
param_ty_val,
};
- param_val.* = try mod.intern(.{ .aggregate = .{
+ param_val.* = try pt.intern(.{ .aggregate = .{
.ty = param_info_ty.toIntern(),
.storage = .{ .elems = ¶m_fields },
} });
}
const args_val = v: {
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = param_vals.len,
.child = param_info_ty.toIntern(),
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .elems = param_vals },
} });
- const slice_ty = (try mod.ptrTypeSema(.{
+ const slice_ty = (try pt.ptrTypeSema(.{
.child = param_info_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -17684,9 +17840,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
})).toIntern();
const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = slice_ty,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = manyptr_ty,
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
@@ -17694,23 +17850,23 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, param_vals.len)).toIntern(),
} });
};
- const ret_ty_opt = try mod.intern(.{ .opt = .{
- .ty = try ip.get(gpa, .{ .opt_type = .type_type }),
+ const ret_ty_opt = try pt.intern(.{ .opt = .{
+ .ty = try pt.intern(.{ .opt_type = .type_type }),
.val = if (func_ty_info.return_type == .generic_poison_type)
.none
else
func_ty_info.return_type,
} });
- const callconv_ty = try mod.getBuiltinType("CallingConvention");
+ const callconv_ty = try pt.getBuiltinType("CallingConvention");
const field_values = .{
// calling_convention: CallingConvention,
- (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
+ (try pt.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
// is_generic: bool,
Value.makeBool(func_ty_info.is_generic).toIntern(),
// is_var_args: bool,
@@ -17720,10 +17876,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// args: []const Fn.Param,
args_val,
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = fn_info_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -17740,18 +17896,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const int_info_decl = mod.declPtr(int_info_decl_index);
const int_info_ty = int_info_decl.val.toType();
- const signedness_ty = try mod.getBuiltinType("Signedness");
+ const signedness_ty = try pt.getBuiltinType("Signedness");
const info = ty.intInfo(mod);
const field_values = .{
// signedness: Signedness,
- (try mod.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
+ (try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
// bits: u16,
- (try mod.intValue(Type.u16, info.bits)).toIntern(),
+ (try pt.intValue(Type.u16, info.bits)).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = int_info_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -17770,12 +17926,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_vals = .{
// bits: u16,
- (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(),
+ (try pt.intValue(Type.u16, ty.bitSize(pt))).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = float_info_ty.toIntern(),
.storage = .{ .elems = &field_vals },
} }),
@@ -17784,16 +17940,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Pointer => {
const info = ty.ptrInfo(mod);
const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
- try mod.intValue(Type.comptime_int, alignment)
+ try pt.intValue(Type.comptime_int, alignment)
else
- try Type.fromInterned(info.child).lazyAbiAlignment(mod);
+ try Type.fromInterned(info.child).lazyAbiAlignment(pt);
- const addrspace_ty = try mod.getBuiltinType("AddressSpace");
+ const addrspace_ty = try pt.getBuiltinType("AddressSpace");
const pointer_ty = t: {
const decl_index = (try sema.namespaceLookup(
block,
src,
- (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
+ (try pt.getBuiltinType("Type")).getNamespaceIndex(mod),
try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls),
)).?;
try sema.ensureDeclAnalyzed(decl_index);
@@ -17814,7 +17970,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = .{
// size: Size,
- (try mod.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).toIntern(),
+ (try pt.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).toIntern(),
// is_const: bool,
Value.makeBool(info.flags.is_const).toIntern(),
// is_volatile: bool,
@@ -17822,7 +17978,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// alignment: comptime_int,
alignment.toIntern(),
// address_space: AddressSpace
- (try mod.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).toIntern(),
+ (try pt.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).toIntern(),
// child: type,
info.child,
// is_allowzero: bool,
@@ -17833,10 +17989,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else => Value.fromInterned(info.sentinel),
})).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = pointer_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -17858,16 +18014,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.arrayInfo(mod);
const field_values = .{
// len: comptime_int,
- (try mod.intValue(Type.comptime_int, info.len)).toIntern(),
+ (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
// child: type,
info.elem_type.toIntern(),
// sentinel: ?*const anyopaque,
(try sema.optRefValue(info.sentinel)).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = array_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -17889,14 +18045,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.arrayInfo(mod);
const field_values = .{
// len: comptime_int,
- (try mod.intValue(Type.comptime_int, info.len)).toIntern(),
+ (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
// child: type,
info.elem_type.toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = vector_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -17919,10 +18075,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// child: type,
ty.optionalChild(mod).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = optional_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -17954,18 +18110,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const error_name = names.get(ip)[error_index];
const error_name_len = error_name.length(ip);
const error_name_val = v: {
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = error_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = error_name.toString() },
} });
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
@@ -17973,7 +18129,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, error_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, error_name_len)).toIntern(),
} });
};
@@ -17981,7 +18137,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// name: [:0]const u8,
error_name_val,
};
- field_val.* = try mod.intern(.{ .aggregate = .{
+ field_val.* = try pt.intern(.{ .aggregate = .{
.ty = error_field_ty.toIntern(),
.storage = .{ .elems = &error_field_fields },
} });
@@ -17992,27 +18148,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
// Build our ?[]const Error value
- const slice_errors_ty = try mod.ptrTypeSema(.{
+ const slice_errors_ty = try pt.ptrTypeSema(.{
.child = error_field_ty.toIntern(),
.flags = .{
.size = .Slice,
.is_const = true,
},
});
- const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern());
+ const opt_slice_errors_ty = try pt.optionalType(slice_errors_ty.toIntern());
const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: {
- const array_errors_ty = try mod.arrayType(.{
+ const array_errors_ty = try pt.arrayType(.{
.len = vals.len,
.child = error_field_ty.toIntern(),
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = array_errors_ty.toIntern(),
.storage = .{ .elems = vals },
} });
const manyptr_errors_ty = slice_errors_ty.slicePtrFieldType(mod).toIntern();
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = slice_errors_ty.toIntern(),
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = manyptr_errors_ty,
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_errors_ty,
@@ -18020,18 +18176,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, vals.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, vals.len)).toIntern(),
} });
} else .none;
- const errors_val = try mod.intern(.{ .opt = .{
+ const errors_val = try pt.intern(.{ .opt = .{
.ty = opt_slice_errors_ty.toIntern(),
.val = errors_payload_val,
} });
// Construct Type{ .ErrorSet = errors_val }
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(),
.val = errors_val,
} })));
},
@@ -18054,10 +18210,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// payload: type,
ty.errorUnionPayload(mod).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = error_union_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -18082,30 +18238,31 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
for (enum_field_vals, 0..) |*field_val, tag_index| {
const enum_type = ip.loadEnumType(ty.toIntern());
const value_val = if (enum_type.values.len > 0)
- try mod.intern_pool.getCoercedInts(
+ try ip.getCoercedInts(
mod.gpa,
- mod.intern_pool.indexToKey(enum_type.values.get(ip)[tag_index]).int,
+ pt.tid,
+ ip.indexToKey(enum_type.values.get(ip)[tag_index]).int,
.comptime_int_type,
)
else
- (try mod.intValue(Type.comptime_int, tag_index)).toIntern();
+ (try pt.intValue(Type.comptime_int, tag_index)).toIntern();
// TODO: write something like getCoercedInts to avoid needing to dupe
const name_val = v: {
const tag_name = enum_type.names.get(ip)[tag_index];
const tag_name_len = tag_name.length(ip);
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = tag_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = tag_name.toString() },
} });
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
@@ -18113,7 +18270,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, tag_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, tag_name_len)).toIntern(),
} });
};
@@ -18123,22 +18280,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// value: comptime_int,
value_val,
};
- field_val.* = try mod.intern(.{ .aggregate = .{
+ field_val.* = try pt.intern(.{ .aggregate = .{
.ty = enum_field_ty.toIntern(),
.storage = .{ .elems = &enum_field_fields },
} });
}
const fields_val = v: {
- const fields_array_ty = try mod.arrayType(.{
+ const fields_array_ty = try pt.arrayType(.{
.len = enum_field_vals.len,
.child = enum_field_ty.toIntern(),
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = fields_array_ty.toIntern(),
.storage = .{ .elems = enum_field_vals },
} });
- const slice_ty = (try mod.ptrTypeSema(.{
+ const slice_ty = (try pt.ptrTypeSema(.{
.child = enum_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -18146,9 +18303,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
})).toIntern();
const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = slice_ty,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = manyptr_ty,
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
@@ -18156,7 +18313,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, enum_field_vals.len)).toIntern(),
} });
};
@@ -18184,10 +18341,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_exhaustive: bool,
is_exhaustive.toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = type_enum_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -18218,7 +18375,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t union_field_ty_decl.val.toType();
};
- try ty.resolveLayout(mod); // Getting alignment requires type layout
+ try ty.resolveLayout(pt); // Getting alignment requires type layout
const union_obj = mod.typeToUnion(ty).?;
const tag_type = union_obj.loadTagType(ip);
const layout = union_obj.getLayout(ip);
@@ -18230,18 +18387,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const name_val = v: {
const field_name = tag_type.names.get(ip)[field_index];
const field_name_len = field_name.length(ip);
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = field_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = field_name.toString() },
} });
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
@@ -18249,12 +18406,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
} });
};
const alignment = switch (layout) {
- .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema),
+ .auto, .@"extern" => try pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema),
.@"packed" => .none,
};
@@ -18265,24 +18422,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: type,
field_ty,
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
- field_val.* = try mod.intern(.{ .aggregate = .{
+ field_val.* = try pt.intern(.{ .aggregate = .{
.ty = union_field_ty.toIntern(),
.storage = .{ .elems = &union_field_fields },
} });
}
const fields_val = v: {
- const array_fields_ty = try mod.arrayType(.{
+ const array_fields_ty = try pt.arrayType(.{
.len = union_field_vals.len,
.child = union_field_ty.toIntern(),
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = array_fields_ty.toIntern(),
.storage = .{ .elems = union_field_vals },
} });
- const slice_ty = (try mod.ptrTypeSema(.{
+ const slice_ty = (try pt.ptrTypeSema(.{
.child = union_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -18290,9 +18447,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
})).toIntern();
const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = slice_ty,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = manyptr_ty,
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
@@ -18300,14 +18457,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, union_field_vals.len)).toIntern(),
} });
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
- const enum_tag_ty_val = try mod.intern(.{ .opt = .{
- .ty = (try mod.optionalType(.type_type)).toIntern(),
+ const enum_tag_ty_val = try pt.intern(.{ .opt = .{
+ .ty = (try pt.optionalType(.type_type)).toIntern(),
.val = if (ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none,
} });
@@ -18315,7 +18472,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const decl_index = (try sema.namespaceLookup(
block,
src,
- (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
+ (try pt.getBuiltinType("Type")).getNamespaceIndex(mod),
try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls),
)).?;
try sema.ensureDeclAnalyzed(decl_index);
@@ -18325,7 +18482,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = .{
// layout: ContainerLayout,
- (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
+ (try pt.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
// tag_type: ?type,
enum_tag_ty_val,
@@ -18334,10 +18491,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// decls: []const Declaration,
decls_val,
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = type_union_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -18368,7 +18525,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t struct_field_ty_decl.val.toType();
};
- try ty.resolveLayout(mod); // Getting alignment requires type layout
+ try ty.resolveLayout(pt); // Getting alignment requires type layout
var struct_field_vals: []InternPool.Index = &.{};
defer gpa.free(struct_field_vals);
@@ -18385,18 +18542,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls);
const field_name_len = field_name.length(ip);
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = field_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = field_name.toString() },
} });
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
@@ -18404,11 +18561,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
} });
};
- try Type.fromInterned(field_ty).resolveLayout(mod);
+ try Type.fromInterned(field_ty).resolveLayout(pt);
const is_comptime = field_val != .none;
const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null;
@@ -18423,9 +18580,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(pt).toByteUnits() orelse 0)).toIntern(),
};
- struct_field_val.* = try mod.intern(.{ .aggregate = .{
+ struct_field_val.* = try pt.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
.storage = .{ .elems = &struct_field_fields },
} });
@@ -18437,7 +18594,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
- try ty.resolveStructFieldInits(mod);
+ try ty.resolveStructFieldInits(pt);
for (struct_field_vals, 0..) |*field_val, field_index| {
const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name|
@@ -18449,18 +18606,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_init = struct_type.fieldInit(ip, field_index);
const field_is_comptime = struct_type.fieldIsComptime(ip, field_index);
const name_val = v: {
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = field_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = field_name.toString() },
} });
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.val = new_decl_val,
@@ -18468,7 +18625,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
} });
};
@@ -18476,7 +18633,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const default_val_ptr = try sema.optRefValue(opt_default_val);
const alignment = switch (struct_type.layout) {
.@"packed" => .none,
- else => try mod.structFieldAlignmentAdvanced(
+ else => try pt.structFieldAlignmentAdvanced(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
@@ -18494,9 +18651,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(field_is_comptime).toIntern(),
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
- field_val.* = try mod.intern(.{ .aggregate = .{
+ field_val.* = try pt.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
.storage = .{ .elems = &struct_field_fields },
} });
@@ -18504,15 +18661,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
const fields_val = v: {
- const array_fields_ty = try mod.arrayType(.{
+ const array_fields_ty = try pt.arrayType(.{
.len = struct_field_vals.len,
.child = struct_field_ty.toIntern(),
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = array_fields_ty.toIntern(),
.storage = .{ .elems = struct_field_vals },
} });
- const slice_ty = (try mod.ptrTypeSema(.{
+ const slice_ty = (try pt.ptrTypeSema(.{
.child = struct_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -18520,9 +18677,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
})).toIntern();
const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = slice_ty,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = manyptr_ty,
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
@@ -18530,14 +18687,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, struct_field_vals.len)).toIntern(),
} });
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
- const backing_integer_val = try mod.intern(.{ .opt = .{
- .ty = (try mod.optionalType(.type_type)).toIntern(),
+ const backing_integer_val = try pt.intern(.{ .opt = .{
+ .ty = (try pt.optionalType(.type_type)).toIntern(),
.val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod));
break :val packed_struct.backingIntType(ip).*;
@@ -18548,7 +18705,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const decl_index = (try sema.namespaceLookup(
block,
src,
- (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
+ (try pt.getBuiltinType("Type")).getNamespaceIndex(mod),
try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls),
)).?;
try sema.ensureDeclAnalyzed(decl_index);
@@ -18560,7 +18717,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = [_]InternPool.Index{
// layout: ContainerLayout,
- (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
+ (try pt.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
// backing_integer: ?type,
backing_integer_val,
// fields: []const StructField,
@@ -18570,10 +18727,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_tuple: bool,
Value.makeBool(ty.isTuple(mod)).toIntern(),
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = type_struct_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -18592,17 +18749,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t type_opaque_ty_decl.val.toType();
};
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
const field_values = .{
// decls: []const Declaration,
decls_val,
};
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(),
- .val = try mod.intern(.{ .aggregate = .{
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(),
+ .val = try pt.intern(.{ .aggregate = .{
.ty = type_opaque_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
@@ -18620,7 +18777,8 @@ fn typeInfoDecls(
type_info_ty: Type,
opt_namespace: InternPool.OptionalNamespaceIndex,
) CompileError!InternPool.Index {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const declaration_ty = t: {
@@ -18643,15 +18801,15 @@ fn typeInfoDecls(
try sema.typeInfoNamespaceDecls(block, opt_namespace, declaration_ty, &decl_vals, &seen_namespaces);
- const array_decl_ty = try mod.arrayType(.{
+ const array_decl_ty = try pt.arrayType(.{
.len = decl_vals.items.len,
.child = declaration_ty.toIntern(),
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = array_decl_ty.toIntern(),
.storage = .{ .elems = decl_vals.items },
} });
- const slice_ty = (try mod.ptrTypeSema(.{
+ const slice_ty = (try pt.ptrTypeSema(.{
.child = declaration_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -18659,9 +18817,9 @@ fn typeInfoDecls(
},
})).toIntern();
const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
- return try mod.intern(.{ .slice = .{
+ return try pt.intern(.{ .slice = .{
.ty = slice_ty,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = manyptr_ty,
.base_addr = .{ .anon_decl = .{
.orig_ty = manyptr_ty,
@@ -18669,7 +18827,7 @@ fn typeInfoDecls(
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, decl_vals.items.len)).toIntern(),
} });
}
@@ -18681,7 +18839,8 @@ fn typeInfoNamespaceDecls(
decl_vals: *std.ArrayList(InternPool.Index),
seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const namespace_index = opt_namespace_index.unwrap() orelse return;
@@ -18703,18 +18862,18 @@ fn typeInfoNamespaceDecls(
if (decl.kind != .named) continue;
const name_val = v: {
const decl_name_len = decl.name.length(ip);
- const new_decl_ty = try mod.arrayType(.{
+ const new_decl_ty = try pt.arrayType(.{
.len = decl_name_len,
.sentinel = .zero_u8,
.child = .u8_type,
});
- const new_decl_val = try mod.intern(.{ .aggregate = .{
+ const new_decl_val = try pt.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = decl.name.toString() },
} });
- break :v try mod.intern(.{ .slice = .{
+ break :v try pt.intern(.{ .slice = .{
.ty = .slice_const_u8_sentinel_0_type,
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = .manyptr_const_u8_sentinel_0_type,
.base_addr = .{ .anon_decl = .{
.orig_ty = .slice_const_u8_sentinel_0_type,
@@ -18722,7 +18881,7 @@ fn typeInfoNamespaceDecls(
} },
.byte_offset = 0,
} }),
- .len = (try mod.intValue(Type.usize, decl_name_len)).toIntern(),
+ .len = (try pt.intValue(Type.usize, decl_name_len)).toIntern(),
} });
};
@@ -18730,7 +18889,7 @@ fn typeInfoNamespaceDecls(
//name: [:0]const u8,
name_val,
};
- try decl_vals.append(try mod.intern(.{ .aggregate = .{
+ try decl_vals.append(try pt.intern(.{ .aggregate = .{
.ty = declaration_ty.toIntern(),
.storage = .{ .elems = &fields },
} }));
@@ -18782,11 +18941,12 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (operand.zigTypeTag(mod)) {
.ComptimeInt => return Type.comptime_int,
.Int => {
- const bits = operand.bitSize(mod);
+ const bits = operand.bitSize(pt);
const count = if (bits == 0)
0
else blk: {
@@ -18797,12 +18957,12 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
}
break :blk count;
};
- return mod.intType(.unsigned, count);
+ return pt.intType(.unsigned, count);
},
.Vector => {
const elem_ty = operand.elemType2(mod);
const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
- return mod.vectorType(.{
+ return pt.vectorType(.{
.len = operand.vectorLen(mod),
.child = log2_elem_ty.toIntern(),
});
@@ -18813,7 +18973,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
block,
src,
"bit shifting operation expected integer type, found '{}'",
- .{operand.fmt(mod)},
+ .{operand.fmt(pt)},
);
}
@@ -18865,7 +19025,8 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
@@ -18874,7 +19035,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
if (try sema.resolveValue(operand)) |val| {
return if (val.isUndef(mod))
- mod.undefRef(Type.bool)
+ pt.undefRef(Type.bool)
else if (val.toBool()) .bool_false else .bool_true;
}
try sema.requireRuntimeBlock(block, src, null);
@@ -18890,7 +19051,8 @@ fn zirBoolBr(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const datas = sema.code.instructions.items(.data);
@@ -19006,7 +19168,8 @@ fn finishCondBr(
}
fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Optional, .Null, .Undefined => return,
.Pointer => if (ty.isPtrLikeOptional(mod)) return,
@@ -19038,7 +19201,8 @@ fn zirIsNonNullPtr(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ptr = try sema.resolveInst(inst_data.operand);
@@ -19051,11 +19215,12 @@ fn zirIsNonNullPtr(
}
fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.ErrorSet, .ErrorUnion, .Undefined => return,
else => return sema.fail(block, src, "expected error union type, found '{}'", .{
- ty.fmt(mod),
+ ty.fmt(pt),
}),
}
}
@@ -19075,7 +19240,8 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ptr = try sema.resolveInst(inst_data.operand);
@@ -19102,7 +19268,8 @@ fn zirCondbr(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const cond_src = parent_block.src(.{ .node_offset_if_cond = inst_data.src_node });
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
@@ -19177,10 +19344,11 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
const body = sema.code.bodySlice(extra.end, extra.data.body_len);
const err_union = try sema.resolveInst(extra.data.operand);
const err_union_ty = sema.typeOf(err_union);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
- err_union_ty.fmt(mod),
+ err_union_ty.fmt(pt),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
@@ -19225,10 +19393,11 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
const operand = try sema.resolveInst(extra.data.operand);
const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src);
const err_union_ty = sema.typeOf(err_union);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
- err_union_ty.fmt(mod),
+ err_union_ty.fmt(pt),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
@@ -19251,7 +19420,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
const operand_ty = sema.typeOf(operand);
const ptr_info = operand_ty.ptrInfo(mod);
- const res_ty = try mod.ptrTypeSema(.{
+ const res_ty = try pt.ptrTypeSema(.{
.child = err_union_ty.errorUnionPayload(mod).toIntern(),
.flags = .{
.is_const = ptr_info.flags.is_const,
@@ -19366,7 +19535,8 @@ fn zirRetErrValue(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const src = block.tokenOffset(inst_data.src_tok);
const err_name = try mod.intern_pool.getOrPutString(
@@ -19376,8 +19546,8 @@ fn zirRetErrValue(
);
_ = try mod.getErrorValue(err_name);
// Return the error code from the function.
- const error_set_type = try mod.singleErrorSetType(err_name);
- const result_inst = Air.internedToRef((try mod.intern(.{ .err = .{
+ const error_set_type = try pt.singleErrorSetType(err_name);
+ const result_inst = Air.internedToRef((try pt.intern(.{ .err = .{
.ty = error_set_type.toIntern(),
.name = err_name,
} })));
@@ -19392,7 +19562,8 @@ fn zirRetImplicit(
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
const r_brace_src = block.tokenOffset(inst_data.src_tok);
if (block.inlining == null and sema.func_is_naked) {
@@ -19412,7 +19583,7 @@ fn zirRetImplicit(
if (base_tag == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(ret_ty_src, "function declared '{}' implicitly returns", .{
- sema.fn_ret_ty.fmt(mod),
+ sema.fn_ret_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(r_brace_src, msg, "control flow reaches end of body here", .{});
@@ -19422,7 +19593,7 @@ fn zirRetImplicit(
} else if (base_tag != .Void) {
const msg = msg: {
const msg = try sema.errMsg(ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
- sema.fn_ret_ty.fmt(mod),
+ sema.fn_ret_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(r_brace_src, msg, "control flow reaches end of body here", .{});
@@ -19474,7 +19645,7 @@ fn retWithErrTracing(
ret_tag: Air.Inst.Tag,
operand: Air.Inst.Ref,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
const need_check = switch (is_non_err) {
.bool_true => {
_ = try block.addUnOp(ret_tag, operand);
@@ -19484,11 +19655,11 @@ fn retWithErrTracing(
else => true,
};
const gpa = sema.gpa;
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
- const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
+ const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
- const return_err_fn = try mod.getBuiltin("returnError");
+ const return_err_fn = try pt.getBuiltin("returnError");
const args: [1]Air.Inst.Ref = .{err_return_trace};
if (!need_check) {
@@ -19524,12 +19695,14 @@ fn retWithErrTracing(
}
fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
return fn_ret_ty.isError(mod) and mod.comp.config.any_error_tracing;
}
fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].save_err_ret_index;
if (!block.ownerModule().error_tracing) return;
@@ -19559,7 +19732,8 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: {
var block = start_block;
@@ -19597,7 +19771,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
if (is_non_error) return;
const saved_index_val = try sema.resolveDefinedValue(start_block, src, saved_index);
- const saved_index_int = saved_index_val.?.toUnsignedInt(mod);
+ const saved_index_int = saved_index_val.?.toUnsignedInt(pt);
assert(saved_index_int <= sema.comptime_err_ret_trace.items.len);
sema.comptime_err_ret_trace.items.len = @intCast(saved_index_int);
return;
@@ -19612,7 +19786,8 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
}
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern();
@@ -19632,7 +19807,8 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
fn addToInferredErrorSetPtr(sema: *Sema, ies: *InferredErrorSet, op_ty: Type) !void {
const arena = sema.arena;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (op_ty.zigTypeTag(mod)) {
.ErrorSet => try ies.addErrorSet(op_ty, ip, arena),
@@ -19651,7 +19827,8 @@ fn analyzeRet(
// Special case for returning an error to an inferred error set; we need to
// add the error tag to the inferred error set of the in-scope function, so
// that the coercion below works correctly.
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
try sema.addToInferredErrorSet(uncasted_operand);
}
@@ -19691,7 +19868,7 @@ fn analyzeRet(
return sema.failWithOwnedErrorMsg(block, msg);
}
- try sema.fn_ret_ty.resolveLayout(mod);
+ try sema.fn_ret_ty.resolveLayout(pt);
try sema.validateRuntimeValue(block, operand_src, operand);
@@ -19718,7 +19895,8 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].ptr_type;
const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
const elem_ty_src = block.src(.{ .node_offset_ptr_elem = extra.data.src_node });
@@ -19773,7 +19951,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
},
else => {},
}
- const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?;
+ const align_bytes = (try val.getUnsignedIntAdvanced(pt, .sema)).?;
break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes);
} else .none;
@@ -19804,13 +19982,13 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
if (host_size != 0) {
if (bit_offset >= host_size * 8) {
return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} starts {} bits after the end of a {} byte host integer", .{
- elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size,
+ elem_ty.fmt(pt), bit_offset, bit_offset - host_size * 8, host_size,
});
}
- const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema);
+ const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, .sema);
if (elem_bit_size > host_size * 8 - bit_offset) {
return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{
- elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size,
+ elem_ty.fmt(pt), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size,
});
}
}
@@ -19824,7 +20002,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
} else if (inst_data.size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
const msg = msg: {
- const msg = try sema.errMsg(elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
+ const msg = try sema.errMsg(elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src, elem_ty, .other);
@@ -19841,14 +20019,14 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
if (host_size != 0 and !try sema.validatePackedType(elem_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(elem_ty_src, "bit-pointer cannot refer to value of type '{}'", .{elem_ty.fmt(mod)});
+ const msg = try sema.errMsg(elem_ty_src, "bit-pointer cannot refer to value of type '{}'", .{elem_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, elem_ty_src, elem_ty);
break :msg msg;
});
}
- const ty = try mod.ptrTypeSema(.{
+ const ty = try pt.ptrTypeSema(.{
.child = elem_ty.toIntern(),
.sentinel = sentinel,
.flags = .{
@@ -19875,7 +20053,8 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const src = block.nodeOffset(inst_data.src_node);
const ty_src = block.src(.{ .node_offset_init_ty = inst_data.src_node });
const obj_ty = try sema.resolveType(block, ty_src, inst_data.operand);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (obj_ty.zigTypeTag(mod)) {
.Struct => return sema.structInitEmpty(block, obj_ty, src, src),
@@ -19890,7 +20069,8 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ty_operand = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) {
@@ -19905,7 +20085,7 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is
break :ty ptr_ty.childType(mod);
}
// To make `&.{}` a `[:s]T`, the init should be a `[0:s]T`.
- break :ty try mod.arrayType(.{
+ break :ty try pt.arrayType(.{
.len = 0,
.sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
.child = ptr_ty.childType(mod).toIntern(),
@@ -19936,10 +20116,11 @@ fn structInitEmpty(
dest_src: LazySrcLoc,
init_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
// This logic must be synchronized with that in `zirStructInit`.
- try struct_ty.resolveFields(mod);
+ try struct_ty.resolveFields(pt);
// The init values to use for the struct instance.
const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod));
@@ -19950,7 +20131,8 @@ fn structInitEmpty(
}
fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const arr_len = obj_ty.arrayLen(mod);
if (arr_len != 0) {
if (obj_ty.zigTypeTag(mod) == .Array) {
@@ -19959,21 +20141,22 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com
return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
}
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = obj_ty.toIntern(),
.storage = .{ .elems = &.{} },
} })));
}
fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const pt = sema.pt;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const field_src = block.builtinCallArgSrc(inst_data.src_node, 1);
const init_src = block.builtinCallArgSrc(inst_data.src_node, 2);
const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data;
const union_ty = try sema.resolveType(block, ty_src, extra.union_type);
- if (union_ty.zigTypeTag(sema.mod) != .Union) {
- return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(sema.mod)});
+ if (union_ty.zigTypeTag(pt.zcu) != .Union) {
+ return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(pt)});
}
const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{
.needed_comptime_reason = "name of field being initialized must be comptime-known",
@@ -19992,7 +20175,8 @@ fn unionInit(
field_name: InternPool.NullTerminatedString,
field_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
const field_ty = Type.fromInterned(mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
@@ -20000,8 +20184,8 @@ fn unionInit(
if (try sema.resolveValue(init)) |init_val| {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
- const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
- return Air.internedToRef((try mod.intern(.{ .un = .{
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
+ return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = union_ty.toIntern(),
.tag = tag_val.toIntern(),
.val = init_val.toIntern(),
@@ -20025,7 +20209,8 @@ fn zirStructInit(
const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
const src = block.nodeOffset(inst_data.src_node);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
const first_field_type_data = zir_datas[@intFromEnum(first_item.field_type)].pl_node;
@@ -20038,7 +20223,7 @@ fn zirStructInit(
else => |e| return e,
};
const resolved_ty = result_ty.optEuBaseType(mod);
- try resolved_ty.resolveLayout(mod);
+ try resolved_ty.resolveLayout(pt);
if (resolved_ty.zigTypeTag(mod) == .Struct) {
// This logic must be synchronized with that in `zirStructInitEmpty`.
@@ -20079,8 +20264,8 @@ fn zirStructInit(
const field_ty = resolved_ty.structFieldType(field_index, mod);
field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src);
if (!is_packed) {
- try resolved_ty.resolveStructFieldInits(mod);
- if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
+ try resolved_ty.resolveStructFieldInits(pt);
+ if (try resolved_ty.structFieldValueComptime(pt, field_index)) |default_value| {
const init_val = (try sema.resolveValue(field_inits[field_index])) orelse {
return sema.failWithNeededComptime(block, field_src, .{
.needed_comptime_reason = "value stored in comptime field must be comptime-known",
@@ -20112,7 +20297,7 @@ fn zirStructInit(
);
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
- const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const field_ty = Type.fromInterned(mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
@@ -20132,11 +20317,11 @@ fn zirStructInit(
const init_inst = try sema.coerce(block, field_ty, uncoerced_init_inst, field_src);
if (try sema.resolveValue(init_inst)) |val| {
- const struct_val = Value.fromInterned((try mod.intern(.{ .un = .{
+ const struct_val = Value.fromInterned(try pt.intern(.{ .un = .{
.ty = resolved_ty.toIntern(),
.tag = tag_val.toIntern(),
.val = val.toIntern(),
- } })));
+ } }));
const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src);
const final_val = (try sema.resolveValue(final_val_inst)).?;
return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
@@ -20152,7 +20337,7 @@ fn zirStructInit(
if (is_ref) {
const target = mod.getTarget();
- const alloc_ty = try mod.ptrTypeSema(.{
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -20182,7 +20367,8 @@ fn finishStructInit(
result_ty: Type,
is_ref: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
var root_msg: ?*Module.ErrorMsg = null;
@@ -20242,7 +20428,7 @@ fn finishStructInit(
continue;
}
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveStructFieldInits(pt);
const field_init = struct_type.fieldInit(ip, i);
if (field_init == .none) {
@@ -20289,7 +20475,7 @@ fn finishStructInit(
for (elems, field_inits) |*elem, field_init| {
elem.* = (sema.resolveValue(field_init) catch unreachable).?.toIntern();
}
- const struct_val = try mod.intern(.{ .aggregate = .{
+ const struct_val = try pt.intern(.{ .aggregate = .{
.ty = struct_ty.toIntern(),
.storage = .{ .elems = elems },
} });
@@ -20312,9 +20498,9 @@ fn finishStructInit(
}
if (is_ref) {
- try struct_ty.resolveLayout(mod);
- const target = sema.mod.getTarget();
- const alloc_ty = try mod.ptrTypeSema(.{
+ try struct_ty.resolveLayout(pt);
+ const target = mod.getTarget();
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -20334,7 +20520,7 @@ fn finishStructInit(
.init_node_offset = init_src.offset.node_offset.x,
.elem_index = @intCast(runtime_index),
} }));
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveStructFieldInits(pt);
const struct_val = try block.addAggregateInit(struct_ty, field_inits);
return sema.coerce(block, result_ty, struct_val, init_src);
}
@@ -20364,7 +20550,8 @@ fn structInitAnon(
extra_end: usize,
is_ref: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const zir_datas = sema.code.instructions.items(.data);
@@ -20422,14 +20609,14 @@ fn structInitAnon(
break :rs runtime_index;
};
- const tuple_ty = try ip.getAnonStructType(gpa, .{
+ const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{
.names = names,
.types = types,
.values = values,
});
const runtime_index = opt_runtime_index orelse {
- const tuple_val = try mod.intern(.{ .aggregate = .{
+ const tuple_val = try pt.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
@@ -20443,7 +20630,7 @@ fn structInitAnon(
if (is_ref) {
const target = mod.getTarget();
- const alloc_ty = try mod.ptrTypeSema(.{
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = tuple_ty,
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -20457,7 +20644,7 @@ fn structInitAnon(
};
extra_index = item.end;
- const field_ptr_ty = try mod.ptrTypeSema(.{
+ const field_ptr_ty = try pt.ptrTypeSema(.{
.child = field_ty,
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -20491,7 +20678,8 @@ fn zirArrayInit(
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
@@ -20550,8 +20738,8 @@ fn zirArrayInit(
dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
if (is_tuple) {
if (array_ty.structFieldIsComptime(i, mod))
- try array_ty.resolveStructFieldInits(mod);
- if (try array_ty.structFieldValueComptime(mod, i)) |field_val| {
+ try array_ty.resolveStructFieldInits(pt);
+ if (try array_ty.structFieldValueComptime(pt, i)) |field_val| {
const init_val = try sema.resolveValue(dest.*) orelse {
return sema.failWithNeededComptime(block, elem_src, .{
.needed_comptime_reason = "value stored in comptime field must be comptime-known",
@@ -20581,7 +20769,7 @@ fn zirArrayInit(
// We checked that all args are comptime above.
val.* = (sema.resolveValue(arg) catch unreachable).?.toIntern();
}
- const arr_val = try mod.intern(.{ .aggregate = .{
+ const arr_val = try pt.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .elems = elem_vals },
} });
@@ -20597,7 +20785,7 @@ fn zirArrayInit(
if (is_ref) {
const target = mod.getTarget();
- const alloc_ty = try mod.ptrTypeSema(.{
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -20606,27 +20794,27 @@ fn zirArrayInit(
if (is_tuple) {
for (resolved_args, 0..) |arg, i| {
- const elem_ptr_ty = try mod.ptrTypeSema(.{
+ const elem_ptr_ty = try pt.ptrTypeSema(.{
.child = array_ty.structFieldType(i, mod).toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
- const index = try mod.intRef(Type.usize, i);
+ const index = try pt.intRef(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
return sema.makePtrConst(block, alloc);
}
- const elem_ptr_ty = try mod.ptrTypeSema(.{
+ const elem_ptr_ty = try pt.ptrTypeSema(.{
.child = array_ty.elemType2(mod).toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
for (resolved_args, 0..) |arg, i| {
- const index = try mod.intRef(Type.usize, i);
+ const index = try pt.intRef(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
@@ -20656,7 +20844,8 @@ fn arrayInitAnon(
operands: []const Zir.Inst.Ref,
is_ref: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -20689,14 +20878,14 @@ fn arrayInitAnon(
break :rs runtime_src;
};
- const tuple_ty = try ip.getAnonStructType(gpa, .{
+ const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{
.types = types,
.values = values,
.names = &.{},
});
const runtime_src = opt_runtime_src orelse {
- const tuple_val = try mod.intern(.{ .aggregate = .{
+ const tuple_val = try pt.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
@@ -20706,15 +20895,15 @@ fn arrayInitAnon(
try sema.requireRuntimeBlock(block, src, runtime_src);
if (is_ref) {
- const target = sema.mod.getTarget();
- const alloc_ty = try mod.ptrTypeSema(.{
+ const target = sema.pt.zcu.getTarget();
+ const alloc_ty = try pt.ptrTypeSema(.{
.child = tuple_ty,
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
const alloc = try block.addTy(.alloc, alloc_ty);
for (operands, 0..) |operand, i_usize| {
const i: u32 = @intCast(i_usize);
- const field_ptr_ty = try mod.ptrTypeSema(.{
+ const field_ptr_ty = try pt.ptrTypeSema(.{
.child = types[i],
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -20752,7 +20941,8 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
@@ -20780,11 +20970,12 @@ fn fieldType(
field_src: LazySrcLoc,
ty_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
var cur_ty = aggregate_ty;
while (true) {
- try cur_ty.resolveFields(mod);
+ try cur_ty.resolveFields(pt);
switch (cur_ty.zigTypeTag(mod)) {
.Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
.anon_struct_type => |anon_struct| {
@@ -20823,7 +21014,7 @@ fn fieldType(
else => {},
}
return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{
- cur_ty.fmt(sema.mod),
+ cur_ty.fmt(pt),
});
}
}
@@ -20833,12 +21024,13 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
}
fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
- const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
- const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
+ const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
+ const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern());
if (sema.owner_func_index != .none and
ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
@@ -20846,7 +21038,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
{
return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
}
- return Air.internedToRef((try mod.intern(.{ .opt = .{
+ return Air.internedToRef((try pt.intern(.{ .opt = .{
.ty = opt_ptr_stack_trace_ty.toIntern(),
.val = .none,
} })));
@@ -20862,19 +21054,20 @@ fn zirFrame(
}
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
- if (ty.isNoReturn(mod)) {
- return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
+ if (ty.isNoReturn(pt.zcu)) {
+ return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(pt)});
}
- const val = try ty.lazyAbiAlignment(mod);
+ const val = try ty.lazyAbiAlignment(pt);
return Air.internedToRef(val.toIntern());
}
fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
@@ -20886,25 +21079,25 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
if (try sema.resolveValue(operand)) |val| {
if (!is_vector) {
- if (val.isUndef(mod)) return mod.undefRef(Type.u1);
- if (val.toBool()) return Air.internedToRef((try mod.intValue(Type.u1, 1)).toIntern());
- return Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
+ if (val.isUndef(mod)) return pt.undefRef(Type.u1);
+ if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern());
+ return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
}
const len = operand_ty.vectorLen(mod);
- const dest_ty = try mod.vectorType(.{ .child = .u1_type, .len = len });
- if (val.isUndef(mod)) return mod.undefRef(dest_ty);
+ const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len });
+ if (val.isUndef(mod)) return pt.undefRef(dest_ty);
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
- const old_elem = try val.elemValue(mod, i);
+ const old_elem = try val.elemValue(pt, i);
const new_val = if (old_elem.isUndef(mod))
- try mod.undefValue(Type.u1)
+ try pt.undefValue(Type.u1)
else if (old_elem.toBool())
- try mod.intValue(Type.u1, 1)
+ try pt.intValue(Type.u1, 1)
else
- try mod.intValue(Type.u1, 0);
+ try pt.intValue(Type.u1, 0);
new_elem.* = new_val.toIntern();
}
- return Air.internedToRef(try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = new_elems },
} }));
@@ -20913,10 +21106,10 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return block.addUnOp(.int_from_bool, operand);
}
const len = operand_ty.vectorLen(mod);
- const dest_ty = try mod.vectorType(.{ .child = .u1_type, .len = len });
+ const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len });
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addUnOp(.int_from_bool, old_elem);
}
@@ -20930,7 +21123,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
- const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name;
+ const err_name = sema.pt.zcu.intern_pool.indexToKey(val.toIntern()).err.name;
return sema.addNullTerminatedStrLit(err_name);
}
@@ -20944,7 +21137,8 @@ fn zirAbs(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -20953,12 +21147,12 @@ fn zirAbs(
const result_ty = switch (scalar_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt => operand_ty,
- .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(mod) else return operand,
+ .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(pt) else return operand,
else => return sema.fail(
block,
operand_src,
"expected integer, float, or vector of either integers or floats, found '{}'",
- .{operand_ty.fmt(mod)},
+ .{operand_ty.fmt(pt)},
),
};
@@ -20972,30 +21166,31 @@ fn maybeConstantUnaryMath(
sema: *Sema,
operand: Air.Inst.Ref,
result_ty: Type,
- comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
+ comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value,
) CompileError!?Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (result_ty.zigTypeTag(mod)) {
.Vector => if (try sema.resolveValue(operand)) |val| {
const scalar_ty = result_ty.scalarType(mod);
const vec_len = result_ty.vectorLen(mod);
if (val.isUndef(mod))
- return try mod.undefRef(result_ty);
+ return try pt.undefRef(result_ty);
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
- const elem_val = try val.elemValue(sema.mod, i);
- elem.* = (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ elem.* = (try eval(elem_val, scalar_ty, sema.arena, pt)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
},
else => if (try sema.resolveValue(operand)) |operand_val| {
if (operand_val.isUndef(mod))
- return try mod.undefRef(result_ty);
- const result_val = try eval(operand_val, result_ty, sema.arena, sema.mod);
+ return try pt.undefRef(result_ty);
+ const result_val = try eval(operand_val, result_ty, sema.arena, pt);
return Air.internedToRef(result_val.toIntern());
},
}
@@ -21007,12 +21202,13 @@ fn zirUnaryMath(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
+ comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -21025,7 +21221,7 @@ fn zirUnaryMath(
block,
operand_src,
"expected vector of floats or float type, found '{}'",
- .{operand_ty.fmt(sema.mod)},
+ .{operand_ty.fmt(pt)},
),
}
@@ -21041,10 +21237,11 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- try operand_ty.resolveLayout(mod);
+ try operand_ty.resolveLayout(pt);
const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
.EnumLiteral => {
const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined);
@@ -21053,9 +21250,9 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
},
.Enum => operand_ty,
.Union => operand_ty.unionTagType(mod) orelse
- return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(sema.mod)}),
+ return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(pt)}),
else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
}),
};
if (enum_ty.enumFieldCount(mod) == 0) {
@@ -21063,7 +21260,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
// it prevents a crash.
// https://github.com/ziglang/zig/issues/15909
return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{
- enum_ty.fmt(mod),
+ enum_ty.fmt(pt),
});
}
const enum_decl_index = enum_ty.getOwnerDecl(mod);
@@ -21072,7 +21269,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(src, "no field with value '{}' in enum '{}'", .{
- val.fmtValue(sema.mod, sema), mod.declPtr(enum_decl_index).name.fmt(ip),
+ val.fmtValue(pt, sema), mod.declPtr(enum_decl_index).name.fmt(ip),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(enum_ty.srcLoc(mod), msg, "declared here", .{});
@@ -21085,7 +21282,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.addNullTerminatedStrLit(field_name);
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) {
+ if (block.wantSafety() and mod.backendSupportsFeature(.is_named_enum_value)) {
const ok = try block.addUnOp(.is_named_enum_value, casted_operand);
try sema.addSafetyCheck(block, src, ok, .invalid_enum_value);
}
@@ -21101,7 +21298,8 @@ fn zirReify(
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
@@ -21120,7 +21318,7 @@ fn zirReify(
},
},
};
- const type_info_ty = try mod.getBuiltinType("Type");
+ const type_info_ty = try pt.getBuiltinType("Type");
const uncasted_operand = try sema.resolveInst(extra.operand);
const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{
@@ -21145,36 +21343,36 @@ fn zirReify(
.Int => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const signedness_val = try Value.fromInterned(union_val.val).fieldValue(
- mod,
+ pt,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness", .no_embedded_nulls)).?,
);
const bits_val = try Value.fromInterned(union_val.val).fieldValue(
- mod,
+ pt,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits", .no_embedded_nulls)).?,
);
const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
- const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod));
- const ty = try mod.intType(signedness, bits);
+ const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt));
+ const ty = try pt.intType(signedness, bits);
return Air.internedToRef(ty.toIntern());
},
.Vector => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len", .no_embedded_nulls),
).?);
- const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
).?);
- const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod));
+ const len: u32 = @intCast(try len_val.toUnsignedIntSema(pt));
const child_ty = child_val.toType();
try sema.checkVectorElemType(block, src, child_ty);
- const ty = try mod.vectorType(.{
+ const ty = try pt.vectorType(.{
.len = len,
.child = child_ty.toIntern(),
});
@@ -21182,12 +21380,12 @@ fn zirReify(
},
.Float => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const bits_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "bits", .no_embedded_nulls),
).?);
- const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod));
+ const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt));
const ty = switch (bits) {
16 => Type.f16,
32 => Type.f32,
@@ -21200,35 +21398,35 @@ fn zirReify(
},
.Pointer => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const size_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "size", .no_embedded_nulls),
).?);
- const is_const_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_const_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_const", .no_embedded_nulls),
).?);
- const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_volatile", .no_embedded_nulls),
).?);
- const alignment_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const alignment_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment", .no_embedded_nulls),
).?);
- const address_space_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const address_space_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "address_space", .no_embedded_nulls),
).?);
- const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
).?);
- const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_allowzero", .no_embedded_nulls),
).?);
- const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls),
).?);
@@ -21237,7 +21435,7 @@ fn zirReify(
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?;
+ const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(pt, .sema)).?;
if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int});
}
@@ -21245,7 +21443,7 @@ fn zirReify(
const elem_ty = child_val.toType();
if (abi_align != .none) {
- try elem_ty.resolveLayout(mod);
+ try elem_ty.resolveLayout(pt);
}
const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
@@ -21256,7 +21454,7 @@ fn zirReify(
return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
}
const sentinel_ptr_val = sentinel_val.optionalValue(mod).?;
- const ptr_ty = try mod.singleMutPtrType(elem_ty);
+ const ptr_ty = try pt.singleMutPtrType(elem_ty);
const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
break :s sent_val.toIntern();
}
@@ -21274,7 +21472,7 @@ fn zirReify(
} else if (ptr_size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
const msg = msg: {
- const msg = try sema.errMsg(src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsNotExtern(msg, src, elem_ty, .other);
@@ -21289,7 +21487,7 @@ fn zirReify(
}
}
- const ty = try mod.ptrTypeSema(.{
+ const ty = try pt.ptrTypeSema(.{
.child = elem_ty.toIntern(),
.sentinel = actual_sentinel,
.flags = .{
@@ -21305,27 +21503,27 @@ fn zirReify(
},
.Array => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len", .no_embedded_nulls),
).?);
- const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
).?);
- const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls),
).?);
- const len = try len_val.toUnsignedIntSema(mod);
+ const len = try len_val.toUnsignedIntSema(pt);
const child_ty = child_val.toType();
const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
- const ptr_ty = try mod.singleMutPtrType(child_ty);
+ const ptr_ty = try pt.singleMutPtrType(child_ty);
break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?;
} else null;
- const ty = try mod.arrayType(.{
+ const ty = try pt.arrayType(.{
.len = len,
.sentinel = if (sentinel) |s| s.toIntern() else .none,
.child = child_ty.toIntern(),
@@ -21334,23 +21532,23 @@ fn zirReify(
},
.Optional => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
).?);
const child_ty = child_val.toType();
- const ty = try mod.optionalType(child_ty.toIntern());
+ const ty = try pt.optionalType(child_ty.toIntern());
return Air.internedToRef(ty.toIntern());
},
.ErrorUnion => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const error_set_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "error_set", .no_embedded_nulls),
).?);
- const payload_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const payload_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "payload", .no_embedded_nulls),
).?);
@@ -21362,7 +21560,7 @@ fn zirReify(
return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
}
- const ty = try mod.errorUnionType(error_set_ty, payload_ty);
+ const ty = try pt.errorUnionType(error_set_ty, payload_ty);
return Air.internedToRef(ty.toIntern());
},
.ErrorSet => {
@@ -21377,9 +21575,9 @@ fn zirReify(
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
- const elem_val = try names_val.elemValue(mod, i);
+ const elem_val = try names_val.elemValue(pt, i);
const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
- const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+ const name_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name", .no_embedded_nulls),
).?);
@@ -21396,28 +21594,28 @@ fn zirReify(
}
}
- const ty = try mod.errorSetFromUnsortedNames(names.keys());
+ const ty = try pt.errorSetFromUnsortedNames(names.keys());
return Air.internedToRef(ty.toIntern());
},
.Struct => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout", .no_embedded_nulls),
).?);
- const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "backing_integer", .no_embedded_nulls),
).?);
- const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields", .no_embedded_nulls),
).?);
- const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
).?);
- const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_tuple", .no_embedded_nulls),
).?);
@@ -21425,7 +21623,7 @@ fn zirReify(
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
// Decls
- if (try decls_val.sliceLen(mod) > 0) {
+ if (try decls_val.sliceLen(pt) > 0) {
return sema.fail(block, src, "reified structs must have no decls", .{});
}
@@ -21441,24 +21639,24 @@ fn zirReify(
},
.Enum => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls),
).?);
- const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields", .no_embedded_nulls),
).?);
- const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
).?);
- const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls),
).?);
- if (try decls_val.sliceLen(mod) > 0) {
+ if (try decls_val.sliceLen(pt) > 0) {
return sema.fail(block, src, "reified enums must have no decls", .{});
}
@@ -21470,17 +21668,17 @@ fn zirReify(
},
.Opaque => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
).?);
// Decls
- if (try decls_val.sliceLen(mod) > 0) {
+ if (try decls_val.sliceLen(pt) > 0) {
return sema.fail(block, src, "reified opaque must have no decls", .{});
}
- const wip_ty = switch (try ip.getOpaqueType(gpa, .{
+ const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, .{
.has_namespace = false,
.key = .{ .reified = .{
.zir_index = try block.trackZir(inst),
@@ -21501,30 +21699,30 @@ fn zirReify(
mod.declPtr(new_decl_index).owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
},
.Union => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout", .no_embedded_nulls),
).?);
- const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls),
).?);
- const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields", .no_embedded_nulls),
).?);
- const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
).?);
- if (try decls_val.sliceLen(mod) > 0) {
+ if (try decls_val.sliceLen(pt) > 0) {
return sema.fail(block, src, "reified unions must have no decls", .{});
}
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
@@ -21537,23 +21735,23 @@ fn zirReify(
},
.Fn => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
- const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "calling_convention", .no_embedded_nulls),
).?);
- const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls),
).?);
- const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_var_args", .no_embedded_nulls),
).?);
- const return_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const return_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "return_type", .no_embedded_nulls),
).?);
- const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
+ const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "params", .no_embedded_nulls),
).?);
@@ -21581,17 +21779,17 @@ fn zirReify(
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
- const elem_val = try params_val.elemValue(mod, i);
+ const elem_val = try params_val.elemValue(pt, i);
const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
- const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+ const param_is_generic_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls),
).?);
- const param_is_noalias_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+ const param_is_noalias_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_noalias", .no_embedded_nulls),
).?);
- const opt_param_type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+ const opt_param_type_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "type", .no_embedded_nulls),
).?);
@@ -21613,7 +21811,7 @@ fn zirReify(
}
}
- const ty = try mod.funcType(.{
+ const ty = try pt.funcType(.{
.param_types = param_types,
.noalias_bits = noalias_bits,
.return_type = return_type.toIntern(),
@@ -21636,7 +21834,8 @@ fn reifyEnum(
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -21656,10 +21855,10 @@ fn reifyEnum(
std.hash.autoHash(&hasher, fields_len);
for (0..fields_len) |field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1));
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 1));
const field_name = try sema.sliceToIpString(block, src, field_name_val, .{
.needed_comptime_reason = "enum field name must be comptime-known",
@@ -21671,7 +21870,7 @@ fn reifyEnum(
});
}
- const wip_ty = switch (try ip.getEnumType(gpa, .{
+ const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{
.has_namespace = false,
.has_values = true,
.tag_mode = if (is_exhaustive) .explicit else .nonexhaustive,
@@ -21704,10 +21903,10 @@ fn reifyEnum(
wip_ty.setTagTy(ip, tag_ty.toIntern());
for (0..fields_len) |field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 1));
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 1));
// Don't pass a reason; first loop acts as an assertion that this is valid.
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined);
@@ -21716,12 +21915,12 @@ fn reifyEnum(
// TODO: better source location
return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
field_name.fmt(ip),
- field_value_val.fmtValue(mod, sema),
- tag_ty.fmt(mod),
+ field_value_val.fmtValue(pt, sema),
+ tag_ty.fmt(pt),
});
}
- const coerced_field_val = try mod.getCoerced(field_value_val, tag_ty);
+ const coerced_field_val = try pt.getCoerced(field_value_val, tag_ty);
if (wip_ty.nextField(ip, field_name, coerced_field_val.toIntern())) |conflict| {
return sema.failWithOwnedErrorMsg(block, switch (conflict.kind) {
.name => msg: {
@@ -21732,7 +21931,7 @@ fn reifyEnum(
break :msg msg;
},
.value => msg: {
- const msg = try sema.errMsg(src, "enum tag value {} already taken", .{field_value_val.fmtValue(mod, sema)});
+ const msg = try sema.errMsg(src, "enum tag value {} already taken", .{field_value_val.fmtValue(pt, sema)});
errdefer msg.destroy(gpa);
_ = conflict.prev_field_idx; // TODO: this note is incorrect
try sema.errNote(src, msg, "other enum tag value here", .{});
@@ -21742,11 +21941,11 @@ fn reifyEnum(
}
}
- if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(mod)) {
+ if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(pt)) {
return sema.fail(block, src, "non-exhaustive enum specified every value", .{});
}
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
return Air.internedToRef(wip_ty.index);
}
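Note (illustrative, not part of the commit): the recurring rewrite in these hunks is that each Sema routine derives a per-thread handle from sema.pt, reaches the Zcu through pt.zcu, and passes pt into value accessors such as elemValue and fieldValue. A minimal sketch of that shape, assuming Sema.zig's usual declarations (Sema, Value, Zcu) are in scope; readEnumField is a hypothetical helper, not compiler code:

fn readEnumField(sema: *Sema, fields_val: Value, field_idx: usize) !void {
    const pt = sema.pt; // per-thread handle (Zcu.PerThread); pt.zcu is the shared Zcu
    // Value accessors now take the per-thread handle instead of the Module.
    const field_info = try fields_val.elemValue(pt, field_idx);
    const field_name_val = try field_info.fieldValue(pt, 0);
    const field_value_val = try field_info.fieldValue(pt, 1);
    _ = field_name_val;
    _ = field_value_val;
}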
@@ -21760,7 +21959,8 @@ fn reifyUnion(
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -21782,11 +21982,11 @@ fn reifyUnion(
var any_aligns = false;
for (0..fields_len) |field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_type_val = try field_info.fieldValue(mod, 1);
- const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 2));
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_type_val = try field_info.fieldValue(pt, 1);
+ const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 2));
const field_name = try sema.sliceToIpString(block, src, field_name_val, .{
.needed_comptime_reason = "union field name must be comptime-known",
@@ -21798,12 +21998,12 @@ fn reifyUnion(
field_align_val.toIntern(),
});
- if (field_align_val.toUnsignedInt(mod) != 0) {
+ if (field_align_val.toUnsignedInt(pt) != 0) {
any_aligns = true;
}
}
- const wip_ty = switch (try ip.getUnionType(gpa, .{
+ const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{
.flags = .{
.layout = layout,
.status = .none,
@@ -21861,10 +22061,10 @@ fn reifyUnion(
var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len);
for (field_types, 0..) |*field_ty, field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_type_val = try field_info.fieldValue(mod, 1);
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_type_val = try field_info.fieldValue(pt, 1);
// Don't pass a reason; first loop acts as an assertion that this is valid.
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined);
@@ -21872,7 +22072,7 @@ fn reifyUnion(
const enum_index = enum_tag_ty.enumFieldIndex(field_name, mod) orelse {
// TODO: better source location
return sema.fail(block, src, "no field named '{}' in enum '{}'", .{
- field_name.fmt(ip), enum_tag_ty.fmt(mod),
+ field_name.fmt(ip), enum_tag_ty.fmt(pt),
});
};
if (seen_tags.isSet(enum_index)) {
@@ -21883,7 +22083,7 @@ fn reifyUnion(
field_ty.* = field_type_val.toIntern();
if (any_aligns) {
- const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod);
+ const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt);
if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
@@ -21913,10 +22113,10 @@ fn reifyUnion(
try field_names.ensureTotalCapacity(sema.arena, fields_len);
for (field_types, 0..) |*field_ty, field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_type_val = try field_info.fieldValue(mod, 1);
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_type_val = try field_info.fieldValue(pt, 1);
// Don't pass a reason; first loop acts as an assertion that this is valid.
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined);
@@ -21928,7 +22128,7 @@ fn reifyUnion(
field_ty.* = field_type_val.toIntern();
if (any_aligns) {
- const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod);
+ const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt);
if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
@@ -21955,7 +22155,7 @@ fn reifyUnion(
}
if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsNotExtern(msg, src, field_ty, .union_field);
@@ -21965,7 +22165,7 @@ fn reifyUnion(
});
} else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsNotPacked(msg, src, field_ty);
@@ -21984,7 +22184,7 @@ fn reifyUnion(
loaded_union.tagTypePtr(ip).* = enum_tag_ty;
loaded_union.flagsPtr(ip).status = .have_field_types;
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
@@ -22001,7 +22201,8 @@ fn reifyStruct(
name_strategy: Zir.Inst.NameStrategy,
is_tuple: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -22026,20 +22227,20 @@ fn reifyStruct(
var any_aligned_fields = false;
for (0..fields_len) |field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_type_val = try field_info.fieldValue(mod, 1);
- const field_default_value_val = try field_info.fieldValue(mod, 2);
- const field_is_comptime_val = try field_info.fieldValue(mod, 3);
- const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(mod, 4));
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_type_val = try field_info.fieldValue(pt, 1);
+ const field_default_value_val = try field_info.fieldValue(pt, 2);
+ const field_is_comptime_val = try field_info.fieldValue(pt, 3);
+ const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 4));
const field_name = try sema.sliceToIpString(block, src, field_name_val, .{
.needed_comptime_reason = "struct field name must be comptime-known",
});
const field_is_comptime = field_is_comptime_val.toBool();
const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(mod)) |ptr_val| d: {
- const ptr_ty = try mod.singleConstPtrType(field_type_val.toType());
+ const ptr_ty = try pt.singleConstPtrType(field_type_val.toType());
// We need to do this deref here, so we won't check for this error case later on.
const val = try sema.pointerDeref(block, src, ptr_val, ptr_ty) orelse return sema.failWithNeededComptime(
block,
@@ -22060,14 +22261,14 @@ fn reifyStruct(
if (field_is_comptime) any_comptime_fields = true;
if (field_default_value != .none) any_default_inits = true;
- switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) {
+ switch (try field_alignment_val.orderAgainstZeroAdvanced(pt, .sema)) {
.eq => {},
.gt => any_aligned_fields = true,
.lt => unreachable,
}
}
- const wip_ty = switch (try ip.getStructType(gpa, .{
+ const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
.layout = layout,
.fields_len = fields_len,
.known_non_opv = false,
@@ -22107,13 +22308,13 @@ fn reifyStruct(
const struct_type = ip.loadStructType(wip_ty.index);
for (0..fields_len) |field_idx| {
- const field_info = try fields_val.elemValue(mod, field_idx);
+ const field_info = try fields_val.elemValue(pt, field_idx);
- const field_name_val = try field_info.fieldValue(mod, 0);
- const field_type_val = try field_info.fieldValue(mod, 1);
- const field_default_value_val = try field_info.fieldValue(mod, 2);
- const field_is_comptime_val = try field_info.fieldValue(mod, 3);
- const field_alignment_val = try field_info.fieldValue(mod, 4);
+ const field_name_val = try field_info.fieldValue(pt, 0);
+ const field_type_val = try field_info.fieldValue(pt, 1);
+ const field_default_value_val = try field_info.fieldValue(pt, 2);
+ const field_is_comptime_val = try field_info.fieldValue(pt, 3);
+ const field_alignment_val = try field_info.fieldValue(pt, 4);
const field_ty = field_type_val.toType();
// Don't pass a reason; first loop acts as an assertion that this is valid.
@@ -22143,7 +22344,7 @@ fn reifyStruct(
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const byte_align = try field_alignment_val.toUnsignedIntSema(mod);
+ const byte_align = try field_alignment_val.toUnsignedIntSema(pt);
if (byte_align == 0) {
if (layout != .@"packed") {
struct_type.field_aligns.get(ip)[field_idx] = .none;
@@ -22168,7 +22369,7 @@ fn reifyStruct(
const field_default: InternPool.Index = d: {
if (!any_default_inits) break :d .none;
const ptr_val = field_default_value_val.optionalValue(mod) orelse break :d .none;
- const ptr_ty = try mod.singleConstPtrType(field_ty);
+ const ptr_ty = try pt.singleConstPtrType(field_ty);
// Asserted comptime-dereferencable above.
const val = (try sema.pointerDeref(block, src, ptr_val, ptr_ty)).?;
// We already resolved this for deduplication, so we may as well do it now.
@@ -22204,7 +22405,7 @@ fn reifyStruct(
}
if (layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsNotExtern(msg, src, field_ty, .struct_field);
@@ -22214,7 +22415,7 @@ fn reifyStruct(
});
} else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsNotPacked(msg, src, field_ty);
@@ -22229,7 +22430,7 @@ fn reifyStruct(
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |field_idx| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
- field_ty.resolveLayout(mod) catch |err| switch (err) {
+ field_ty.resolveLayout(pt) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.errNote(src, msg, "while checking a field of this struct", .{});
@@ -22237,7 +22438,7 @@ fn reifyStruct(
},
else => return err,
};
- fields_bit_sum += field_ty.bitSize(mod);
+ fields_bit_sum += field_ty.bitSize(pt);
}
if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| {
@@ -22245,20 +22446,21 @@ fn reifyStruct(
try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
} else {
- const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
+ const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
}
}
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
}
fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
- const va_list_ty = try sema.mod.getBuiltinType("VaList");
- const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty);
+ const pt = sema.pt;
+ const va_list_ty = try pt.getBuiltinType("VaList");
+ const va_list_ptr = try pt.singleMutPtrType(va_list_ty);
const inst = try sema.resolveInst(zir_ref);
return sema.coerce(block, va_list_ptr, inst, src);
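Illustrative note: InternPool type creation (the getStructType, getEnumType, getUnionType, and getOpaqueType hunks above) now threads the per-thread id between the allocator and the initializer. A hedged shim showing only the call shape; the initializer is left abstract because the full field lists are longer than what the hunks show, and all referenced types are assumed to be in scope as in Sema.zig:

fn getStructTypeViaPt(ip: *InternPool, gpa: std.mem.Allocator, pt: Zcu.PerThread, init: anytype) !void {
    // The only change to the call shape: pt.tid is passed as the second argument,
    // as in the reify* hunks above.
    _ = try ip.getStructType(gpa, pt.tid, init);
}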
@@ -22275,7 +22477,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
if (!try sema.validateExternType(arg_ty, .param_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, arg_ty, .param_ty);
@@ -22296,7 +22498,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
const va_list_src = block.builtinCallArgSrc(extra.node, 0);
const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
- const va_list_ty = try sema.mod.getBuiltinType("VaList");
+ const va_list_ty = try sema.pt.getBuiltinType("VaList");
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
@@ -22316,7 +22518,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const src = block.nodeOffset(@bitCast(extended.operand));
- const va_list_ty = try sema.mod.getBuiltinType("VaList");
+ const va_list_ty = try sema.pt.getBuiltinType("VaList");
try sema.requireRuntimeBlock(block, src, null);
return block.addInst(.{
.tag = .c_va_start,
@@ -22325,14 +22527,15 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
}
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const ty = try sema.resolveType(block, ty_src, inst_data.operand);
- const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(mod)}, .no_embedded_nulls);
+ const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(pt)}, .no_embedded_nulls);
return sema.addNullTerminatedStrLit(type_name);
}
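Formatting follows the same rule: Type.fmt and Value.fmtValue take pt, so string interning and error messages render through the per-thread state. A small sketch; internTypeName is a hypothetical helper mirroring the zirTypeName hunk above:

fn internTypeName(sema: *Sema, ty: Type) !void {
    const pt = sema.pt;
    const ip = &pt.zcu.intern_pool;
    // ty.fmt(pt) replaces ty.fmt(mod) wherever a type is rendered.
    _ = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(pt)}, .no_embedded_nulls);
}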
@@ -22349,7 +22552,8 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -22380,23 +22584,23 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (dest_scalar_ty.intInfo(mod).bits == 0) {
if (!is_vector) {
if (block.wantSafety()) {
- const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
+ const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try pt.floatValue(operand_ty, 0.0)).toIntern()));
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
- return Air.internedToRef((try mod.intValue(dest_ty, 0)).toIntern());
+ return Air.internedToRef((try pt.intValue(dest_ty, 0)).toIntern());
}
if (block.wantSafety()) {
const len = dest_ty.vectorLen(mod);
for (0..len) |i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
- const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern()));
+ const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 0.0)).toIntern()));
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
}
- return Air.internedToRef(try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
- .storage = .{ .repeated_elem = (try mod.intValue(dest_scalar_ty, 0)).toIntern() },
+ .storage = .{ .repeated_elem = (try pt.intValue(dest_scalar_ty, 0)).toIntern() },
} }));
}
if (!is_vector) {
@@ -22404,8 +22608,8 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (block.wantSafety()) {
const back = try block.addTyOp(.float_from_int, operand_ty, result);
const diff = try block.addBinOp(.sub, operand, back);
- const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
- const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
+ const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern()));
+ const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern()));
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
@@ -22414,14 +22618,14 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const len = dest_ty.vectorLen(mod);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
if (block.wantSafety()) {
const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result);
const diff = try block.addBinOp(.sub, old_elem, back);
- const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern()));
- const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern()));
+ const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 1.0)).toIntern()));
+ const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, -1.0)).toIntern()));
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
@@ -22431,7 +22635,8 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -22450,7 +22655,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
_ = try sema.checkIntType(block, operand_src, operand_scalar_ty);
if (try sema.resolveValue(operand)) |operand_val| {
- const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema);
+ const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, pt, .sema);
return Air.internedToRef(result_val.toIntern());
} else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) {
return sema.failWithNeededComptime(block, operand_src, .{
@@ -22465,7 +22670,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const len = operand_ty.vectorLen(mod);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, old_elem);
}
@@ -22473,7 +22678,8 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
@@ -22489,7 +22695,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
const operand_ty = if (is_vector) operand_ty: {
const len = dest_ty.vectorLen(mod);
- break :operand_ty try mod.vectorType(.{ .child = .usize_type, .len = len });
+ break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len });
} else Type.usize;
const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src);
@@ -22498,11 +22704,11 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.checkPtrType(block, src, ptr_ty, true);
const elem_ty = ptr_ty.elemType2(mod);
- const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema);
+ const ptr_align = try ptr_ty.ptrAlignmentAdvanced(pt, .sema);
if (ptr_ty.isSlice(mod)) {
const msg = msg: {
- const msg = try sema.errMsg(src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "slice length cannot be inferred from address", .{});
break :msg msg;
@@ -22518,18 +22724,18 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const len = dest_ty.vectorLen(mod);
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
- const elem = try val.elemValue(mod, i);
+ const elem = try val.elemValue(pt, i);
const ptr_val = try sema.ptrFromIntVal(block, operand_src, elem, ptr_ty, ptr_align);
new_elem.* = ptr_val.toIntern();
}
- return Air.internedToRef(try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = new_elems },
} }));
}
if (try sema.typeRequiresComptime(ptr_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsComptime(msg, src, ptr_ty);
@@ -22545,7 +22751,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
- const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
+ const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
@@ -22557,7 +22763,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const len = dest_ty.vectorLen(mod);
if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
for (0..len) |i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
if (!ptr_ty.isAllowzeroPtr(mod)) {
const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
@@ -22565,7 +22771,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
- const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
+ const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
@@ -22575,7 +22781,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try mod.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
new_elem.* = try block.addBitCast(ptr_ty, old_elem);
}
@@ -22590,31 +22796,33 @@ fn ptrFromIntVal(
ptr_ty: Type,
ptr_align: Alignment,
) !Value {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
if (operand_val.isUndef(zcu)) {
if (ptr_ty.isAllowzeroPtr(zcu) and ptr_align == .@"1") {
- return zcu.undefValue(ptr_ty);
+ return pt.undefValue(ptr_ty);
}
return sema.failWithUseOfUndef(block, operand_src);
}
- const addr = try operand_val.toUnsignedIntSema(zcu);
+ const addr = try operand_val.toUnsignedIntSema(pt);
if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0)
- return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)});
+ return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(pt)});
if (addr != 0 and ptr_align != .none and !ptr_align.check(addr))
- return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(zcu)});
+ return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)});
return switch (ptr_ty.zigTypeTag(zcu)) {
- .Optional => Value.fromInterned((try zcu.intern(.{ .opt = .{
+ .Optional => Value.fromInterned(try pt.intern(.{ .opt = .{
.ty = ptr_ty.toIntern(),
- .val = if (addr == 0) .none else (try zcu.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(),
- } }))),
- .Pointer => try zcu.ptrIntValue(ptr_ty, addr),
+ .val = if (addr == 0) .none else (try pt.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(),
+ } })),
+ .Pointer => try pt.ptrIntValue(ptr_ty, addr),
else => unreachable,
};
}
fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
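Constant construction moves the same way: pt.undefValue, pt.ptrIntValue, pt.intValue, and pt.intern replace the zcu-based equivalents, as in ptrFromIntVal above. A hedged sketch reusing those calls; intToPtrValue is hypothetical and its zero handling is a placeholder, not the function's real allowzero logic:

fn intToPtrValue(sema: *Sema, ptr_ty: Type, addr: u64) !Value {
    const pt = sema.pt;
    // Values are built through the per-thread handle rather than the Zcu.
    if (addr == 0) return pt.undefValue(ptr_ty); // placeholder policy for the sketch
    return pt.ptrIntValue(ptr_ty, addr);
}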
@@ -22642,8 +22850,8 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
errdefer msg.destroy(sema.gpa);
const dest_ty = base_dest_ty.errorUnionPayload(mod);
const operand_ty = base_operand_ty.errorUnionPayload(mod);
- try sema.errNote(src, msg, "destination payload is '{}'", .{dest_ty.fmt(mod)});
- try sema.errNote(src, msg, "operand payload is '{}'", .{operand_ty.fmt(mod)});
+ try sema.errNote(src, msg, "destination payload is '{}'", .{dest_ty.fmt(pt)});
+ try sema.errNote(src, msg, "operand payload is '{}'", .{operand_ty.fmt(pt)});
try addDeclaredHereNote(sema, msg, dest_ty);
try addDeclaredHereNote(sema, msg, operand_ty);
break :msg msg;
@@ -22684,7 +22892,7 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
};
if (disjoint and dest_tag != .ErrorUnion) {
return sema.fail(block, src, "error sets '{}' and '{}' have no common errors", .{
- operand_ty.fmt(sema.mod), dest_ty.fmt(sema.mod),
+ operand_ty.fmt(pt), dest_ty.fmt(pt),
});
}
@@ -22700,24 +22908,24 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
}
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) {
return sema.fail(block, src, "'error.{}' not a member of error set '{}'", .{
- error_name.fmt(ip), dest_ty.fmt(sema.mod),
+ error_name.fmt(ip), dest_ty.fmt(pt),
});
}
}
- return Air.internedToRef((try mod.getCoerced(val, base_dest_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(val, base_dest_ty)).toIntern());
}
try sema.requireRuntimeBlock(block, src, operand_src);
- const err_int_ty = try mod.errorIntType();
+ const err_int_ty = try pt.errorIntType();
if (block.wantSafety() and !dest_ty.isAnyError(mod) and
dest_ty.toIntern() != .adhoc_inferred_error_set_type and
- sema.mod.backendSupportsFeature(.error_set_has_value))
+ mod.backendSupportsFeature(.error_set_has_value))
{
if (dest_tag == .ErrorUnion) {
const err_code = try sema.analyzeErrUnionCode(block, operand_src, operand);
const err_int = try block.addBitCast(err_int_ty, err_code);
- const zero_err = try mod.intRef(try mod.errorIntType(), 0);
+ const zero_err = try pt.intRef(try pt.errorIntType(), 0);
const is_zero = try block.addBinOp(.cmp_eq, err_int, zero_err);
if (disjoint) {
@@ -22786,7 +22994,8 @@ fn ptrCastFull(
dest_ty: Type,
operation: []const u8,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
try sema.checkPtrType(block, src, dest_ty, true);
@@ -22795,8 +23004,8 @@ fn ptrCastFull(
const src_info = operand_ty.ptrInfo(mod);
const dest_info = dest_ty.ptrInfo(mod);
- try Type.fromInterned(src_info.child).resolveLayout(mod);
- try Type.fromInterned(dest_info.child).resolveLayout(mod);
+ try Type.fromInterned(src_info.child).resolveLayout(pt);
+ try Type.fromInterned(dest_info.child).resolveLayout(pt);
const src_slice_like = src_info.flags.size == .Slice or
(src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array);
@@ -22810,12 +23019,12 @@ fn ptrCastFull(
if (dest_info.flags.size == .Slice) {
const src_elem_size = switch (src_info.flags.size) {
- .Slice => Type.fromInterned(src_info.child).abiSize(mod),
+ .Slice => Type.fromInterned(src_info.child).abiSize(pt),
// pointer to array
- .One => Type.fromInterned(src_info.child).childType(mod).abiSize(mod),
+ .One => Type.fromInterned(src_info.child).childType(mod).abiSize(pt),
else => unreachable,
};
- const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(mod);
+ const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(pt);
if (src_elem_size != dest_elem_size) {
return sema.fail(block, src, "TODO: implement {s} between slices changing the length", .{operation});
}
@@ -22867,8 +23076,7 @@ fn ptrCastFull(
if (imc_res == .ok) break :check_child;
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "pointer element type '{}' cannot coerce into element type '{}'", .{
- src_child.fmt(mod),
- dest_child.fmt(mod),
+ src_child.fmt(pt), dest_child.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try imc_res.report(sema, src, msg);
@@ -22881,26 +23089,26 @@ fn ptrCastFull(
if (dest_info.sentinel == .none) break :check_sent;
if (src_info.flags.size == .C) break :check_sent;
if (src_info.sentinel != .none) {
- const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child);
+ const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
}
if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) {
// [*]nT -> []T
const arr_ty = Type.fromInterned(src_info.child);
if (arr_ty.sentinel(mod)) |src_sentinel| {
- const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child);
+ const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
}
}
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = if (src_info.sentinel == .none) blk: {
break :blk try sema.errMsg(src, "destination pointer requires '{}' sentinel", .{
- Value.fromInterned(dest_info.sentinel).fmtValue(mod, sema),
+ Value.fromInterned(dest_info.sentinel).fmtValue(pt, sema),
});
} else blk: {
break :blk try sema.errMsg(src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{
- Value.fromInterned(src_info.sentinel).fmtValue(mod, sema),
- Value.fromInterned(dest_info.sentinel).fmtValue(mod, sema),
+ Value.fromInterned(src_info.sentinel).fmtValue(pt, sema),
+ Value.fromInterned(dest_info.sentinel).fmtValue(pt, sema),
});
};
errdefer msg.destroy(sema.gpa);
@@ -22941,8 +23149,8 @@ fn ptrCastFull(
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "'{}' could have null values which are illegal in type '{}'", .{
- operand_ty.fmt(mod),
- dest_ty.fmt(mod),
+ operand_ty.fmt(pt),
+ dest_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "use @ptrCast to assert the pointer is not null", .{});
@@ -22956,12 +23164,12 @@ fn ptrCastFull(
const src_align = if (src_info.flags.alignment != .none)
src_info.flags.alignment
else
- Type.fromInterned(src_info.child).abiAlignment(mod);
+ Type.fromInterned(src_info.child).abiAlignment(pt);
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
- Type.fromInterned(dest_info.child).abiAlignment(mod);
+ Type.fromInterned(dest_info.child).abiAlignment(pt);
if (!flags.align_cast) {
if (dest_align.compare(.gt, src_align)) {
@@ -22969,10 +23177,10 @@ fn ptrCastFull(
const msg = try sema.errMsg(src, "{s} increases pointer alignment", .{operation});
errdefer msg.destroy(sema.gpa);
try sema.errNote(operand_src, msg, "'{}' has alignment '{d}'", .{
- operand_ty.fmt(mod), src_align.toByteUnits() orelse 0,
+ operand_ty.fmt(pt), src_align.toByteUnits() orelse 0,
});
try sema.errNote(src, msg, "'{}' has alignment '{d}'", .{
- dest_ty.fmt(mod), dest_align.toByteUnits() orelse 0,
+ dest_ty.fmt(pt), dest_align.toByteUnits() orelse 0,
});
try sema.errNote(src, msg, "use @alignCast to assert pointer alignment", .{});
break :msg msg;
@@ -22986,10 +23194,10 @@ fn ptrCastFull(
const msg = try sema.errMsg(src, "{s} changes pointer address space", .{operation});
errdefer msg.destroy(sema.gpa);
try sema.errNote(operand_src, msg, "'{}' has address space '{s}'", .{
- operand_ty.fmt(mod), @tagName(src_info.flags.address_space),
+ operand_ty.fmt(pt), @tagName(src_info.flags.address_space),
});
try sema.errNote(src, msg, "'{}' has address space '{s}'", .{
- dest_ty.fmt(mod), @tagName(dest_info.flags.address_space),
+ dest_ty.fmt(pt), @tagName(dest_info.flags.address_space),
});
try sema.errNote(src, msg, "use @addrSpaceCast to cast pointer address space", .{});
break :msg msg;
@@ -23044,9 +23252,9 @@ fn ptrCastFull(
// Only convert to a many-pointer at first
var info = dest_info;
info.flags.size = .Many;
- const ty = try mod.ptrTypeSema(info);
+ const ty = try pt.ptrTypeSema(info);
if (dest_ty.zigTypeTag(mod) == .Optional) {
- break :blk try mod.optionalType(ty.toIntern());
+ break :blk try pt.optionalType(ty.toIntern());
} else {
break :blk ty;
}
@@ -23059,10 +23267,10 @@ fn ptrCastFull(
return sema.failWithUseOfUndef(block, operand_src);
}
if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) {
- return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
+ return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)});
}
if (dest_align.compare(.gt, src_align)) {
- if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| {
+ if (try ptr_val.getUnsignedIntAdvanced(pt, .sema)) |addr| {
if (!dest_align.check(addr)) {
return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
addr,
@@ -23072,12 +23280,12 @@ fn ptrCastFull(
}
}
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
- if (ptr_val.isUndef(mod)) return mod.undefRef(dest_ty);
- const arr_len = try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod));
+ if (ptr_val.isUndef(mod)) return pt.undefRef(dest_ty);
+ const arr_len = try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod));
const ptr_val_key = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
- return Air.internedToRef((try mod.intern(.{ .slice = .{
+ return Air.internedToRef((try pt.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
- .ptr = try mod.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = dest_ty.slicePtrFieldType(mod).toIntern(),
.base_addr = ptr_val_key.base_addr,
.byte_offset = ptr_val_key.byte_offset,
@@ -23086,7 +23294,7 @@ fn ptrCastFull(
} })));
} else {
assert(dest_ptr_ty.eql(dest_ty, mod));
- return Air.internedToRef((try mod.getCoerced(ptr_val, dest_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(ptr_val, dest_ty)).toIntern());
}
}
}
@@ -23112,7 +23320,7 @@ fn ptrCastFull(
try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
{
const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
- const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
+ const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -23129,9 +23337,9 @@ fn ptrCastFull(
// We can't change address spaces with a bitcast, so this requires two instructions
var intermediate_info = src_info;
intermediate_info.flags.address_space = dest_info.flags.address_space;
- const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info);
+ const intermediate_ptr_ty = try pt.ptrTypeSema(intermediate_info);
const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
- break :blk try mod.optionalType(intermediate_ptr_ty.toIntern());
+ break :blk try pt.optionalType(intermediate_ptr_ty.toIntern());
} else intermediate_ptr_ty;
const intermediate = try block.addInst(.{
.tag = .addrspace_cast,
@@ -23152,7 +23360,7 @@ fn ptrCastFull(
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
// We have to construct a slice using the operand's child's array length
// Note that we know from the check at the start of the function that operand_ty is slice-like
- const arr_len = Air.internedToRef((try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern());
+ const arr_len = Air.internedToRef((try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern());
return block.addInst(.{
.tag = .slice,
.data = .{ .ty_pl = .{
@@ -23171,7 +23379,8 @@ fn ptrCastFull(
}
fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
@@ -23186,15 +23395,15 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
if (flags.volatile_cast) ptr_info.flags.is_volatile = false;
const dest_ty = blk: {
- const dest_ty = try mod.ptrTypeSema(ptr_info);
+ const dest_ty = try pt.ptrTypeSema(ptr_info);
if (operand_ty.zigTypeTag(mod) == .Optional) {
- break :blk try mod.optionalType(dest_ty.toIntern());
+ break :blk try pt.optionalType(dest_ty.toIntern());
}
break :blk dest_ty;
};
if (try sema.resolveValue(operand)) |operand_val| {
- return Air.internedToRef((try mod.getCoerced(operand_val, dest_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(operand_val, dest_ty)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -23204,7 +23413,8 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
}
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -23218,7 +23428,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector;
const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector;
if (operand_is_vector != dest_is_vector) {
- return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), operand_ty.fmt(mod) });
+ return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), operand_ty.fmt(pt) });
}
if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
@@ -23239,7 +23449,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (operand_info.signedness != dest_info.signedness) {
return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
- @tagName(dest_info.signedness), operand_ty.fmt(mod),
+ @tagName(dest_info.signedness), operand_ty.fmt(pt),
});
}
if (operand_info.bits < dest_info.bits) {
@@ -23247,7 +23457,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const msg = try sema.errMsg(
src,
"destination type '{}' has more bits than source type '{}'",
- .{ dest_ty.fmt(mod), operand_ty.fmt(mod) },
+ .{ dest_ty.fmt(pt), operand_ty.fmt(pt) },
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "destination type has {d} bits", .{
@@ -23263,20 +23473,20 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
if (try sema.resolveValueIntable(operand)) |val| {
- if (val.isUndef(mod)) return mod.undefRef(dest_ty);
+ if (val.isUndef(mod)) return pt.undefRef(dest_ty);
if (!dest_is_vector) {
- return Air.internedToRef((try mod.getCoerced(
- try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod),
+ return Air.internedToRef((try pt.getCoerced(
+ try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, pt),
dest_ty,
)).toIntern());
}
const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
for (elems, 0..) |*elem, i| {
- const elem_val = try val.elemValue(mod, i);
- const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod);
- elem.* = (try mod.getCoerced(uncoerced_elem, dest_scalar_ty)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, pt);
+ elem.* = (try pt.getCoerced(uncoerced_elem, dest_scalar_ty)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
@@ -23291,9 +23501,10 @@ fn zirBitCount(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
+ comptime comptimeOp: fn (val: Value, ty: Type, pt: Zcu.PerThread) u64,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -23306,25 +23517,25 @@ fn zirBitCount(
return Air.internedToRef(val.toIntern());
}
- const result_scalar_ty = try mod.smallestUnsignedInt(bits);
+ const result_scalar_ty = try pt.smallestUnsignedInt(bits);
switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
const vec_len = operand_ty.vectorLen(mod);
- const result_ty = try mod.vectorType(.{
+ const result_ty = try pt.vectorType(.{
.len = vec_len,
.child = result_scalar_ty.toIntern(),
});
if (try sema.resolveValue(operand)) |val| {
- if (val.isUndef(mod)) return mod.undefRef(result_ty);
+ if (val.isUndef(mod)) return pt.undefRef(result_ty);
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
const scalar_ty = operand_ty.scalarType(mod);
for (elems, 0..) |*elem, i| {
- const elem_val = try val.elemValue(mod, i);
- const count = comptimeOp(elem_val, scalar_ty, mod);
- elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ const count = comptimeOp(elem_val, scalar_ty, pt);
+ elem.* = (try pt.intValue(result_scalar_ty, count)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
@@ -23335,8 +23546,8 @@ fn zirBitCount(
},
.Int => {
if (try sema.resolveValueResolveLazy(operand)) |val| {
- if (val.isUndef(mod)) return mod.undefRef(result_scalar_ty);
- return mod.intRef(result_scalar_ty, comptimeOp(val, operand_ty, mod));
+ if (val.isUndef(mod)) return pt.undefRef(result_scalar_ty);
+ return pt.intRef(result_scalar_ty, comptimeOp(val, operand_ty, pt));
} else {
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addTyOp(air_tag, result_scalar_ty, operand);
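The comptimeOp callback accepted by zirBitCount changes type accordingly: it now receives Zcu.PerThread instead of *Module. A hypothetical callback conforming to the new signature; countBitsStub and its body are illustrative only, not one of the compiler's bit-counting helpers:

fn countBitsStub(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
    _ = val; // a real callback would inspect the value
    // With pt in hand the callback can query layout facts such as the bit width.
    return ty.bitSize(pt);
}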
@@ -23347,7 +23558,8 @@ fn zirBitCount(
}
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -23360,7 +23572,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
block,
operand_src,
"@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
- .{ scalar_ty.fmt(mod), bits },
+ .{ scalar_ty.fmt(pt), bits },
);
}
@@ -23371,8 +23583,8 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
- if (val.isUndef(mod)) return mod.undefRef(operand_ty);
- const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
+ if (val.isUndef(mod)) return pt.undefRef(operand_ty);
+ const result_val = try val.byteSwap(operand_ty, pt, sema.arena);
return Air.internedToRef(result_val.toIntern());
} else operand_src;
@@ -23382,15 +23594,15 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Vector => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(mod))
- return mod.undefRef(operand_ty);
+ return pt.undefRef(operand_ty);
const vec_len = operand_ty.vectorLen(mod);
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
- const elem_val = try val.elemValue(mod, i);
- elem.* = (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ elem.* = (try elem_val.byteSwap(scalar_ty, pt, sema.arena)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = operand_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
@@ -23415,12 +23627,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return Air.internedToRef(val.toIntern());
}
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
- if (val.isUndef(mod)) return mod.undefRef(operand_ty);
- const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
+ if (val.isUndef(mod)) return pt.undefRef(operand_ty);
+ const result_val = try val.bitReverse(operand_ty, pt, sema.arena);
return Air.internedToRef(result_val.toIntern());
} else operand_src;
@@ -23430,15 +23643,15 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
.Vector => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(mod))
- return mod.undefRef(operand_ty);
+ return pt.undefRef(operand_ty);
const vec_len = operand_ty.vectorLen(mod);
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
- const elem_val = try val.elemValue(mod, i);
- elem.* = (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ elem.* = (try elem_val.bitReverse(scalar_ty, pt, sema.arena)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = operand_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
@@ -23453,13 +23666,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
- return sema.mod.intRef(Type.comptime_int, offset);
+ return sema.pt.intRef(Type.comptime_int, offset);
}
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
// TODO reminder to make this a compile error for packed structs
- return sema.mod.intRef(Type.comptime_int, offset / 8);
+ return sema.pt.intRef(Type.comptime_int, offset / 8);
}
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
@@ -23474,12 +23687,13 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
.needed_comptime_reason = "name of field must be comptime-known",
});
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- try ty.resolveLayout(mod);
+ try ty.resolveLayout(pt);
switch (ty.zigTypeTag(mod)) {
.Struct => {},
- else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}),
+ else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(pt)}),
}
const field_index = if (ty.isTuple(mod)) blk: {
@@ -23502,28 +23716,30 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
return bit_sum;
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- bit_sum += field_ty.bitSize(mod);
+ bit_sum += field_ty.bitSize(pt);
} else unreachable;
},
- else => return ty.structFieldOffset(field_index, mod) * 8,
+ else => return ty.structFieldOffset(field_index, pt) * 8,
}
}
fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Struct, .Enum, .Union, .Opaque => return,
- else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}),
+ else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(pt)}),
}
}
/// Returns `true` if the type was a comptime_int.
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (try ty.zigTypeTagOrPoison(mod)) {
.ComptimeInt => return true,
.Int => return false,
- else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}),
+ else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(pt)}),
}
}
@@ -23533,7 +23749,8 @@ fn checkInvalidPtrArithmetic(
src: LazySrcLoc,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (try ty.zigTypeTagOrPoison(mod)) {
.Pointer => switch (ty.ptrSize(mod)) {
.One, .Slice => return,
@@ -23573,7 +23790,8 @@ fn checkPtrOperand(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Pointer => return,
.Fn => {
@@ -23581,7 +23799,7 @@ fn checkPtrOperand(
const msg = try sema.errMsg(
ty_src,
"expected pointer, found '{}'",
- .{ty.fmt(mod)},
+ .{ty.fmt(pt)},
);
errdefer msg.destroy(sema.gpa);
@@ -23594,7 +23812,7 @@ fn checkPtrOperand(
.Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
else => {},
}
- return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
+ return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
}
fn checkPtrType(
@@ -23604,7 +23822,8 @@ fn checkPtrType(
ty: Type,
allow_slice: bool,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Pointer => if (allow_slice or !ty.isSlice(mod)) return,
.Fn => {
@@ -23612,7 +23831,7 @@ fn checkPtrType(
const msg = try sema.errMsg(
ty_src,
"expected pointer type, found '{}'",
- .{ty.fmt(mod)},
+ .{ty.fmt(pt)},
);
errdefer msg.destroy(sema.gpa);
@@ -23625,7 +23844,7 @@ fn checkPtrType(
.Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
else => {},
}
- return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
+ return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
}
fn checkVectorElemType(
@@ -23634,13 +23853,14 @@ fn checkVectorElemType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Int, .Float, .Bool => return,
.Optional, .Pointer => if (ty.isPtrAtRuntime(mod)) return,
else => {},
}
- return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)});
+ return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(pt)});
}
fn checkFloatType(
@@ -23649,10 +23869,11 @@ fn checkFloatType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.ComptimeInt, .ComptimeFloat, .Float => {},
- else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}),
+ else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(pt)}),
}
}
@@ -23662,14 +23883,15 @@ fn checkNumericType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
.Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
},
- else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}),
+ else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(pt)}),
}
}
@@ -23683,7 +23905,8 @@ fn checkAtomicPtrOperand(
ptr_src: LazySrcLoc,
ptr_const: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -23703,7 +23926,7 @@ fn checkAtomicPtrOperand(
block,
elem_ty_src,
"expected bool, integer, float, enum, or pointer type; found '{}'",
- .{elem_ty.fmt(mod)},
+ .{elem_ty.fmt(pt)},
),
};
@@ -23719,7 +23942,7 @@ fn checkAtomicPtrOperand(
const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
.Pointer => ptr_ty.ptrInfo(mod),
else => {
- const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data);
+ const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data);
_ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
unreachable;
},
@@ -23729,7 +23952,7 @@ fn checkAtomicPtrOperand(
wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero;
wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile;
- const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data);
+ const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data);
const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
return casted_ptr;
@@ -23754,7 +23977,8 @@ fn checkIntOrVector(
operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
) CompileError!Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int => return operand_ty,
@@ -23763,12 +23987,12 @@ fn checkIntOrVector(
switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
- elem_ty.fmt(mod),
+ elem_ty.fmt(pt),
}),
}
},
else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
}),
}
}
@@ -23779,7 +24003,8 @@ fn checkIntOrVectorAllowComptime(
operand_ty: Type,
operand_src: LazySrcLoc,
) CompileError!Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return operand_ty,
.Vector => {
@@ -23787,12 +24012,12 @@ fn checkIntOrVectorAllowComptime(
switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
- elem_ty.fmt(mod),
+ elem_ty.fmt(pt),
}),
}
},
else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
- operand_ty.fmt(mod),
+ operand_ty.fmt(pt),
}),
}
}
@@ -23819,7 +24044,8 @@ fn checkSimdBinOp(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!SimdBinOp {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
@@ -23851,7 +24077,8 @@ fn checkVectorizableBinaryOperands(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;
@@ -23881,7 +24108,7 @@ fn checkVectorizableBinaryOperands(
} else {
const msg = msg: {
const msg = try sema.errMsg(src, "mixed scalar and vector operands: '{}' and '{}'", .{
- lhs_ty.fmt(mod), rhs_ty.fmt(mod),
+ lhs_ty.fmt(pt), rhs_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
if (lhs_is_vector) {
@@ -23903,10 +24130,11 @@ fn resolveExportOptions(
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!Module.Export.Options {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
- const export_options_ty = try mod.getBuiltinType("ExportOptions");
+ const export_options_ty = try pt.getBuiltinType("ExportOptions");
const air_ref = try sema.resolveInst(zir_ref);
const options = try sema.coerce(block, export_options_ty, air_ref, src);
@@ -23969,12 +24197,12 @@ fn resolveBuiltinEnum(
comptime name: []const u8,
reason: NeededComptimeReason,
) CompileError!@field(std.builtin, name) {
- const mod = sema.mod;
- const ty = try mod.getBuiltinType(name);
+ const pt = sema.pt;
+ const ty = try pt.getBuiltinType(name);
const air_ref = try sema.resolveInst(zir_ref);
const coerced = try sema.coerce(block, ty, air_ref, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
- return mod.toEnum(@field(std.builtin, name), val);
+ return pt.zcu.toEnum(@field(std.builtin, name), val);
}
fn resolveAtomicOrder(
@@ -24003,7 +24231,8 @@ fn zirCmpxchg(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
const air_tag: Air.Inst.Tag = switch (extended.small) {
0 => .cmpxchg_weak,
@@ -24026,7 +24255,7 @@ fn zirCmpxchg(
block,
elem_ty_src,
"expected bool, integer, enum, or pointer type; found '{}'",
- .{elem_ty.fmt(mod)},
+ .{elem_ty.fmt(pt)},
);
}
const uncasted_ptr = try sema.resolveInst(extra.ptr);
@@ -24052,11 +24281,11 @@ fn zirCmpxchg(
return sema.fail(block, failure_order_src, "failure atomic ordering must not be release or acq_rel", .{});
}
- const result_ty = try mod.optionalType(elem_ty.toIntern());
+ const result_ty = try pt.optionalType(elem_ty.toIntern());
// special case zero bit types
if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
- return Air.internedToRef((try mod.intern(.{ .opt = .{
+ return Air.internedToRef((try pt.intern(.{ .opt = .{
.ty = result_ty.toIntern(),
.val = .none,
} })));
@@ -24068,11 +24297,11 @@ fn zirCmpxchg(
if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
// TODO: this should probably cause the memory stored at the pointer
// to become undef as well
- return mod.undefRef(result_ty);
+ return pt.undefRef(result_ty);
}
const ptr_ty = sema.typeOf(ptr);
const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
- const result_val = try mod.intern(.{ .opt = .{
+ const result_val = try pt.intern(.{ .opt = .{
.ty = result_ty.toIntern(),
.val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: {
try sema.storePtr(block, src, ptr, new_value);
@@ -24103,17 +24332,18 @@ fn zirCmpxchg(
}
fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
const scalar_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@splat");
- if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(mod)});
+ if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(pt)});
- if (!dest_ty.hasRuntimeBits(mod)) {
- const empty_aggregate = try mod.intern(.{ .aggregate = .{
+ if (!dest_ty.hasRuntimeBits(pt)) {
+ const empty_aggregate = try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = &[_]InternPool.Index{} },
} });
@@ -24124,7 +24354,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const scalar_ty = dest_ty.childType(mod);
const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src);
if (try sema.resolveValue(scalar)) |scalar_val| {
- if (scalar_val.isUndef(mod)) return mod.undefRef(dest_ty);
+ if (scalar_val.isUndef(mod)) return pt.undefRef(dest_ty);
return Air.internedToRef((try sema.splat(dest_ty, scalar_val)).toIntern());
}
@@ -24142,10 +24372,11 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
});
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (operand_ty.zigTypeTag(mod) != .Vector) {
- return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
+ return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(pt)});
}
const scalar_ty = operand_ty.childType(mod);
@@ -24155,13 +24386,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
.Int, .Bool => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
- @tagName(operation), operand_ty.fmt(mod),
+ @tagName(operation), operand_ty.fmt(pt),
}),
},
.Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int, .Float => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
- @tagName(operation), operand_ty.fmt(mod),
+ @tagName(operation), operand_ty.fmt(pt),
}),
},
}
@@ -24174,20 +24405,20 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
if (try sema.resolveValue(operand)) |operand_val| {
- if (operand_val.isUndef(mod)) return mod.undefRef(scalar_ty);
+ if (operand_val.isUndef(mod)) return pt.undefRef(scalar_ty);
- var accum: Value = try operand_val.elemValue(mod, 0);
+ var accum: Value = try operand_val.elemValue(pt, 0);
var i: u32 = 1;
while (i < vec_len) : (i += 1) {
- const elem_val = try operand_val.elemValue(mod, i);
+ const elem_val = try operand_val.elemValue(pt, i);
switch (operation) {
- .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
- .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
- .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
- .Min => accum = accum.numberMin(elem_val, mod),
- .Max => accum = accum.numberMax(elem_val, mod),
+ .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, pt),
+ .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, pt),
+ .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, pt),
+ .Min => accum = accum.numberMin(elem_val, pt),
+ .Max => accum = accum.numberMax(elem_val, pt),
.Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
- .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
+ .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, pt),
}
}
return Air.internedToRef(accum.toIntern());
@@ -24204,7 +24435,8 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -24219,9 +24451,9 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
.Array, .Vector => sema.typeOf(mask).arrayLen(mod),
- else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
+ else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(pt)}),
};
- mask_ty = try mod.vectorType(.{
+ mask_ty = try pt.vectorType(.{
.len = @intCast(mask_len),
.child = .i32_type,
});
@@ -24242,51 +24474,51 @@ fn analyzeShuffle(
mask: Value,
mask_len: u32,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
const a_src = block.builtinCallArgSrc(src_node, 1);
const b_src = block.builtinCallArgSrc(src_node, 2);
const mask_src = block.builtinCallArgSrc(src_node, 3);
var a = a_arg;
var b = b_arg;
- const res_ty = try mod.vectorType(.{
+ const res_ty = try pt.vectorType(.{
.len = mask_len,
.child = elem_ty.toIntern(),
});
- const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
- .Array, .Vector => sema.typeOf(a).arrayLen(mod),
+ const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(pt.zcu)) {
+ .Array, .Vector => sema.typeOf(a).arrayLen(pt.zcu),
.Undefined => null,
else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
- elem_ty.fmt(sema.mod),
- sema.typeOf(a).fmt(sema.mod),
+ elem_ty.fmt(pt),
+ sema.typeOf(a).fmt(pt),
}),
};
- const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
- .Array, .Vector => sema.typeOf(b).arrayLen(mod),
+ const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(pt.zcu)) {
+ .Array, .Vector => sema.typeOf(b).arrayLen(pt.zcu),
.Undefined => null,
else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
- elem_ty.fmt(sema.mod),
- sema.typeOf(b).fmt(sema.mod),
+ elem_ty.fmt(pt),
+ sema.typeOf(b).fmt(pt),
}),
};
if (maybe_a_len == null and maybe_b_len == null) {
- return mod.undefRef(res_ty);
+ return pt.undefRef(res_ty);
}
const a_len: u32 = @intCast(maybe_a_len orelse maybe_b_len.?);
const b_len: u32 = @intCast(maybe_b_len orelse a_len);
- const a_ty = try mod.vectorType(.{
+ const a_ty = try pt.vectorType(.{
.len = a_len,
.child = elem_ty.toIntern(),
});
- const b_ty = try mod.vectorType(.{
+ const b_ty = try pt.vectorType(.{
.len = b_len,
.child = elem_ty.toIntern(),
});
- if (maybe_a_len == null) a = try mod.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
- if (maybe_b_len == null) b = try mod.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src);
+ if (maybe_a_len == null) a = try pt.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
+ if (maybe_b_len == null) b = try pt.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src);
const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){
.{ a_len, a_src, a_ty },
@@ -24294,10 +24526,10 @@ fn analyzeShuffle(
};
for (0..@intCast(mask_len)) |i| {
- const elem = try mask.elemValue(sema.mod, i);
- if (elem.isUndef(mod)) continue;
+ const elem = try mask.elemValue(pt, i);
+ if (elem.isUndef(pt.zcu)) continue;
const elem_resolved = try sema.resolveLazyValue(elem);
- const int = elem_resolved.toSignedInt(mod);
+ const int = elem_resolved.toSignedInt(pt);
var unsigned: u32 = undefined;
var chosen: u32 = undefined;
if (int >= 0) {
@@ -24314,7 +24546,7 @@ fn analyzeShuffle(
try sema.errNote(operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{
unsigned,
- operand_info[chosen][2].fmt(sema.mod),
+ operand_info[chosen][2].fmt(pt),
});
if (chosen == 0) {
@@ -24331,16 +24563,16 @@ fn analyzeShuffle(
if (try sema.resolveValue(b)) |b_val| {
const values = try sema.arena.alloc(InternPool.Index, mask_len);
for (values, 0..) |*value, i| {
- const mask_elem_val = try mask.elemValue(sema.mod, i);
- if (mask_elem_val.isUndef(mod)) {
- value.* = try mod.intern(.{ .undef = elem_ty.toIntern() });
+ const mask_elem_val = try mask.elemValue(pt, i);
+ if (mask_elem_val.isUndef(pt.zcu)) {
+ value.* = try pt.intern(.{ .undef = elem_ty.toIntern() });
continue;
}
- const int = mask_elem_val.toSignedInt(mod);
+ const int = mask_elem_val.toSignedInt(pt);
const unsigned: u32 = @intCast(if (int >= 0) int else ~int);
- values[i] = (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).toIntern();
+ values[i] = (try (if (int >= 0) a_val else b_val).elemValue(pt, unsigned)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = res_ty.toIntern(),
.storage = .{ .elems = values },
} })));
@@ -24359,21 +24591,21 @@ fn analyzeShuffle(
const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
for (@intCast(0)..@intCast(min_len)) |i| {
- expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
+ expand_mask_values[i] = (try pt.intValue(Type.comptime_int, i)).toIntern();
}
for (@intCast(min_len)..@intCast(max_len)) |i| {
- expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
+ expand_mask_values[i] = (try pt.intValue(Type.comptime_int, -1)).toIntern();
}
- const expand_mask = try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(),
+ const expand_mask = try pt.intern(.{ .aggregate = .{
+ .ty = (try pt.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(),
.storage = .{ .elems = expand_mask_values },
} });
if (a_len < b_len) {
- const undef = try mod.undefRef(a_ty);
+ const undef = try pt.undefRef(a_ty);
a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, Value.fromInterned(expand_mask), @intCast(max_len));
} else {
- const undef = try mod.undefRef(b_ty);
+ const undef = try pt.undefRef(b_ty);
b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, Value.fromInterned(expand_mask), @intCast(max_len));
}
}
@@ -24393,7 +24625,8 @@ fn analyzeShuffle(
}
fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;
const src = block.nodeOffset(extra.node);
@@ -24409,17 +24642,17 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
.Vector, .Array => pred_ty.arrayLen(mod),
- else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}),
+ else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(pt)}),
};
const vec_len: u32 = @intCast(try sema.usizeCast(block, pred_src, vec_len_u64));
- const bool_vec_ty = try mod.vectorType(.{
+ const bool_vec_ty = try pt.vectorType(.{
.len = vec_len,
.child = .bool_type,
});
const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);
- const vec_ty = try mod.vectorType(.{
+ const vec_ty = try pt.vectorType(.{
.len = vec_len,
.child = elem_ty.toIntern(),
});
@@ -24431,23 +24664,23 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const maybe_b = try sema.resolveValue(b);
const runtime_src = if (maybe_pred) |pred_val| rs: {
- if (pred_val.isUndef(mod)) return mod.undefRef(vec_ty);
+ if (pred_val.isUndef(mod)) return pt.undefRef(vec_ty);
if (maybe_a) |a_val| {
- if (a_val.isUndef(mod)) return mod.undefRef(vec_ty);
+ if (a_val.isUndef(mod)) return pt.undefRef(vec_ty);
if (maybe_b) |b_val| {
- if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
+ if (b_val.isUndef(mod)) return pt.undefRef(vec_ty);
const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
defer sema.gpa.free(elems);
for (elems, 0..) |*elem, i| {
- const pred_elem_val = try pred_val.elemValue(mod, i);
+ const pred_elem_val = try pred_val.elemValue(pt, i);
const should_choose_a = pred_elem_val.toBool();
- elem.* = (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).toIntern();
+ elem.* = (try (if (should_choose_a) a_val else b_val).elemValue(pt, i)).toIntern();
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = vec_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
@@ -24456,16 +24689,16 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
}
} else {
if (maybe_b) |b_val| {
- if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
+ if (b_val.isUndef(mod)) return pt.undefRef(vec_ty);
}
break :rs a_src;
}
} else rs: {
if (maybe_a) |a_val| {
- if (a_val.isUndef(mod)) return mod.undefRef(vec_ty);
+ if (a_val.isUndef(mod)) return pt.undefRef(vec_ty);
}
if (maybe_b) |b_val| {
- if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
+ if (b_val.isUndef(mod)) return pt.undefRef(vec_ty);
}
break :rs pred_src;
};
@@ -24531,7 +24764,8 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -24588,12 +24822,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Xchg => operand_val,
.Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
.Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
- .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod),
- .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod),
- .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod),
- .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod),
- .Max => stored_val.numberMax (operand_val, mod),
- .Min => stored_val.numberMin (operand_val, mod),
+ .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, pt),
+ .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, pt),
+ .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, pt),
+ .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, pt),
+ .Max => stored_val.numberMax (operand_val, pt),
+ .Min => stored_val.numberMin (operand_val, pt),
// zig fmt: on
};
try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
@@ -24669,36 +24903,37 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const maybe_mulend1 = try sema.resolveValue(mulend1);
const maybe_mulend2 = try sema.resolveValue(mulend2);
const maybe_addend = try sema.resolveValue(addend);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.scalarType(mod).zigTypeTag(mod)) {
.ComptimeFloat, .Float => {},
- else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
+ else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(pt)}),
}
const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
if (maybe_mulend2) |mulend2_val| {
- if (mulend2_val.isUndef(mod)) return mod.undefRef(ty);
+ if (mulend2_val.isUndef(mod)) return pt.undefRef(ty);
if (maybe_addend) |addend_val| {
- if (addend_val.isUndef(mod)) return mod.undefRef(ty);
- const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
+ if (addend_val.isUndef(mod)) return pt.undefRef(ty);
+ const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, pt);
return Air.internedToRef(result_val.toIntern());
} else {
break :rs addend_src;
}
} else {
if (maybe_addend) |addend_val| {
- if (addend_val.isUndef(mod)) return mod.undefRef(ty);
+ if (addend_val.isUndef(mod)) return pt.undefRef(ty);
}
break :rs mulend2_src;
}
} else rs: {
if (maybe_mulend2) |mulend2_val| {
- if (mulend2_val.isUndef(mod)) return mod.undefRef(ty);
+ if (mulend2_val.isUndef(mod)) return pt.undefRef(ty);
}
if (maybe_addend) |addend_val| {
- if (addend_val.isUndef(mod)) return mod.undefRef(ty);
+ if (addend_val.isUndef(mod)) return pt.undefRef(ty);
}
break :rs mulend1_src;
};
@@ -24720,7 +24955,8 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const modifier_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const func_src = block.builtinCallArgSrc(inst_data.src_node, 1);
@@ -24730,7 +24966,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;
const func = try sema.resolveInst(extra.callee);
- const modifier_ty = try mod.getBuiltinType("CallModifier");
+ const modifier_ty = try pt.getBuiltinType("CallModifier");
const air_ref = try sema.resolveInst(extra.modifier);
const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{
@@ -24783,7 +25019,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const args_ty = sema.typeOf(args);
if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) {
- return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)});
+ return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(pt)});
}
const resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
@@ -24812,7 +25048,8 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data;
@@ -24827,14 +25064,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
try sema.checkPtrType(block, inst_src, parent_ptr_ty, true);
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
if (parent_ptr_info.flags.size != .One) {
- return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(zcu)});
+ return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(pt)});
}
const parent_ty = Type.fromInterned(parent_ptr_info.child);
switch (parent_ty.zigTypeTag(zcu)) {
.Struct, .Union => {},
- else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}),
+ else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}),
}
- try parent_ty.resolveLayout(zcu);
+ try parent_ty.resolveLayout(pt);
const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{
.needed_comptime_reason = "field name must be comptime-known",
@@ -24865,7 +25102,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
var actual_parent_ptr_info: InternPool.Key.PtrType = .{
.child = parent_ty.toIntern(),
.flags = .{
- .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema),
+ .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema),
.is_const = field_ptr_info.flags.is_const,
.is_volatile = field_ptr_info.flags.is_volatile,
.is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24877,7 +25114,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
var actual_field_ptr_info: InternPool.Key.PtrType = .{
.child = field_ty.toIntern(),
.flags = .{
- .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema),
+ .alignment = try field_ptr_ty.ptrAlignmentAdvanced(pt, .sema),
.is_const = field_ptr_info.flags.is_const,
.is_volatile = field_ptr_info.flags.is_volatile,
.is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24888,13 +25125,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
switch (parent_ty.containerLayout(zcu)) {
.auto => {
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
- if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced(
+ if (zcu.typeToStruct(parent_ty)) |struct_obj| try pt.structFieldAlignmentAdvanced(
struct_obj.fieldAlign(ip, field_index),
field_ty,
struct_obj.layout,
.sema,
) else if (zcu.typeToUnion(parent_ty)) |union_obj|
- try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema)
+ try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema)
else
actual_field_ptr_info.flags.alignment,
);
@@ -24903,7 +25140,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
},
.@"extern" => {
- const field_offset = parent_ty.structFieldOffset(field_index, zcu);
+ const field_offset = parent_ty.structFieldOffset(field_index, pt);
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (field_offset > 0)
Alignment.fromLog2Units(@ctz(field_offset))
else
@@ -24914,7 +25151,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.@"packed" => {
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
- (if (zcu.typeToStruct(parent_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
+ (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch
return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{});
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0)
@@ -24924,16 +25161,16 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
}
- const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info);
+ const actual_field_ptr_ty = try pt.ptrTypeSema(actual_field_ptr_info);
const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src);
- const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info);
+ const actual_parent_ptr_ty = try pt.ptrTypeSema(actual_parent_ptr_info);
const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: {
switch (parent_ty.zigTypeTag(zcu)) {
.Struct => switch (parent_ty.containerLayout(zcu)) {
.auto => {},
.@"extern" => {
- const byte_offset = parent_ty.structFieldOffset(field_index, zcu);
+ const byte_offset = parent_ty.structFieldOffset(field_index, pt);
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
},
@@ -24941,7 +25178,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
// Logic lifted from type computation above - I'm just assuming it's correct.
// `catch unreachable` since error case handled above.
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
- zcu.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
+ pt.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable;
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
@@ -24951,7 +25188,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
.auto => {},
.@"extern", .@"packed" => {
// For an extern or packed union, just coerce the pointer.
- const parent_ptr_val = try zcu.getCoerced(field_ptr_val, actual_parent_ptr_ty);
+ const parent_ptr_val = try pt.getCoerced(field_ptr_val, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
},
},
@@ -24980,7 +25217,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
if (field.index != field_index) {
return sema.fail(block, inst_src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{
- field_name.fmt(ip), field_index, field.index, parent_ty.fmt(zcu),
+ field_name.fmt(ip), field_index, field.index, parent_ty.fmt(pt),
});
}
break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src);
@@ -25001,8 +25238,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
}
fn ptrSubtract(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, byte_subtract: u64, new_ty: Type) !Value {
- const zcu = sema.mod;
- if (byte_subtract == 0) return zcu.getCoerced(ptr_val, new_ty);
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ if (byte_subtract == 0) return pt.getCoerced(ptr_val, new_ty);
var ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return sema.failWithUseOfUndef(block, src),
.ptr => |ptr| ptr,
@@ -25018,7 +25256,7 @@ fn ptrSubtract(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, byte
}
ptr.byte_offset -= byte_subtract;
ptr.ty = new_ty.toIntern();
- return Value.fromInterned(try zcu.intern(.{ .ptr = ptr }));
+ return Value.fromInterned(try pt.intern(.{ .ptr = ptr }));
}
fn zirMinMax(
@@ -25072,7 +25310,8 @@ fn analyzeMinMax(
) CompileError!Air.Inst.Ref {
assert(operands.len == operand_srcs.len);
assert(operands.len > 0);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (operands.len == 1) return operands[0];
@@ -25115,15 +25354,15 @@ fn analyzeMinMax(
break :refine_bounds;
}
const scalar_bounds: ?[2]Value = bounds: {
- if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(mod);
- var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(mod, 0), mod) orelse break :bounds null;
+ if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(pt);
+ var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(pt, 0), pt) orelse break :bounds null;
const len = try sema.usizeCast(block, src, ty.vectorLen(mod));
for (1..len) |i| {
- const elem = try uncoerced_val.elemValue(mod, i);
- const elem_bounds = try elem.intValueBounds(mod) orelse break :bounds null;
+ const elem = try uncoerced_val.elemValue(pt, i);
+ const elem_bounds = try elem.intValueBounds(pt) orelse break :bounds null;
cur_bounds = .{
- Value.numberMin(elem_bounds[0], cur_bounds[0], mod),
- Value.numberMax(elem_bounds[1], cur_bounds[1], mod),
+ Value.numberMin(elem_bounds[0], cur_bounds[0], pt),
+ Value.numberMax(elem_bounds[1], cur_bounds[1], pt),
};
}
break :bounds cur_bounds;
@@ -25134,8 +25373,8 @@ fn analyzeMinMax(
cur_max_scalar = bounds[1];
bounds_status = .defined;
} else {
- cur_min_scalar = opFunc(cur_min_scalar, bounds[0], mod);
- cur_max_scalar = opFunc(cur_max_scalar, bounds[1], mod);
+ cur_min_scalar = opFunc(cur_min_scalar, bounds[0], pt);
+ cur_max_scalar = opFunc(cur_max_scalar, bounds[1], pt);
}
}
},
@@ -25153,18 +25392,18 @@ fn analyzeMinMax(
const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above
const vec_len = simd_op.len orelse {
- const result_val = opFunc(cur_val, operand_val, mod);
+ const result_val = opFunc(cur_val, operand_val, pt);
cur_minmax = Air.internedToRef(result_val.toIntern());
continue;
};
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
- const lhs_elem_val = try cur_val.elemValue(mod, i);
- const rhs_elem_val = try operand_val.elemValue(mod, i);
- const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, mod);
- elem.* = (try mod.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern();
+ const lhs_elem_val = try cur_val.elemValue(pt, i);
+ const rhs_elem_val = try operand_val.elemValue(pt, i);
+ const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, pt);
+ elem.* = (try pt.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern();
}
- cur_minmax = Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ cur_minmax = Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = simd_op.result_ty.toIntern(),
.storage = .{ .elems = elems },
} })));
@@ -25191,8 +25430,8 @@ fn analyzeMinMax(
assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg
- const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
- const refined_ty = if (orig_ty.isVector(mod)) try mod.vectorType(.{
+ const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar);
+ const refined_ty = if (orig_ty.isVector(mod)) try pt.vectorType(.{
.len = orig_ty.vectorLen(mod),
.child = refined_scalar_ty.toIntern(),
}) else refined_scalar_ty;
@@ -25226,8 +25465,8 @@ fn analyzeMinMax(
runtime_known.unset(0); // don't look at this operand in the loop below
const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod);
if (scalar_ty.isInt(mod)) {
- cur_min_scalar = try scalar_ty.minInt(mod, scalar_ty);
- cur_max_scalar = try scalar_ty.maxInt(mod, scalar_ty);
+ cur_min_scalar = try scalar_ty.minInt(pt, scalar_ty);
+ cur_max_scalar = try scalar_ty.maxInt(pt, scalar_ty);
bounds_status = .defined;
} else {
bounds_status = .non_integral;
@@ -25242,7 +25481,7 @@ fn analyzeMinMax(
const rhs_src = operand_srcs[idx];
const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
if (known_undef) {
- cur_minmax = try mod.undefRef(simd_op.result_ty);
+ cur_minmax = try pt.undefRef(simd_op.result_ty);
} else {
cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
}
@@ -25254,15 +25493,15 @@ fn analyzeMinMax(
bounds_status = .non_integral;
break :refine_bounds;
}
- const scalar_min = try scalar_ty.minInt(mod, scalar_ty);
- const scalar_max = try scalar_ty.maxInt(mod, scalar_ty);
+ const scalar_min = try scalar_ty.minInt(pt, scalar_ty);
+ const scalar_max = try scalar_ty.maxInt(pt, scalar_ty);
if (bounds_status == .unknown) {
cur_min_scalar = scalar_min;
cur_max_scalar = scalar_max;
bounds_status = .defined;
} else {
- cur_min_scalar = opFunc(cur_min_scalar, scalar_min, mod);
- cur_max_scalar = opFunc(cur_max_scalar, scalar_max, mod);
+ cur_min_scalar = opFunc(cur_min_scalar, scalar_min, pt);
+ cur_max_scalar = opFunc(cur_max_scalar, scalar_max, pt);
}
},
.non_integral => {},
@@ -25276,8 +25515,8 @@ fn analyzeMinMax(
return cur_minmax.?;
}
assert(bounds_status == .defined); // there were integral runtime operands
- const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
- const refined_ty = if (unrefined_ty.isVector(mod)) try mod.vectorType(.{
+ const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar);
+ const refined_ty = if (unrefined_ty.isVector(mod)) try pt.vectorType(.{
.len = unrefined_ty.vectorLen(mod),
.child = refined_scalar_ty.toIntern(),
}) else refined_scalar_ty;
@@ -25291,15 +25530,16 @@ fn analyzeMinMax(
}
fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ptr_ty = sema.typeOf(ptr);
const info = ptr_ty.ptrInfo(mod);
if (info.flags.size == .One) {
// Already an array pointer.
return ptr;
}
- const new_ty = try mod.ptrTypeSema(.{
- .child = (try mod.arrayType(.{
+ const new_ty = try pt.ptrTypeSema(.{
+ .child = (try pt.arrayType(.{
.len = len,
.sentinel = info.sentinel,
.child = info.child,
@@ -25331,8 +25571,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const src_ty = sema.typeOf(src_ptr);
const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
- const target = sema.mod.getTarget();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
+ const target = mod.getTarget();
if (dest_ty.isConstPtr(mod)) {
return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
@@ -25343,10 +25584,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const msg = try sema.errMsg(src, "unknown @memcpy length", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(dest_src, msg, "destination type '{}' provides no length", .{
- dest_ty.fmt(sema.mod),
+ dest_ty.fmt(pt),
});
try sema.errNote(src_src, msg, "source type '{}' provides no length", .{
- src_ty.fmt(sema.mod),
+ src_ty.fmt(pt),
});
break :msg msg;
};
@@ -25365,10 +25606,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const msg = try sema.errMsg(src, "non-matching @memcpy lengths", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(dest_src, msg, "length {} here", .{
- dest_len_val.fmtValue(sema.mod, sema),
+ dest_len_val.fmtValue(pt, sema),
});
try sema.errNote(src_src, msg, "length {} here", .{
- src_len_val.fmtValue(sema.mod, sema),
+ src_len_val.fmtValue(pt, sema),
});
break :msg msg;
};
@@ -25397,10 +25638,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src;
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
- const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?;
+ const len_u64 = (try len_val.?.getUnsignedIntAdvanced(pt, .sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
for (0..len) |i| {
- const elem_index = try mod.intRef(Type.usize, i);
+ const elem_index = try pt.intRef(Type.usize, i);
const dest_elem_ptr = try sema.elemPtrOneLayerOnly(
block,
src,
@@ -25456,7 +25697,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
var new_dest_ptr = dest_ptr;
var new_src_ptr = src_ptr;
if (len_val) |val| {
- const len = try val.toUnsignedIntSema(mod);
+ const len = try val.toUnsignedIntSema(pt);
if (len == 0) {
// This AIR instruction guarantees length > 0 if it is comptime-known.
return;
@@ -25503,7 +25744,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
assert(dest_manyptr_ty_key.flags.size == .One);
dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
dest_manyptr_ty_key.flags.size = .Many;
- break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src);
+ break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src);
} else new_dest_ptr;
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
@@ -25514,7 +25755,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
assert(src_manyptr_ty_key.flags.size == .One);
src_manyptr_ty_key.child = src_elem_ty.toIntern();
src_manyptr_ty_key.flags.size = .Many;
- break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src);
+ break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src);
} else new_src_ptr;
// ok1: dest >= src + len
@@ -25537,7 +25778,8 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
}
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
@@ -25569,7 +25811,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const msg = try sema.errMsg(src, "unknown @memset length", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(dest_src, msg, "destination type '{}' provides no length", .{
- dest_ptr_ty.fmt(mod),
+ dest_ptr_ty.fmt(pt),
});
break :msg msg;
});
@@ -25581,7 +25823,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src);
const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
- const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?;
+ const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
if (len == 0) {
// This AIR instruction guarantees length > 0 if it is comptime-known.
@@ -25590,22 +25832,22 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
if (!sema.isComptimeMutablePtr(ptr_val)) break :rs dest_src;
const elem_val = try sema.resolveValue(elem) orelse break :rs value_src;
- const array_ty = try mod.arrayType(.{
+ const array_ty = try pt.arrayType(.{
.child = dest_elem_ty.toIntern(),
.len = len_u64,
});
- const array_val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ const array_val = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .repeated_elem = elem_val.toIntern() },
- } })));
+ } }));
const array_ptr_ty = ty: {
var info = dest_ptr_ty.ptrInfo(mod);
info.flags.size = .One;
info.child = array_ty.toIntern();
- break :ty try mod.ptrType(info);
+ break :ty try pt.ptrType(info);
};
const raw_ptr_val = if (dest_ptr_ty.isSlice(mod)) ptr_val.slicePtr(mod) else ptr_val;
- const array_ptr_val = try mod.getCoerced(raw_ptr_val, array_ptr_ty);
+ const array_ptr_val = try pt.getCoerced(raw_ptr_val, array_ptr_ty);
return sema.storePtrVal(block, src, array_ptr_val, array_val, array_ty);
};
@@ -25658,7 +25900,8 @@ fn zirVarExtended(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
const ty_src = block.src(.{ .node_offset_var_decl_ty = 0 });
const init_src = block.src(.{ .node_offset_var_decl_init = 0 });
@@ -25705,7 +25948,7 @@ fn zirVarExtended(
try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
- return Air.internedToRef((try mod.intern(.{ .variable = .{
+ return Air.internedToRef((try pt.intern(.{ .variable = .{
.ty = var_ty.toIntern(),
.init = init_val,
.decl = sema.owner_decl_index,
@@ -25721,7 +25964,8 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const tracy = trace(@src());
defer tracy.end();
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
const target = mod.getTarget();
@@ -25761,7 +26005,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (val.isGenericPoison()) {
break :blk null;
}
- const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod));
+ const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(pt));
const default = target_util.defaultFunctionAlignment(target);
break :blk if (alignment == default) .none else alignment;
} else if (extra.data.bits.has_align_ref) blk: {
@@ -25781,7 +26025,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
error.GenericPoison => break :blk null,
else => |e| return e,
};
- const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod));
+ const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(pt));
const default = target_util.defaultFunctionAlignment(target);
break :blk if (alignment == default) .none else alignment;
} else .none;
@@ -25857,7 +26101,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const body = sema.code.bodySlice(extra_index, body_len);
extra_index += body.len;
- const cc_ty = try mod.getBuiltinType("CallingConvention");
+ const cc_ty = try pt.getBuiltinType("CallingConvention");
const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{
.needed_comptime_reason = "calling convention must be comptime-known",
});
@@ -25986,7 +26230,8 @@ fn zirCDefine(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const name_src = block.builtinCallArgSrc(extra.node, 0);
const val_src = block.builtinCallArgSrc(extra.node, 1);
@@ -26014,7 +26259,7 @@ fn zirWasmMemorySize(
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const index_src = block.builtinCallArgSrc(extra.node, 0);
const builtin_src = block.nodeOffset(extra.node);
- const target = sema.mod.getTarget();
+ const target = sema.pt.zcu.getTarget();
if (!target.isWasm()) {
return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
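
The hunks above and below repeat one mechanical rewrite: helpers that used to open with `const mod = sema.mod;` now grab the per-thread handle first, and anything that creates or interns values (`intValue`, `intern`, `getBuiltinType`, `ptrTypeSema`, diagnostic formatting via `Type.fmt`) is routed through `pt`, while read-only queries (`zigTypeTag`, `getTarget`, `arrayLen`) keep taking the `Zcu` via `mod`/`zcu`. A minimal sketch of the resulting shape, not part of the commit, assuming the surrounding Sema.zig context; the function name is illustrative only:

fn exampleArrayLen(sema: *Sema, block: *Block, src: LazySrcLoc, array_ty: Type) CompileError!Air.Inst.Ref {
    const pt = sema.pt; // per-thread handle: owns interning and value construction
    const mod = pt.zcu; // the compilation unit: read-only queries stay here
    if (array_ty.zigTypeTag(mod) != .Array) {
        // diagnostics now format types through `pt` rather than `mod`
        return sema.fail(block, src, "expected array, found '{}'", .{array_ty.fmt(pt)});
    }
    // value creation goes through `pt`
    const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(mod));
    return Air.internedToRef(len_val.toIntern());
}
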
@@ -26041,7 +26286,7 @@ fn zirWasmMemoryGrow(
const builtin_src = block.nodeOffset(extra.node);
const index_src = block.builtinCallArgSrc(extra.node, 0);
const delta_src = block.builtinCallArgSrc(extra.node, 1);
- const target = sema.mod.getTarget();
+ const target = sema.pt.zcu.getTarget();
if (!target.isWasm()) {
return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
@@ -26067,10 +26312,11 @@ fn resolvePrefetchOptions(
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.PrefetchOptions {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
- const options_ty = try mod.getBuiltinType("PrefetchOptions");
+ const options_ty = try pt.getBuiltinType("PrefetchOptions");
const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node });
@@ -26094,7 +26340,7 @@ fn resolvePrefetchOptions(
return std.builtin.PrefetchOptions{
.rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
- .locality = @intCast(try locality_val.toUnsignedIntSema(mod)),
+ .locality = @intCast(try locality_val.toUnsignedIntSema(pt)),
.cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
};
}
@@ -26138,11 +26384,12 @@ fn resolveExternOptions(
linkage: std.builtin.GlobalLinkage = .strong,
is_thread_local: bool = false,
} {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const options_inst = try sema.resolveInst(zir_ref);
- const extern_options_ty = try mod.getBuiltinType("ExternOptions");
+ const extern_options_ty = try pt.getBuiltinType("ExternOptions");
const options = try sema.coerce(block, extern_options_ty, options_inst, src);
const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node });
@@ -26203,7 +26450,8 @@ fn zirBuiltinExtern(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const ty_src = block.builtinCallArgSrc(extra.node, 0);
@@ -26215,7 +26463,7 @@ fn zirBuiltinExtern(
}
if (!try sema.validateExternType(ty, .other)) {
const msg = msg: {
- const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
+ const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, ty, .other);
break :msg msg;
@@ -26226,7 +26474,7 @@ fn zirBuiltinExtern(
const options = try sema.resolveExternOptions(block, options_src, extra.rhs);
if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) {
- ty = try mod.optionalType(ty.toIntern());
+ ty = try pt.optionalType(ty.toIntern());
}
const ptr_info = ty.ptrInfo(mod);
@@ -26237,13 +26485,13 @@ fn zirBuiltinExtern(
new_decl_index,
Value.fromInterned(
if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn)
- try ip.getExternFunc(sema.gpa, .{
+ try ip.getExternFunc(sema.gpa, pt.tid, .{
.ty = ptr_info.child,
.decl = new_decl_index,
.lib_name = options.library_name,
})
else
- try mod.intern(.{ .variable = .{
+ try pt.intern(.{ .variable = .{
.ty = ptr_info.child,
.init = .none,
.decl = new_decl_index,
@@ -26259,9 +26507,9 @@ fn zirBuiltinExtern(
new_decl.owns_tv = true;
// Note that this will queue the anon decl for codegen, so that the backend can
// correctly handle the extern, including duplicate detection.
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
- return Air.internedToRef((try mod.getCoerced(Value.fromInterned((try mod.intern(.{ .ptr = .{
+ return Air.internedToRef((try pt.getCoerced(Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => ty.toIntern(),
.opt_type => |child_type| child_type,
@@ -26269,7 +26517,7 @@ fn zirBuiltinExtern(
},
.base_addr = .{ .decl = new_decl_index },
.byte_offset = 0,
- } }))), ty)).toIntern());
+ } })), ty)).toIntern());
}
fn zirWorkItem(
@@ -26281,7 +26529,7 @@ fn zirWorkItem(
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const dimension_src = block.builtinCallArgSrc(extra.node, 0);
const builtin_src = block.nodeOffset(extra.node);
- const target = sema.mod.getTarget();
+ const target = sema.pt.zcu.getTarget();
switch (target.cpu.arch) {
// TODO: Allow for other GPU targets.
@@ -26344,11 +26592,12 @@ fn validateVarType(
var_ty: Type,
is_extern: bool,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (is_extern) {
if (!try sema.validateExternType(var_ty, .other)) {
const msg = msg: {
- const msg = try sema.errMsg(src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "extern variable cannot have type '{}'", .{var_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, src, var_ty, .other);
break :msg msg;
@@ -26361,7 +26610,7 @@ fn validateVarType(
block,
src,
"non-extern variable with opaque type '{}'",
- .{var_ty.fmt(mod)},
+ .{var_ty.fmt(pt)},
);
}
}
@@ -26369,7 +26618,7 @@ fn validateVarType(
if (!try sema.typeRequiresComptime(var_ty)) return;
const msg = msg: {
- const msg = try sema.errMsg(src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(mod)});
+ const msg = try sema.errMsg(src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsComptime(msg, src, var_ty);
@@ -26393,7 +26642,7 @@ fn explainWhyTypeIsComptime(
var type_set = TypeSet{};
defer type_set.deinit(sema.gpa);
- try ty.resolveFully(sema.mod);
+ try ty.resolveFully(sema.pt);
return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set);
}
@@ -26404,7 +26653,8 @@ fn explainWhyTypeIsComptimeInner(
ty: Type,
type_set: *TypeSet,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Bool,
@@ -26418,9 +26668,7 @@ fn explainWhyTypeIsComptimeInner(
=> return,
.Fn => {
- try sema.errNote(src_loc, msg, "use '*const {}' for a function pointer type", .{
- ty.fmt(sema.mod),
- });
+ try sema.errNote(src_loc, msg, "use '*const {}' for a function pointer type", .{ty.fmt(pt)});
},
.Type => {
@@ -26436,7 +26684,7 @@ fn explainWhyTypeIsComptimeInner(
=> return,
.Opaque => {
- try sema.errNote(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(sema.mod)});
+ try sema.errNote(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(pt)});
},
.Array, .Vector => {
@@ -26453,7 +26701,7 @@ fn explainWhyTypeIsComptimeInner(
.Inline => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
- if (Type.fromInterned(fn_info.return_type).comptimeOnly(mod)) {
+ if (Type.fromInterned(fn_info.return_type).comptimeOnly(pt)) {
try sema.errNote(src_loc, msg, "function has a comptime-only return type", .{});
}
return;
@@ -26526,7 +26774,8 @@ fn validateExternType(
ty: Type,
position: ExternPosition,
) !bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeFloat,
@@ -26557,7 +26806,7 @@ fn validateExternType(
},
.Fn => {
if (position != .other) return false;
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
// For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
@@ -26571,7 +26820,7 @@ fn validateExternType(
.Struct, .Union => switch (ty.containerLayout(mod)) {
.@"extern" => return true,
.@"packed" => {
- const bit_size = try ty.bitSizeAdvanced(mod, .sema);
+ const bit_size = try ty.bitSizeAdvanced(pt, .sema);
switch (bit_size) {
0, 8, 16, 32, 64, 128 => return true,
else => return false,
@@ -26595,7 +26844,8 @@ fn explainWhyTypeIsNotExtern(
ty: Type,
position: ExternPosition,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Opaque,
.Bool,
@@ -26622,7 +26872,7 @@ fn explainWhyTypeIsNotExtern(
if (!ty.isConstPtr(mod) and pointee_ty.zigTypeTag(mod) == .Fn) {
try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{});
} else if (try sema.typeRequiresComptime(ty)) {
- try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
+ try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(pt)});
try sema.explainWhyTypeIsComptime(msg, src_loc, ty);
}
try sema.explainWhyTypeIsNotExtern(msg, src_loc, pointee_ty, .other);
@@ -26650,7 +26900,7 @@ fn explainWhyTypeIsNotExtern(
},
.Enum => {
const tag_ty = ty.intTagType(mod);
- try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
+ try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(pt)});
try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
},
.Struct => try sema.errNote(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}),
@@ -26671,7 +26921,8 @@ fn explainWhyTypeIsNotExtern(
/// Returns true if `ty` is allowed in packed types.
/// Does not require `ty` to be resolved in any way, but may resolve whether it is comptime-only.
fn validatePackedType(sema: *Sema, ty: Type) !bool {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.Type,
.ComptimeFloat,
@@ -26710,7 +26961,8 @@ fn explainWhyTypeIsNotPacked(
src_loc: LazySrcLoc,
ty: Type,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
@@ -26750,10 +27002,11 @@ fn explainWhyTypeIsNotPacked(
}
fn prepareSimplePanic(sema: *Sema) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (mod.panic_func_index == .none) {
- const decl_index = (try mod.getBuiltinDecl("panic"));
+ const decl_index = (try pt.getBuiltinDecl("panic"));
// decl_index may be an alias; we must find the decl that actually
// owns the function.
try sema.ensureDeclAnalyzed(decl_index);
@@ -26766,17 +27019,17 @@ fn prepareSimplePanic(sema: *Sema) !void {
}
if (mod.null_stack_trace == .none) {
- const stack_trace_ty = try mod.getBuiltinType("StackTrace");
- try stack_trace_ty.resolveFields(mod);
+ const stack_trace_ty = try pt.getBuiltinType("StackTrace");
+ try stack_trace_ty.resolveFields(pt);
const target = mod.getTarget();
- const ptr_stack_trace_ty = try mod.ptrTypeSema(.{
+ const ptr_stack_trace_ty = try pt.ptrTypeSema(.{
.child = stack_trace_ty.toIntern(),
.flags = .{
.address_space = target_util.defaultAddressSpace(target, .global_constant),
},
});
- const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
- mod.null_stack_trace = try mod.intern(.{ .opt = .{
+ const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern());
+ mod.null_stack_trace = try pt.intern(.{ .opt = .{
.ty = opt_ptr_stack_trace_ty.toIntern(),
.val = .none,
} });
@@ -26787,13 +27040,14 @@ fn prepareSimplePanic(sema: *Sema) !void {
/// instructions. This function ensures the panic function will be available to
/// be called during that time.
fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternPool.DeclIndex {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;
try sema.prepareSimplePanic();
- const panic_messages_ty = try mod.getBuiltinType("panic_messages");
+ const panic_messages_ty = try pt.getBuiltinType("panic_messages");
const msg_decl_index = (sema.namespaceLookup(
block,
LazySrcLoc.unneeded,
@@ -26892,7 +27146,8 @@ fn addSafetyCheckExtra(
}
fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, operation: CallOperation) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (!mod.backendSupportsFeature(.panic_fn)) {
_ = try block.addNoOp(.trap);
@@ -26905,8 +27160,8 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.
const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl);
const null_stack_trace = Air.internedToRef(mod.null_stack_trace);
- const opt_usize_ty = try mod.optionalType(.usize_type);
- const null_ret_addr = Air.internedToRef((try mod.intern(.{ .opt = .{
+ const opt_usize_ty = try pt.optionalType(.usize_type);
+ const null_ret_addr = Air.internedToRef((try pt.intern(.{ .opt = .{
.ty = opt_usize_ty.toIntern(),
.val = .none,
} })));
@@ -26921,9 +27176,10 @@ fn panicUnwrapError(
unwrap_err_tag: Air.Inst.Tag,
is_non_err_tag: Air.Inst.Tag,
) !void {
+ const pt = sema.pt;
assert(!parent_block.is_comptime);
const ok = try parent_block.addUnOp(is_non_err_tag, operand);
- if (!sema.mod.comp.formatted_panics) {
+ if (!pt.zcu.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, src, ok, .unwrap_error);
}
const gpa = sema.gpa;
@@ -26942,10 +27198,10 @@ fn panicUnwrapError(
defer fail_block.instructions.deinit(gpa);
{
- if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) {
+ if (!pt.zcu.backendSupportsFeature(.panic_unwrap_error)) {
_ = try fail_block.addNoOp(.trap);
} else {
- const panic_fn = try sema.mod.getBuiltin("panicUnwrapError");
+ const panic_fn = try sema.pt.getBuiltin("panicUnwrapError");
const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
const err_return_trace = try sema.getErrorReturnTrace(&fail_block);
const args: [2]Air.Inst.Ref = .{ err_return_trace, err };
@@ -26965,7 +27221,7 @@ fn panicIndexOutOfBounds(
) !void {
assert(!parent_block.is_comptime);
const ok = try parent_block.addBinOp(cmp_op, index, len);
- if (!sema.mod.comp.formatted_panics) {
+ if (!sema.pt.zcu.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, src, ok, .index_out_of_bounds);
}
try sema.safetyCheckFormatted(parent_block, src, ok, "panicOutOfBounds", &.{ index, len });
@@ -26980,7 +27236,7 @@ fn panicInactiveUnionField(
) !void {
assert(!parent_block.is_comptime);
const ok = try parent_block.addBinOp(.cmp_eq, active_tag, wanted_tag);
- if (!sema.mod.comp.formatted_panics) {
+ if (!sema.pt.zcu.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, src, ok, .inactive_union_field);
}
try sema.safetyCheckFormatted(parent_block, src, ok, "panicInactiveUnionField", &.{ active_tag, wanted_tag });
@@ -26996,7 +27252,8 @@ fn panicSentinelMismatch(
sentinel_index: Air.Inst.Ref,
) !void {
assert(!parent_block.is_comptime);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const expected_sentinel_val = maybe_sentinel orelse return;
const expected_sentinel = Air.internedToRef(expected_sentinel_val.toIntern());
@@ -27004,7 +27261,7 @@ fn panicSentinelMismatch(
const actual_sentinel = if (ptr_ty.isSlice(mod))
try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
else blk: {
- const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod);
+ const elem_ptr_ty = try ptr_ty.elemPtrType(null, pt);
const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
};
@@ -27022,13 +27279,13 @@ fn panicSentinelMismatch(
} else if (sentinel_ty.isSelfComparable(mod, true))
try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
else {
- const panic_fn = try mod.getBuiltin("checkNonScalarSentinel");
+ const panic_fn = try pt.getBuiltin("checkNonScalarSentinel");
const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel };
try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check");
return;
};
- if (!sema.mod.comp.formatted_panics) {
+ if (!pt.zcu.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, src, ok, .sentinel_mismatch);
}
try sema.safetyCheckFormatted(parent_block, src, ok, "panicSentinelMismatch", &.{ expected_sentinel, actual_sentinel });
@@ -27042,7 +27299,9 @@ fn safetyCheckFormatted(
func: []const u8,
args: []const Air.Inst.Ref,
) CompileError!void {
- assert(sema.mod.comp.formatted_panics);
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ assert(zcu.comp.formatted_panics);
const gpa = sema.gpa;
var fail_block: Block = .{
@@ -27058,10 +27317,10 @@ fn safetyCheckFormatted(
defer fail_block.instructions.deinit(gpa);
- if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) {
+ if (!zcu.backendSupportsFeature(.safety_check_formatted)) {
_ = try fail_block.addNoOp(.trap);
} else {
- const panic_fn = try sema.mod.getBuiltin(func);
+ const panic_fn = try pt.getBuiltin(func);
try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check");
}
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
@@ -27102,7 +27361,8 @@ fn fieldVal(
// When editing this function, note that there is corresponding logic to be edited
// in `fieldPtr`. This function takes a value and returns a value.
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
@@ -27120,10 +27380,10 @@ fn fieldVal(
switch (inner_ty.zigTypeTag(mod)) {
.Array => {
if (field_name.eqlSlice("len", ip)) {
- return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
+ return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(mod);
- const result_ty = try mod.ptrTypeSema(.{
+ const result_ty = try pt.ptrTypeSema(.{
.child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
.sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none,
.flags = .{
@@ -27143,7 +27403,7 @@ fn fieldVal(
block,
field_name_src,
"no member named '{}' in '{}'",
- .{ field_name.fmt(ip), object_ty.fmt(mod) },
+ .{ field_name.fmt(ip), object_ty.fmt(pt) },
);
}
},
@@ -27167,7 +27427,7 @@ fn fieldVal(
block,
field_name_src,
"no member named '{}' in '{}'",
- .{ field_name.fmt(ip), object_ty.fmt(mod) },
+ .{ field_name.fmt(ip), object_ty.fmt(pt) },
);
}
}
@@ -27194,7 +27454,7 @@ fn fieldVal(
.error_set_type => |error_set_type| blk: {
if (error_set_type.nameIndex(ip, field_name) != null) break :blk;
return sema.fail(block, src, "no error named '{}' in '{}'", .{
- field_name.fmt(ip), child_type.fmt(mod),
+ field_name.fmt(ip), child_type.fmt(pt),
});
},
.inferred_error_set_type => {
@@ -27210,8 +27470,8 @@ fn fieldVal(
const error_set_type = if (!child_type.isAnyError(mod))
child_type
else
- try mod.singleErrorSetType(field_name);
- return Air.internedToRef((try mod.intern(.{ .err = .{
+ try pt.singleErrorSetType(field_name);
+ return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = error_set_type.toIntern(),
.name = field_name,
} })));
@@ -27220,11 +27480,11 @@ fn fieldVal(
if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
return inst;
}
- try child_type.resolveFields(mod);
+ try child_type.resolveFields(pt);
if (child_type.unionTagType(mod)) |enum_ty| {
if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
const field_index: u32 = @intCast(field_index_usize);
- return Air.internedToRef((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern());
+ return Air.internedToRef((try pt.enumValueFieldIndex(enum_ty, field_index)).toIntern());
}
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
@@ -27236,7 +27496,7 @@ fn fieldVal(
const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
const field_index: u32 = @intCast(field_index_usize);
- const enum_val = try mod.enumValueFieldIndex(child_type, field_index);
+ const enum_val = try pt.enumValueFieldIndex(child_type, field_index);
return Air.internedToRef(enum_val.toIntern());
},
.Struct, .Opaque => {
@@ -27247,7 +27507,7 @@ fn fieldVal(
},
else => {
const msg = msg: {
- const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(mod)});
+ const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)});
errdefer msg.destroy(sema.gpa);
if (child_type.isSlice(mod)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{});
if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{});
@@ -27288,13 +27548,14 @@ fn fieldPtr(
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns a pointer.
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const object_ptr_src = src; // TODO better source location
const object_ptr_ty = sema.typeOf(object_ptr);
const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
.Pointer => object_ptr_ty.childType(mod),
- else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}),
+ else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(pt)}),
};
// Zig allows dereferencing a single pointer during field lookup. Note that
@@ -27310,11 +27571,11 @@ fn fieldPtr(
switch (inner_ty.zigTypeTag(mod)) {
.Array => {
if (field_name.eqlSlice("len", ip)) {
- const int_val = try mod.intValue(Type.usize, inner_ty.arrayLen(mod));
+ const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(mod));
return anonDeclRef(sema, int_val.toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(mod);
- const new_ptr_ty = try mod.ptrTypeSema(.{
+ const new_ptr_ty = try pt.ptrTypeSema(.{
.child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
.sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none,
.flags = .{
@@ -27329,7 +27590,7 @@ fn fieldPtr(
.packed_offset = ptr_info.packed_offset,
});
const ptr_ptr_info = object_ptr_ty.ptrInfo(mod);
- const result_ty = try mod.ptrTypeSema(.{
+ const result_ty = try pt.ptrTypeSema(.{
.child = new_ptr_ty.toIntern(),
.sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
.flags = .{
@@ -27348,7 +27609,7 @@ fn fieldPtr(
block,
field_name_src,
"no member named '{}' in '{}'",
- .{ field_name.fmt(ip), object_ty.fmt(mod) },
+ .{ field_name.fmt(ip), object_ty.fmt(pt) },
);
}
},
@@ -27363,7 +27624,7 @@ fn fieldPtr(
if (field_name.eqlSlice("ptr", ip)) {
const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);
- const result_ty = try mod.ptrTypeSema(.{
+ const result_ty = try pt.ptrTypeSema(.{
.child = slice_ptr_ty.toIntern(),
.flags = .{
.is_const = !attr_ptr_ty.ptrIsMutable(mod),
@@ -27373,7 +27634,7 @@ fn fieldPtr(
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
- return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern());
+ return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, pt)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -27381,7 +27642,7 @@ fn fieldPtr(
try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr);
return field_ptr;
} else if (field_name.eqlSlice("len", ip)) {
- const result_ty = try mod.ptrTypeSema(.{
+ const result_ty = try pt.ptrTypeSema(.{
.child = .usize_type,
.flags = .{
.is_const = !attr_ptr_ty.ptrIsMutable(mod),
@@ -27391,7 +27652,7 @@ fn fieldPtr(
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
- return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern());
+ return Air.internedToRef((try val.ptrField(Value.slice_len_index, pt)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -27403,7 +27664,7 @@ fn fieldPtr(
block,
field_name_src,
"no member named '{}' in '{}'",
- .{ field_name.fmt(ip), object_ty.fmt(mod) },
+ .{ field_name.fmt(ip), object_ty.fmt(pt) },
);
}
},
@@ -27433,7 +27694,7 @@ fn fieldPtr(
break :blk;
}
return sema.fail(block, src, "no error named '{}' in '{}'", .{
- field_name.fmt(ip), child_type.fmt(mod),
+ field_name.fmt(ip), child_type.fmt(pt),
});
},
.inferred_error_set_type => {
@@ -27449,8 +27710,8 @@ fn fieldPtr(
const error_set_type = if (!child_type.isAnyError(mod))
child_type
else
- try mod.singleErrorSetType(field_name);
- return anonDeclRef(sema, try mod.intern(.{ .err = .{
+ try pt.singleErrorSetType(field_name);
+ return anonDeclRef(sema, try pt.intern(.{ .err = .{
.ty = error_set_type.toIntern(),
.name = field_name,
} }));
@@ -27459,11 +27720,11 @@ fn fieldPtr(
if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
return inst;
}
- try child_type.resolveFields(mod);
+ try child_type.resolveFields(pt);
if (child_type.unionTagType(mod)) |enum_ty| {
if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
const field_index_u32: u32 = @intCast(field_index);
- const idx_val = try mod.enumValueFieldIndex(enum_ty, field_index_u32);
+ const idx_val = try pt.enumValueFieldIndex(enum_ty, field_index_u32);
return anonDeclRef(sema, idx_val.toIntern());
}
}
@@ -27477,7 +27738,7 @@ fn fieldPtr(
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
};
const field_index_u32: u32 = @intCast(field_index);
- const idx_val = try mod.enumValueFieldIndex(child_type, field_index_u32);
+ const idx_val = try pt.enumValueFieldIndex(child_type, field_index_u32);
return anonDeclRef(sema, idx_val.toIntern());
},
.Struct, .Opaque => {
@@ -27486,7 +27747,7 @@ fn fieldPtr(
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
- else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}),
+ else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(pt)}),
}
},
.Struct => {
@@ -27533,14 +27794,15 @@ fn fieldCallBind(
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns a pointer.
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const raw_ptr_src = src; // TODO better source location
const raw_ptr_ty = sema.typeOf(raw_ptr);
const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
raw_ptr_ty.childType(mod)
else
- return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)});
+ return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(pt)});
// Optionally dereference a second pointer to get the concrete type.
const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
@@ -27554,7 +27816,7 @@ fn fieldCallBind(
find_field: {
switch (concrete_ty.zigTypeTag(mod)) {
.Struct => {
- try concrete_ty.resolveFields(mod);
+ try concrete_ty.resolveFields(pt);
if (mod.typeToStruct(concrete_ty)) |struct_type| {
const field_index = struct_type.nameIndex(ip, field_name) orelse
break :find_field;
@@ -27563,7 +27825,7 @@ fn fieldCallBind(
return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
} else if (concrete_ty.isTuple(mod)) {
if (field_name.eqlSlice("len", ip)) {
- return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) };
+ return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(mod)) };
}
if (field_name.toUnsigned(ip)) |field_index| {
if (field_index >= concrete_ty.structFieldCount(mod)) break :find_field;
@@ -27580,7 +27842,7 @@ fn fieldCallBind(
}
},
.Union => {
- try concrete_ty.resolveFields(mod);
+ try concrete_ty.resolveFields(pt);
const union_obj = mod.typeToUnion(concrete_ty).?;
_ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field;
const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false);
@@ -27661,7 +27923,7 @@ fn fieldCallBind(
const msg = msg: {
const msg = try sema.errMsg(src, "no field or member function named '{}' in '{}'", .{
field_name.fmt(ip),
- concrete_ty.fmt(mod),
+ concrete_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, concrete_ty);
@@ -27689,8 +27951,9 @@ fn finishFieldCallBind(
field_index: u32,
object_ptr: Air.Inst.Ref,
) CompileError!ResolvedFieldCallee {
- const mod = sema.mod;
- const ptr_field_ty = try mod.ptrTypeSema(.{
+ const pt = sema.pt;
+ const mod = pt.zcu;
+ const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !ptr_ty.ptrIsMutable(mod),
@@ -27701,14 +27964,14 @@ fn finishFieldCallBind(
const container_ty = ptr_ty.childType(mod);
if (container_ty.zigTypeTag(mod) == .Struct) {
if (container_ty.structFieldIsComptime(field_index, mod)) {
- try container_ty.resolveStructFieldInits(mod);
- const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?;
+ try container_ty.resolveStructFieldInits(pt);
+ const default_val = (try container_ty.structFieldValueComptime(pt, field_index)).?;
return .{ .direct = Air.internedToRef(default_val.toIntern()) };
}
}
if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
- const ptr_val = try struct_ptr_val.ptrField(field_index, mod);
+ const ptr_val = try struct_ptr_val.ptrField(field_index, pt);
const pointer = Air.internedToRef(ptr_val.toIntern());
return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
}
@@ -27725,7 +27988,8 @@ fn namespaceLookup(
opt_namespace: InternPool.OptionalNamespaceIndex,
decl_name: InternPool.NullTerminatedString,
) CompileError!?InternPool.DeclIndex {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
if (try sema.lookupInNamespace(block, src, opt_namespace, decl_name, true)) |decl_index| {
const decl = mod.declPtr(decl_index);
@@ -27780,16 +28044,17 @@ fn structFieldPtr(
struct_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
assert(struct_ty.zigTypeTag(mod) == .Struct);
- try struct_ty.resolveFields(mod);
- try struct_ty.resolveLayout(mod);
+ try struct_ty.resolveFields(pt);
+ try struct_ty.resolveLayout(pt);
if (struct_ty.isTuple(mod)) {
if (field_name.eqlSlice("len", ip)) {
- const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod));
+ const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(mod));
return sema.analyzeRef(block, src, len_inst);
}
const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
@@ -27817,14 +28082,15 @@ fn structFieldPtrByIndex(
struct_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
if (struct_ty.isAnonStruct(mod)) {
return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
}
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
- const val = try struct_ptr_val.ptrField(field_index, mod);
+ const val = try struct_ptr_val.ptrField(field_index, pt);
return Air.internedToRef(val.toIntern());
}
@@ -27848,7 +28114,7 @@ fn structFieldPtrByIndex(
try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));
if (struct_type.layout == .@"packed") {
- switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, mod)) {
+ switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) {
.bit_ptr => |packed_offset| {
ptr_ty_data.flags.alignment = parent_align;
ptr_ty_data.packed_offset = packed_offset;
@@ -27861,14 +28127,14 @@ fn structFieldPtrByIndex(
// For extern structs, field alignment might be bigger than type's
// natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the
// second field is aligned as u32.
- const field_offset = struct_ty.structFieldOffset(field_index, mod);
+ const field_offset = struct_ty.structFieldOffset(field_index, pt);
ptr_ty_data.flags.alignment = if (parent_align == .none)
.none
else
@enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
} else {
// Our alignment is capped at the field alignment.
- const field_align = try mod.structFieldAlignmentAdvanced(
+ const field_align = try pt.structFieldAlignmentAdvanced(
struct_type.fieldAlign(ip, field_index),
Type.fromInterned(field_ty),
struct_type.layout,
@@ -27880,11 +28146,11 @@ fn structFieldPtrByIndex(
field_align.min(parent_align);
}
- const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data);
+ const ptr_field_ty = try pt.ptrTypeSema(ptr_ty_data);
if (struct_type.fieldIsComptime(ip, field_index)) {
- try struct_ty.resolveStructFieldInits(mod);
- const val = try mod.intern(.{ .ptr = .{
+ try struct_ty.resolveStructFieldInits(pt);
+ const val = try pt.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
.byte_offset = 0,
@@ -27905,11 +28171,12 @@ fn structFieldVal(
field_name_src: LazySrcLoc,
struct_ty: Type,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
assert(struct_ty.zigTypeTag(mod) == .Struct);
- try struct_ty.resolveFields(mod);
+ try struct_ty.resolveFields(pt);
switch (ip.indexToKey(struct_ty.toIntern())) {
.struct_type => {
@@ -27920,7 +28187,7 @@ fn structFieldVal(
const field_index = struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name);
if (struct_type.fieldIsComptime(ip, field_index)) {
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveStructFieldInits(pt);
return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
}
@@ -27929,15 +28196,15 @@ fn structFieldVal(
return Air.internedToRef(field_val.toIntern());
if (try sema.resolveValue(struct_byval)) |struct_val| {
- if (struct_val.isUndef(mod)) return mod.undefRef(field_ty);
+ if (struct_val.isUndef(mod)) return pt.undefRef(field_ty);
if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
return Air.internedToRef(opv.toIntern());
}
- return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern());
+ return Air.internedToRef((try struct_val.fieldValue(pt, field_index)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
- try field_ty.resolveLayout(mod);
+ try field_ty.resolveLayout(pt);
return block.addStructFieldVal(struct_byval, field_index, field_ty);
},
.anon_struct_type => |anon_struct| {
@@ -27961,9 +28228,10 @@ fn tupleFieldVal(
field_name_src: LazySrcLoc,
tuple_ty: Type,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (field_name.eqlSlice("len", &mod.intern_pool)) {
- return mod.intRef(Type.usize, tuple_ty.structFieldCount(mod));
+ return pt.intRef(Type.usize, tuple_ty.structFieldCount(mod));
}
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty);
@@ -27977,18 +28245,18 @@ fn tupleFieldIndex(
field_name: InternPool.NullTerminatedString,
field_name_src: LazySrcLoc,
) CompileError!u32 {
- const mod = sema.mod;
- const ip = &mod.intern_pool;
+ const pt = sema.pt;
+ const ip = &pt.zcu.intern_pool;
assert(!field_name.eqlSlice("len", ip));
if (field_name.toUnsigned(ip)) |field_index| {
- if (field_index < tuple_ty.structFieldCount(mod)) return field_index;
+ if (field_index < tuple_ty.structFieldCount(pt.zcu)) return field_index;
return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{
- field_name.fmt(ip), tuple_ty.fmt(mod),
+ field_name.fmt(ip), tuple_ty.fmt(pt),
});
}
return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{
- field_name.fmt(ip), tuple_ty.fmt(mod),
+ field_name.fmt(ip), tuple_ty.fmt(pt),
});
}
@@ -28000,12 +28268,13 @@ fn tupleFieldValByIndex(
field_index: u32,
tuple_ty: Type,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const field_ty = tuple_ty.structFieldType(field_index, mod);
if (tuple_ty.structFieldIsComptime(field_index, mod))
- try tuple_ty.resolveStructFieldInits(mod);
- if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
+ try tuple_ty.resolveStructFieldInits(pt);
+ if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| {
return Air.internedToRef(default_value.toIntern());
}
@@ -28014,9 +28283,9 @@ fn tupleFieldValByIndex(
return Air.internedToRef(opv.toIntern());
}
return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) {
- .undef => mod.undefRef(field_ty),
+ .undef => pt.undefRef(field_ty),
.aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
- .bytes => |bytes| try mod.intValue(Type.u8, bytes.at(field_index, &mod.intern_pool)),
+ .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &mod.intern_pool)),
.elems => |elems| Value.fromInterned(elems[field_index]),
.repeated_elem => |elem| Value.fromInterned(elem),
}.toIntern()),
@@ -28025,7 +28294,7 @@ fn tupleFieldValByIndex(
}
try sema.requireRuntimeBlock(block, src, null);
- try field_ty.resolveLayout(mod);
+ try field_ty.resolveLayout(pt);
return block.addStructFieldVal(tuple_byval, field_index, field_ty);
}
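
The same split shows up in the field-access helpers above: resolving field inits and looking up comptime field values now goes through `pt`, while the membership and comptime-ness checks still read from `mod`. A hedged sketch of that pattern, again not part of the commit, with a hypothetical helper name and the usual Sema.zig context assumed:

fn comptimeTupleField(sema: *Sema, tuple_ty: Type, field_index: u32) CompileError!?Air.Inst.Ref {
    const pt = sema.pt;
    const mod = pt.zcu;
    // field-init resolution is driven by the per-thread handle...
    if (tuple_ty.structFieldIsComptime(field_index, mod))
        try tuple_ty.resolveStructFieldInits(pt);
    // ...and comptime field values are looked up through it as well
    if (try tuple_ty.structFieldValueComptime(pt, field_index)) |val| {
        return Air.internedToRef(val.toIntern());
    }
    return null; // runtime field: callers fall through to addStructFieldVal
}
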
@@ -28039,18 +28308,19 @@ fn unionFieldPtr(
union_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
assert(union_ty.zigTypeTag(mod) == .Union);
const union_ptr_ty = sema.typeOf(union_ptr);
const union_ptr_info = union_ptr_ty.ptrInfo(mod);
- try union_ty.resolveFields(mod);
+ try union_ty.resolveFields(pt);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- const ptr_field_ty = try mod.ptrTypeSema(.{
+ const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = union_ptr_info.flags.is_const,
@@ -28061,7 +28331,7 @@ fn unionFieldPtr(
union_ptr_info.flags.alignment
else
try sema.typeAbiAlignment(union_ty);
- const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema);
+ const field_align = try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema);
break :blk union_align.min(field_align);
} else union_ptr_info.flags.alignment,
},
@@ -28087,9 +28357,9 @@ fn unionFieldPtr(
switch (union_obj.getLayout(ip)) {
.auto => if (initializing) {
// Store to the union to initialize the tag.
- const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- const new_union_val = try mod.unionValue(union_ty, field_tag, try mod.undefValue(payload_ty));
+ const new_union_val = try pt.unionValue(union_ty, field_tag, try pt.undefValue(payload_ty));
try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty);
} else {
const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
@@ -28098,7 +28368,7 @@ fn unionFieldPtr(
return sema.failWithUseOfUndef(block, src);
}
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
if (!tag_matches) {
const msg = msg: {
@@ -28117,7 +28387,7 @@ fn unionFieldPtr(
},
.@"packed", .@"extern" => {},
}
- const field_ptr_val = try union_ptr_val.ptrField(field_index, mod);
+ const field_ptr_val = try union_ptr_val.ptrField(field_index, pt);
return Air.internedToRef(field_ptr_val.toIntern());
}
@@ -28125,7 +28395,7 @@ fn unionFieldPtr(
if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
- const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
@@ -28148,21 +28418,22 @@ fn unionFieldVal(
field_name_src: LazySrcLoc,
union_ty: Type,
) CompileError!Air.Inst.Ref {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
assert(union_ty.zigTypeTag(zcu) == .Union);
- try union_ty.resolveFields(zcu);
+ try union_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?);
if (try sema.resolveValue(union_byval)) |union_val| {
- if (union_val.isUndef(zcu)) return zcu.undefRef(field_ty);
+ if (union_val.isUndef(zcu)) return pt.undefRef(field_ty);
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
switch (union_obj.getLayout(ip)) {
.auto => {
@@ -28191,7 +28462,7 @@ fn unionFieldVal(
.@"packed" => if (tag_matches) {
// Fast path - no need to use bitcast logic.
return Air.internedToRef(un.val);
- } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| {
+ } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(pt, .sema), 0)) |field_val| {
return Air.internedToRef(field_val.toIntern());
},
}
@@ -28201,7 +28472,7 @@ fn unionFieldVal(
if (union_obj.getLayout(ip) == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
{
- const wanted_tag_val = try zcu.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
@@ -28210,7 +28481,7 @@ fn unionFieldVal(
_ = try block.addNoOp(.unreach);
return .unreachable_value;
}
- try field_ty.resolveLayout(zcu);
+ try field_ty.resolveLayout(pt);
return block.addStructFieldVal(union_byval, field_index, field_ty);
}
@@ -28224,13 +28495,14 @@ fn elemPtr(
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
.Pointer => indexable_ptr_ty.childType(mod),
- else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}),
+ else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}),
};
try checkIndexable(sema, block, src, indexable_ty);
@@ -28241,7 +28513,7 @@ fn elemPtr(
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
.needed_comptime_reason = "tuple field access index must be comptime-known",
});
- const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod));
+ const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt));
break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
},
else => {
@@ -28267,7 +28539,8 @@ fn elemPtrOneLayerOnly(
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
try checkIndexable(sema, block, src, indexable_ty);
@@ -28279,11 +28552,11 @@ fn elemPtrOneLayerOnly(
const runtime_src = rs: {
const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
- const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
- const elem_ptr = try ptr_val.ptrElem(index, mod);
+ const index: usize = @intCast(try index_val.toUnsignedIntSema(pt));
+ const elem_ptr = try ptr_val.ptrElem(index, pt);
return Air.internedToRef(elem_ptr.toIntern());
};
- const result_ty = try indexable_ty.elemPtrType(null, mod);
+ const result_ty = try indexable_ty.elemPtrType(null, pt);
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addPtrElemPtr(indexable, elem_index, result_ty);
@@ -28297,7 +28570,7 @@ fn elemPtrOneLayerOnly(
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
.needed_comptime_reason = "tuple field access index must be comptime-known",
});
- const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod));
+ const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt));
break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false);
},
else => unreachable, // Guaranteed by checkIndexable
@@ -28319,7 +28592,8 @@ fn elemVal(
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
try checkIndexable(sema, block, src, indexable_ty);
@@ -28337,14 +28611,14 @@ fn elemVal(
const runtime_src = rs: {
const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
- const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
+ const index: usize = @intCast(try index_val.toUnsignedIntSema(pt));
const elem_ty = indexable_ty.elemType2(mod);
- const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
- const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty);
- const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
- const elem_ptr_val = try many_ptr_val.ptrElem(index, mod);
+ const many_ptr_ty = try pt.manyConstPtrType(elem_ty);
+ const many_ptr_val = try pt.getCoerced(indexable_val, many_ptr_ty);
+ const elem_ptr_ty = try pt.singleConstPtrType(elem_ty);
+ const elem_ptr_val = try many_ptr_val.ptrElem(index, pt);
if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
- return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(elem_val, elem_ty)).toIntern());
}
break :rs indexable_src;
};
@@ -28358,7 +28632,7 @@ fn elemVal(
if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent;
const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent;
const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent;
- const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(mod));
+ const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(pt));
if (index != inner_ty.arrayLen(mod)) break :arr_sent;
return Air.internedToRef(sentinel.toIntern());
}
@@ -28376,7 +28650,7 @@ fn elemVal(
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
.needed_comptime_reason = "tuple field access index must be comptime-known",
});
- const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod));
+ const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt));
return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
},
else => unreachable,
@@ -28391,13 +28665,12 @@ fn validateRuntimeElemAccess(
parent_ty: Type,
parent_src: LazySrcLoc,
) CompileError!void {
- const mod = sema.mod;
if (try sema.typeRequiresComptime(elem_ty)) {
const msg = msg: {
const msg = try sema.errMsg(
elem_index_src,
"values of type '{}' must be comptime-known, but index value is runtime-known",
- .{parent_ty.fmt(mod)},
+ .{parent_ty.fmt(sema.pt)},
);
errdefer msg.destroy(sema.gpa);
@@ -28418,10 +28691,11 @@ fn tupleFieldPtr(
field_index: u32,
init: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const tuple_ptr_ty = sema.typeOf(tuple_ptr);
const tuple_ty = tuple_ptr_ty.childType(mod);
- try tuple_ty.resolveFields(mod);
+ try tuple_ty.resolveFields(pt);
const field_count = tuple_ty.structFieldCount(mod);
if (field_count == 0) {
@@ -28435,7 +28709,7 @@ fn tupleFieldPtr(
}
const field_ty = tuple_ty.structFieldType(field_index, mod);
- const ptr_field_ty = try mod.ptrTypeSema(.{
+ const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !tuple_ptr_ty.ptrIsMutable(mod),
@@ -28445,10 +28719,10 @@ fn tupleFieldPtr(
});
if (tuple_ty.structFieldIsComptime(field_index, mod))
- try tuple_ty.resolveStructFieldInits(mod);
+ try tuple_ty.resolveStructFieldInits(pt);
- if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
- return Air.internedToRef((try mod.intern(.{ .ptr = .{
+ if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_val| {
+ return Air.internedToRef((try pt.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.base_addr = .{ .comptime_field = default_val.toIntern() },
.byte_offset = 0,
@@ -28456,7 +28730,7 @@ fn tupleFieldPtr(
}
if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| {
- const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod);
+ const field_ptr_val = try tuple_ptr_val.ptrField(field_index, pt);
return Air.internedToRef(field_ptr_val.toIntern());
}
@@ -28476,9 +28750,10 @@ fn tupleField(
field_index_src: LazySrcLoc,
field_index: u32,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const tuple_ty = sema.typeOf(tuple);
- try tuple_ty.resolveFields(mod);
+ try tuple_ty.resolveFields(pt);
const field_count = tuple_ty.structFieldCount(mod);
if (field_count == 0) {
@@ -28494,20 +28769,20 @@ fn tupleField(
const field_ty = tuple_ty.structFieldType(field_index, mod);
if (tuple_ty.structFieldIsComptime(field_index, mod))
- try tuple_ty.resolveStructFieldInits(mod);
- if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
+ try tuple_ty.resolveStructFieldInits(pt);
+ if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| {
return Air.internedToRef(default_value.toIntern()); // comptime field
}
if (try sema.resolveValue(tuple)) |tuple_val| {
- if (tuple_val.isUndef(mod)) return mod.undefRef(field_ty);
- return Air.internedToRef((try tuple_val.fieldValue(mod, field_index)).toIntern());
+ if (tuple_val.isUndef(mod)) return pt.undefRef(field_ty);
+ return Air.internedToRef((try tuple_val.fieldValue(pt, field_index)).toIntern());
}
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
try sema.requireRuntimeBlock(block, tuple_src, null);
- try field_ty.resolveLayout(mod);
+ try field_ty.resolveLayout(pt);
return block.addStructFieldVal(tuple, field_index, field_ty);
}
@@ -28521,7 +28796,8 @@ fn elemValArray(
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const array_ty = sema.typeOf(array);
const array_sent = array_ty.sentinel(mod);
const array_len = array_ty.arrayLen(mod);
@@ -28537,7 +28813,7 @@ fn elemValArray(
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
if (maybe_index_val) |index_val| {
- const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
+ const index: usize = @intCast(try index_val.toUnsignedIntSema(pt));
if (array_sent) |s| {
if (index == array_len) {
return Air.internedToRef(s.toIntern());
@@ -28550,11 +28826,11 @@ fn elemValArray(
}
if (maybe_undef_array_val) |array_val| {
if (array_val.isUndef(mod)) {
- return mod.undefRef(elem_ty);
+ return pt.undefRef(elem_ty);
}
if (maybe_index_val) |index_val| {
- const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
- const elem_val = try array_val.elemValue(mod, index);
+ const index: usize = @intCast(try index_val.toUnsignedIntSema(pt));
+ const elem_val = try array_val.elemValue(pt, index);
return Air.internedToRef(elem_val.toIntern());
}
}
@@ -28565,7 +28841,7 @@ fn elemValArray(
if (oob_safety and block.wantSafety()) {
// Runtime check is only needed if unable to comptime check
if (maybe_index_val == null) {
- const len_inst = try mod.intRef(Type.usize, array_len);
+ const len_inst = try pt.intRef(Type.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
}
@@ -28589,7 +28865,8 @@ fn elemPtrArray(
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const array_ptr_ty = sema.typeOf(array_ptr);
const array_ty = array_ptr_ty.childType(mod);
const array_sent = array_ty.sentinel(mod) != null;
@@ -28603,7 +28880,7 @@ fn elemPtrArray(
const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr);
// The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
- const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod));
+ const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(pt));
if (index >= array_len_s) {
const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
@@ -28611,14 +28888,14 @@ fn elemPtrArray(
break :o index;
} else null;
- const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod);
+ const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, pt);
if (maybe_undef_array_ptr_val) |array_ptr_val| {
if (array_ptr_val.isUndef(mod)) {
- return mod.undefRef(elem_ptr_ty);
+ return pt.undefRef(elem_ptr_ty);
}
if (offset) |index| {
- const elem_ptr = try array_ptr_val.ptrElem(index, mod);
+ const elem_ptr = try array_ptr_val.ptrElem(index, pt);
return Air.internedToRef(elem_ptr.toIntern());
}
}
@@ -28632,7 +28909,7 @@ fn elemPtrArray(
// Runtime check is only needed if unable to comptime check.
if (oob_safety and block.wantSafety() and offset == null) {
- const len_inst = try mod.intRef(Type.usize, array_len);
+ const len_inst = try pt.intRef(Type.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
}
@@ -28650,7 +28927,8 @@ fn elemValSlice(
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel(mod) != null;
const elem_ty = slice_ty.elemType2(mod);
@@ -28663,19 +28941,19 @@ fn elemValSlice(
if (maybe_slice_val) |slice_val| {
runtime_src = elem_index_src;
- const slice_len = try slice_val.sliceLen(mod);
+ const slice_len = try slice_val.sliceLen(pt);
const slice_len_s = slice_len + @intFromBool(slice_sent);
if (slice_len_s == 0) {
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
}
if (maybe_index_val) |index_val| {
- const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
+ const index: usize = @intCast(try index_val.toUnsignedIntSema(pt));
if (index >= slice_len_s) {
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
- const elem_ptr_ty = try slice_ty.elemPtrType(index, mod);
- const elem_ptr_val = try slice_val.ptrElem(index, mod);
+ const elem_ptr_ty = try slice_ty.elemPtrType(index, pt);
+ const elem_ptr_val = try slice_val.ptrElem(index, pt);
if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
return Air.internedToRef(elem_val.toIntern());
}
@@ -28688,7 +28966,7 @@ fn elemValSlice(
try sema.requireRuntimeBlock(block, src, runtime_src);
if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
- try mod.intRef(Type.usize, try slice_val.sliceLen(mod))
+ try pt.intRef(Type.usize, try slice_val.sliceLen(pt))
else
try block.addTyOp(.slice_len, Type.usize, slice);
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -28707,24 +28985,25 @@ fn elemPtrSlice(
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel(mod) != null;
const maybe_undef_slice_val = try sema.resolveValue(slice);
// The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
- const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod));
+ const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(pt));
break :o index;
} else null;
- const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod);
+ const elem_ptr_ty = try slice_ty.elemPtrType(offset, pt);
if (maybe_undef_slice_val) |slice_val| {
if (slice_val.isUndef(mod)) {
- return mod.undefRef(elem_ptr_ty);
+ return pt.undefRef(elem_ptr_ty);
}
- const slice_len = try slice_val.sliceLen(mod);
+ const slice_len = try slice_val.sliceLen(pt);
const slice_len_s = slice_len + @intFromBool(slice_sent);
if (slice_len_s == 0) {
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
@@ -28734,7 +29013,7 @@ fn elemPtrSlice(
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
- const elem_ptr_val = try slice_val.ptrElem(index, mod);
+ const elem_ptr_val = try slice_val.ptrElem(index, pt);
return Air.internedToRef(elem_ptr_val.toIntern());
}
}
@@ -28747,7 +29026,7 @@ fn elemPtrSlice(
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef(mod))
- break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod));
+ break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
break :len try block.addTyOp(.slice_len, Type.usize, slice);
};
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -28810,11 +29089,12 @@ fn coerceExtra(
opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
if (dest_ty.isGenericPoison()) return inst;
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const dest_ty_src = inst_src; // TODO better source location
- try dest_ty.resolveFields(zcu);
+ try dest_ty.resolveFields(pt);
const inst_ty = sema.typeOf(inst);
- try inst_ty.resolveFields(zcu);
+ try inst_ty.resolveFields(pt);
const target = zcu.getTarget();
// If the types are the same, we can return the operand.
if (dest_ty.eql(inst_ty, zcu))
@@ -28838,12 +29118,12 @@ fn coerceExtra(
if (maybe_inst_val) |val| {
// undefined sets the optional bit also to undefined.
if (val.toIntern() == .undef) {
- return zcu.undefRef(dest_ty);
+ return pt.undefRef(dest_ty);
}
// null to ?T
if (val.toIntern() == .null_value) {
- return Air.internedToRef((try zcu.intern(.{ .opt = .{
+ return Air.internedToRef((try pt.intern(.{ .opt = .{
.ty = dest_ty.toIntern(),
.val = .none,
} })));
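
A second sketch of the comptime-value path rewritten by the `coerceExtra` hunks above: interned values (`undef`, `null`, optionals) are now built through `pt` rather than the Zcu. The function name and the trimmed control flow are assumptions; `pt.undefRef`, `pt.intern`, and `Air.internedToRef` mirror the diff.

```zig
// Illustrative sketch (not from the commit): coercing a comptime-known
// undef/null value into an optional type via the per-thread intern API.
fn exampleCoerceNullToOptional(sema: *Sema, dest_ty: Type, val: Value) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    // undefined sets the optional bit to undefined as well
    if (val.toIntern() == .undef) return pt.undefRef(dest_ty); // was `zcu.undefRef`
    // `null` becomes an interned `.opt` value of the destination type
    assert(val.toIntern() == .null_value); // sketch: remaining coercion paths elided
    return Air.internedToRef((try pt.intern(.{ .opt = .{
        .ty = dest_ty.toIntern(),
        .val = .none,
    } })));
}
```
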
@@ -29018,7 +29298,7 @@ fn coerceExtra(
switch (dest_info.flags.size) {
// coercion to C pointer
.C => switch (inst_ty.zigTypeTag(zcu)) {
- .Null => return Air.internedToRef(try zcu.intern(.{ .ptr = .{
+ .Null => return Air.internedToRef(try pt.intern(.{ .ptr = .{
.ty = dest_ty.toIntern(),
.base_addr = .int,
.byte_offset = 0,
@@ -29063,7 +29343,7 @@ fn coerceExtra(
if (inst_info.flags.size == .Slice) {
assert(dest_info.sentinel == .none);
if (inst_info.sentinel == .none or
- inst_info.sentinel != (try zcu.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
+ inst_info.sentinel != (try pt.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -29112,7 +29392,7 @@ fn coerceExtra(
block,
inst_src,
"array literal requires address-of operator (&) to coerce to slice type '{}'",
- .{dest_ty.fmt(zcu)},
+ .{dest_ty.fmt(pt)},
);
}
@@ -29123,10 +29403,10 @@ fn coerceExtra(
// empty tuple to zero-length slice
// note that this allows coercing to a mutable slice.
if (inst_child_ty.structFieldCount(zcu) == 0) {
- const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema);
- return Air.internedToRef(try zcu.intern(.{ .slice = .{
+ const align_val = try dest_ty.ptrAlignmentAdvanced(pt, .sema);
+ return Air.internedToRef(try pt.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
- .ptr = try zcu.intern(.{ .ptr = .{
+ .ptr = try pt.intern(.{ .ptr = .{
.ty = dest_ty.slicePtrFieldType(zcu).toIntern(),
.base_addr = .int,
.byte_offset = align_val.toByteUnits().?,
@@ -29138,7 +29418,7 @@ fn coerceExtra(
// pointer to tuple to slice
if (!dest_info.flags.is_const) {
const err_msg = err_msg: {
- const err_msg = try sema.errMsg(inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(zcu)});
+ const err_msg = try sema.errMsg(inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(pt)});
errdefer err_msg.destroy(sema.gpa);
try sema.errNote(dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
break :err_msg err_msg;
@@ -29194,12 +29474,12 @@ fn coerceExtra(
// comptime-known integer to other number
if (!(try sema.intFitsInType(val, dest_ty, null))) {
if (!opts.report_err) return error.NotCoercible;
- return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(zcu), val.fmtValue(zcu, sema) });
+ return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(pt), val.fmtValue(pt, sema) });
}
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
- .undef => try zcu.undefRef(dest_ty),
+ .undef => try pt.undefRef(dest_ty),
.int => |int| Air.internedToRef(
- try zcu.intern_pool.getCoercedInts(zcu.gpa, int, dest_ty.toIntern()),
+ try zcu.intern_pool.getCoercedInts(zcu.gpa, pt.tid, int, dest_ty.toIntern()),
),
else => unreachable,
};
@@ -29228,18 +29508,18 @@ fn coerceExtra(
.Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(zcu)) {
.ComptimeFloat => {
const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
- const result_val = try val.floatCast(dest_ty, zcu);
+ const result_val = try val.floatCast(dest_ty, pt);
return Air.internedToRef(result_val.toIntern());
},
.Float => {
if (maybe_inst_val) |val| {
- const result_val = try val.floatCast(dest_ty, zcu);
- if (!val.eql(try result_val.floatCast(inst_ty, zcu), inst_ty, zcu)) {
+ const result_val = try val.floatCast(dest_ty, pt);
+ if (!val.eql(try result_val.floatCast(inst_ty, pt), inst_ty, zcu)) {
return sema.fail(
block,
inst_src,
"type '{}' cannot represent float value '{}'",
- .{ dest_ty.fmt(zcu), val.fmtValue(zcu, sema) },
+ .{ dest_ty.fmt(pt), val.fmtValue(pt, sema) },
);
}
return Air.internedToRef(result_val.toIntern());
@@ -29268,7 +29548,7 @@ fn coerceExtra(
}
break :int;
};
- const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema);
+ const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, pt, .sema);
// TODO implement this compile error
//const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty, zcu)) {
@@ -29276,7 +29556,7 @@ fn coerceExtra(
// block,
// inst_src,
// "type '{}' cannot represent integer value '{}'",
- // .{ dest_ty.fmt(zcu), val },
+ // .{ dest_ty.fmt(pt), val },
// );
//}
return Air.internedToRef(result_val.toIntern());
@@ -29290,10 +29570,10 @@ fn coerceExtra(
const string = zcu.intern_pool.indexToKey(val.toIntern()).enum_literal;
const field_index = dest_ty.enumFieldIndex(string, zcu) orelse {
return sema.fail(block, inst_src, "no field named '{}' in enum '{}'", .{
- string.fmt(&zcu.intern_pool), dest_ty.fmt(zcu),
+ string.fmt(&zcu.intern_pool), dest_ty.fmt(pt),
});
};
- return Air.internedToRef((try zcu.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern());
+ return Air.internedToRef((try pt.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern());
},
.Union => blk: {
// union to its own tag type
@@ -29308,12 +29588,12 @@ fn coerceExtra(
.ErrorUnion => eu: {
if (maybe_inst_val) |inst_val| {
switch (inst_val.toIntern()) {
- .undef => return zcu.undefRef(dest_ty),
+ .undef => return pt.undefRef(dest_ty),
else => switch (zcu.intern_pool.indexToKey(inst_val.toIntern())) {
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| {
const error_set_ty = inst_ty.errorUnionSet(zcu);
- const error_set_val = Air.internedToRef((try zcu.intern(.{ .err = .{
+ const error_set_val = Air.internedToRef((try pt.intern(.{ .err = .{
.ty = error_set_ty.toIntern(),
.name = err_name,
} })));
@@ -29370,7 +29650,7 @@ fn coerceExtra(
if (dest_ty.sentinel(zcu)) |dest_sent| {
const src_sent = inst_ty.sentinel(zcu) orelse break :array_to_array;
- if (dest_sent.toIntern() != (try zcu.getCoerced(src_sent, dest_ty.childType(zcu))).toIntern()) {
+ if (dest_sent.toIntern() != (try pt.getCoerced(src_sent, dest_ty.childType(zcu))).toIntern()) {
break :array_to_array;
}
}
@@ -29414,7 +29694,7 @@ fn coerceExtra(
// undefined to anything. We do this after the big switch above so that
// special logic has a chance to run first, such as `*[N]T` to `[]T` which
// should initialize the length field of the slice.
- if (maybe_inst_val) |val| if (val.toIntern() == .undef) return zcu.undefRef(dest_ty);
+ if (maybe_inst_val) |val| if (val.toIntern() == .undef) return pt.undefRef(dest_ty);
if (!opts.report_err) return error.NotCoercible;
@@ -29434,7 +29714,7 @@ fn coerceExtra(
}
const msg = msg: {
- const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(zcu), inst_ty.fmt(zcu) });
+ const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), inst_ty.fmt(pt) });
errdefer msg.destroy(sema.gpa);
// E!T to T
@@ -29486,7 +29766,7 @@ fn coerceInMemory(
val: Value,
dst_ty: Type,
) CompileError!Air.Inst.Ref {
- return Air.internedToRef((try sema.mod.getCoerced(val, dst_ty)).toIntern());
+ return Air.internedToRef((try sema.pt.getCoerced(val, dst_ty)).toIntern());
}
const InMemoryCoercionResult = union(enum) {
@@ -29607,7 +29887,7 @@ const InMemoryCoercionResult = union(enum) {
}
fn report(res: *const InMemoryCoercionResult, sema: *Sema, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
var cur = res;
while (true) switch (cur.*) {
.ok => unreachable,
@@ -29624,7 +29904,7 @@ const InMemoryCoercionResult = union(enum) {
},
.error_union_payload => |pair| {
try sema.errNote(src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
cur = pair.child;
},
@@ -29637,18 +29917,18 @@ const InMemoryCoercionResult = union(enum) {
.array_sentinel => |sentinel| {
if (sentinel.actual.toIntern() != .unreachable_value) {
try sema.errNote(src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
- sentinel.actual.fmtValue(mod, sema), sentinel.wanted.fmtValue(mod, sema),
+ sentinel.actual.fmtValue(pt, sema), sentinel.wanted.fmtValue(pt, sema),
});
} else {
try sema.errNote(src, msg, "destination array requires '{}' sentinel", .{
- sentinel.wanted.fmtValue(mod, sema),
+ sentinel.wanted.fmtValue(pt, sema),
});
}
break;
},
.array_elem => |pair| {
try sema.errNote(src, msg, "array element type '{}' cannot cast into array element type '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
cur = pair.child;
},
@@ -29660,19 +29940,19 @@ const InMemoryCoercionResult = union(enum) {
},
.vector_elem => |pair| {
try sema.errNote(src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
cur = pair.child;
},
.optional_shape => |pair| {
try sema.errNote(src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
- pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod),
+ pair.actual.optionalChild(pt.zcu).fmt(pt), pair.wanted.optionalChild(pt.zcu).fmt(pt),
});
break;
},
.optional_child => |pair| {
try sema.errNote(src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
cur = pair.child;
},
@@ -29682,7 +29962,7 @@ const InMemoryCoercionResult = union(enum) {
},
.missing_error => |missing_errors| {
for (missing_errors) |err| {
- try sema.errNote(src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)});
+ try sema.errNote(src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&pt.zcu.intern_pool)});
}
break;
},
@@ -29736,7 +30016,7 @@ const InMemoryCoercionResult = union(enum) {
},
.fn_param => |param| {
try sema.errNote(src, msg, "parameter {d} '{}' cannot cast into '{}'", .{
- param.index, param.actual.fmt(mod), param.wanted.fmt(mod),
+ param.index, param.actual.fmt(pt), param.wanted.fmt(pt),
});
cur = param.child;
},
@@ -29746,13 +30026,13 @@ const InMemoryCoercionResult = union(enum) {
},
.fn_return_type => |pair| {
try sema.errNote(src, msg, "return type '{}' cannot cast into return type '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
cur = pair.child;
},
.ptr_child => |pair| {
try sema.errNote(src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
cur = pair.child;
},
@@ -29763,11 +30043,11 @@ const InMemoryCoercionResult = union(enum) {
.ptr_sentinel => |sentinel| {
if (sentinel.actual.toIntern() != .unreachable_value) {
try sema.errNote(src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
- sentinel.actual.fmtValue(mod, sema), sentinel.wanted.fmtValue(mod, sema),
+ sentinel.actual.fmtValue(pt, sema), sentinel.wanted.fmtValue(pt, sema),
});
} else {
try sema.errNote(src, msg, "destination pointer requires '{}' sentinel", .{
- sentinel.wanted.fmtValue(mod, sema),
+ sentinel.wanted.fmtValue(pt, sema),
});
}
break;
@@ -29787,15 +30067,15 @@ const InMemoryCoercionResult = union(enum) {
break;
},
.ptr_allowzero => |pair| {
- const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod);
- const actual_allow_zero = pair.actual.ptrAllowsZero(mod);
+ const wanted_allow_zero = pair.wanted.ptrAllowsZero(pt.zcu);
+ const actual_allow_zero = pair.actual.ptrAllowsZero(pt.zcu);
if (actual_allow_zero and !wanted_allow_zero) {
try sema.errNote(src, msg, "'{}' could have null values which are illegal in type '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
} else {
try sema.errNote(src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
}
break;
@@ -29821,13 +30101,13 @@ const InMemoryCoercionResult = union(enum) {
},
.double_ptr_to_anyopaque => |pair| {
try sema.errNote(src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
break;
},
.slice_to_anyopaque => |pair| {
try sema.errNote(src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{
- pair.actual.fmt(mod), pair.wanted.fmt(mod),
+ pair.actual.fmt(pt), pair.wanted.fmt(pt),
});
try sema.errNote(src, msg, "consider using '.ptr'", .{});
break;
@@ -29864,7 +30144,8 @@ pub fn coerceInMemoryAllowed(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) CompileError!InMemoryCoercionResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (dest_ty.eql(src_ty, mod))
return .ok;
@@ -29968,7 +30249,7 @@ pub fn coerceInMemoryAllowed(
(src_info.sentinel != null and
dest_info.sentinel != null and
dest_info.sentinel.?.eql(
- try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type),
+ try pt.getCoerced(src_info.sentinel.?, dest_info.elem_type),
dest_info.elem_type,
mod,
));
@@ -30045,8 +30326,8 @@ pub fn coerceInMemoryAllowed(
// The memory layout of @Vector(N, iM) is the same as the integer type i(N*M),
// that is to say, the padding bits are not in the same place as the array [N]iM.
// If there's no padding, the bitcast is possible.
- const elem_bit_size = dest_elem_ty.bitSize(mod);
- const elem_abi_byte_size = dest_elem_ty.abiSize(mod);
+ const elem_bit_size = dest_elem_ty.bitSize(pt);
+ const elem_abi_byte_size = dest_elem_ty.abiSize(pt);
if (elem_abi_byte_size * 8 == elem_bit_size)
return .ok;
}
@@ -30081,7 +30362,7 @@ pub fn coerceInMemoryAllowed(
const field_count = dest_ty.structFieldCount(mod);
for (0..field_count) |field_idx| {
if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple;
- if (dest_ty.structFieldAlign(field_idx, mod) != src_ty.structFieldAlign(field_idx, mod)) break :tuple;
+ if (dest_ty.structFieldAlign(field_idx, pt) != src_ty.structFieldAlign(field_idx, pt)) break :tuple;
const dest_field_ty = dest_ty.structFieldType(field_idx, mod);
const src_field_ty = src_ty.structFieldType(field_idx, mod);
const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src);
@@ -30104,7 +30385,8 @@ fn coerceInMemoryAllowedErrorSets(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -30202,7 +30484,8 @@ fn coerceInMemoryAllowedFns(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const dest_info = mod.typeToFunc(dest_ty).?;
@@ -30303,7 +30586,8 @@ fn coerceInMemoryAllowedPtrs(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const dest_info = dest_ptr_ty.ptrInfo(zcu);
const src_info = src_ptr_ty.ptrInfo(zcu);
@@ -30381,7 +30665,7 @@ fn coerceInMemoryAllowedPtrs(
const ok_sent = dest_info.sentinel == .none or src_info.flags.size == .C or
(src_info.sentinel != .none and
- dest_info.sentinel == try zcu.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child));
+ dest_info.sentinel == try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child));
if (!ok_sent) {
return InMemoryCoercionResult{ .ptr_sentinel = .{
.actual = switch (src_info.sentinel) {
@@ -30432,7 +30716,8 @@ fn coerceVarArgParam(
) !Air.Inst.Ref {
if (block.is_typeof) return inst;
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const uncasted_ty = sema.typeOf(inst);
const coerced = switch (uncasted_ty.zigTypeTag(mod)) {
// TODO consider casting to c_int/f64 if they fit
@@ -30449,9 +30734,9 @@ fn coerceVarArgParam(
},
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: {
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
const double_bits = target.c_type_bit_size(.double);
- const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
+ const inst_bits = uncasted_ty.floatBits(target);
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
32 => break :float try sema.coerce(block, Type.f32, inst, inst_src),
@@ -30461,7 +30746,7 @@ fn coerceVarArgParam(
},
else => if (uncasted_ty.isAbiInt(mod)) int: {
if (!try sema.validateExternType(uncasted_ty, .param_ty)) break :int inst;
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
const uncasted_info = uncasted_ty.intInfo(mod);
if (uncasted_info.bits <= target.c_type_bit_size(switch (uncasted_info.signedness) {
.signed => .int,
@@ -30491,7 +30776,7 @@ fn coerceVarArgParam(
const coerced_ty = sema.typeOf(coerced);
if (!try sema.validateExternType(coerced_ty, .param_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, inst_src, coerced_ty, .param_ty);
@@ -30526,7 +30811,8 @@ fn storePtr2(
operand_src: LazySrcLoc,
air_tag: Air.Inst.Tag,
) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ptr_ty = sema.typeOf(ptr);
if (ptr_ty.isConstPtr(mod))
return sema.fail(block, ptr_src, "cannot assign to constant", .{});
@@ -30548,7 +30834,7 @@ fn storePtr2(
while (i < field_count) : (i += 1) {
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
- const elem_index = try mod.intRef(Type.usize, i);
+ const elem_index = try pt.intRef(Type.usize, i);
const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
@@ -30620,7 +30906,7 @@ fn storePtr2(
return;
}
return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
- ptr_ty.fmt(sema.mod),
+ ptr_ty.fmt(pt),
});
}
@@ -30734,7 +31020,8 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins
/// pointer. Only if the final element type matches the vector element type, and the
/// lengths match.
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const array_ty = sema.typeOf(ptr).childType(mod);
if (array_ty.zigTypeTag(mod) != .Array) return null;
var ptr_ref = ptr;
@@ -30751,7 +31038,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
// We have a pointer-to-array and a pointer-to-vector. If the elements and
// lengths match, return the result.
- if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and
+ if (array_ty.childType(mod).eql(vector_ty.childType(mod), mod) and
array_ty.arrayLen(mod) == vector_ty.vectorLen(mod))
{
return ptr_ref;
@@ -30770,17 +31057,18 @@ fn storePtrVal(
operand_val: Value,
operand_ty: Type,
) !void {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
// TODO: audit use sites to eliminate this coercion
- const coerced_operand_val = try zcu.getCoerced(operand_val, operand_ty);
+ const coerced_operand_val = try pt.getCoerced(operand_val, operand_ty);
// TODO: audit use sites to eliminate this coercion
- const ptr_ty = try zcu.ptrType(info: {
+ const ptr_ty = try pt.ptrType(info: {
var info = ptr_val.typeOf(zcu).ptrInfo(zcu);
info.child = operand_ty.toIntern();
break :info info;
});
- const coerced_ptr_val = try zcu.getCoerced(ptr_val, ptr_ty);
+ const coerced_ptr_val = try pt.getCoerced(ptr_val, ptr_ty);
switch (try sema.storeComptimePtr(block, src, coerced_ptr_val, coerced_operand_val)) {
.success => {},
@@ -30800,13 +31088,13 @@ fn storePtrVal(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout",
- .{ty.fmt(zcu)},
+ .{ty.fmt(pt)},
),
.out_of_bounds => |ty| return sema.fail(
block,
src,
"dereference of '{}' exceeds bounds of containing decl of type '{}'",
- .{ ptr_ty.fmt(zcu), ty.fmt(zcu) },
+ .{ ptr_ty.fmt(pt), ty.fmt(pt) },
),
.exceeds_host_size => return sema.fail(block, src, "bit-pointer target exceeds host size", .{}),
}
@@ -30820,31 +31108,32 @@ fn bitCast(
inst_src: LazySrcLoc,
operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const zcu = sema.mod;
- try dest_ty.resolveLayout(zcu);
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ try dest_ty.resolveLayout(pt);
const old_ty = sema.typeOf(inst);
- try old_ty.resolveLayout(zcu);
+ try old_ty.resolveLayout(pt);
- const dest_bits = dest_ty.bitSize(zcu);
- const old_bits = old_ty.bitSize(zcu);
+ const dest_bits = dest_ty.bitSize(pt);
+ const old_bits = old_ty.bitSize(pt);
if (old_bits != dest_bits) {
return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
- dest_ty.fmt(zcu),
+ dest_ty.fmt(pt),
dest_bits,
- old_ty.fmt(zcu),
+ old_ty.fmt(pt),
old_bits,
});
}
if (try sema.resolveValue(inst)) |val| {
if (val.isUndef(zcu))
- return zcu.undefRef(dest_ty);
+ return pt.undefRef(dest_ty);
if (old_ty.zigTypeTag(zcu) == .ErrorSet and dest_ty.zigTypeTag(zcu) == .ErrorSet) {
// Special case: we sometimes call `bitCast` on error set values, but they
// don't have a well-defined layout, so we can't use `bitCastVal` on them.
- return Air.internedToRef((try zcu.getCoerced(val, dest_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(val, dest_ty)).toIntern());
}
if (try sema.bitCastVal(val, dest_ty, 0, 0, 0)) |result_val| {
return Air.internedToRef(result_val.toIntern());
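
One more sketch, covering the layout and size queries the `bitCast` hunk above migrates: `resolveLayout`, `bitSize`, and type formatting in diagnostics all take `pt` now. The helper name and the shortened error message are assumptions; the call shapes follow the hunk.

```zig
// Illustrative sketch (not from the commit): a bit-size equality check in the
// style of bitCast, with layout resolution and size queries routed through `pt`.
fn exampleCheckSameBitSize(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_ty: Type,
    old_ty: Type,
) CompileError!void {
    const pt = sema.pt;
    try dest_ty.resolveLayout(pt); // was `resolveLayout(zcu)`
    try old_ty.resolveLayout(pt);
    const dest_bits = dest_ty.bitSize(pt); // was `bitSize(zcu)`
    const old_bits = old_ty.bitSize(pt);
    if (old_bits != dest_bits) {
        return sema.fail(block, src, "size mismatch: '{}' has {d} bits but '{}' has {d} bits", .{
            dest_ty.fmt(pt), dest_bits, old_ty.fmt(pt), old_bits,
        });
    }
}
```
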
@@ -30862,16 +31151,17 @@ fn coerceArrayPtrToSlice(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (try sema.resolveValue(inst)) |val| {
const ptr_array_ty = sema.typeOf(inst);
const array_ty = ptr_array_ty.childType(mod);
const slice_ptr_ty = dest_ty.slicePtrFieldType(mod);
- const slice_ptr = try mod.getCoerced(val, slice_ptr_ty);
- const slice_val = try mod.intern(.{ .slice = .{
+ const slice_ptr = try pt.getCoerced(val, slice_ptr_ty);
+ const slice_val = try pt.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
.ptr = slice_ptr.toIntern(),
- .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
+ .len = (try pt.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
} });
return Air.internedToRef(slice_val);
}
@@ -30880,7 +31170,8 @@ fn coerceArrayPtrToSlice(
}
fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const dest_info = dest_ty.ptrInfo(mod);
const inst_info = inst_ty.ptrInfo(mod);
const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(mod) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(mod) == 0 or
@@ -30913,12 +31204,12 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
const inst_align = if (inst_info.flags.alignment != .none)
inst_info.flags.alignment
else
- Type.fromInterned(inst_info.child).abiAlignment(mod);
+ Type.fromInterned(inst_info.child).abiAlignment(pt);
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
- Type.fromInterned(dest_info.child).abiAlignment(mod);
+ Type.fromInterned(dest_info.child).abiAlignment(pt);
if (dest_align.compare(.gt, inst_align)) {
in_memory_result.* = .{ .ptr_alignment = .{
@@ -30937,15 +31228,16 @@ fn coerceCompatiblePtrs(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_ty = sema.typeOf(inst);
if (try sema.resolveValue(inst)) |val| {
if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
- return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
+ return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)});
}
// The comptime Value representation is compatible with both types.
return Air.internedToRef(
- (try mod.getCoerced(val, dest_ty)).toIntern(),
+ (try pt.getCoerced(val, dest_ty)).toIntern(),
);
}
try sema.requireRuntimeBlock(block, inst_src, null);
@@ -30979,14 +31271,15 @@ fn coerceEnumToUnion(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const inst_ty = sema.typeOf(inst);
const tag_ty = union_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{
- union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
+ union_ty.fmt(pt), inst_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(union_ty_src, msg, "cannot coerce enum to untagged union", .{});
@@ -30998,15 +31291,15 @@ fn coerceEnumToUnion(
const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
- const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
+ const field_index = union_ty.unionTagFieldIndex(val, pt.zcu) orelse {
return sema.fail(block, inst_src, "union '{}' has no tag with value '{}'", .{
- union_ty.fmt(sema.mod), val.fmtValue(sema.mod, sema),
+ union_ty.fmt(pt), val.fmtValue(pt, sema),
});
};
const union_obj = mod.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- try field_ty.resolveFields(mod);
+ try field_ty.resolveFields(pt);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{});
@@ -31025,8 +31318,8 @@ fn coerceEnumToUnion(
const msg = msg: {
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
const msg = try sema.errMsg(inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{
- inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod),
- field_ty.fmt(sema.mod), field_name.fmt(ip),
+ inst_ty.fmt(pt), union_ty.fmt(pt),
+ field_ty.fmt(pt), field_name.fmt(ip),
});
errdefer msg.destroy(sema.gpa);
@@ -31039,7 +31332,7 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(block, msg);
};
- return Air.internedToRef((try mod.unionValue(union_ty, val, opv)).toIntern());
+ return Air.internedToRef((try pt.unionValue(union_ty, val, opv)).toIntern());
}
try sema.requireRuntimeBlock(block, inst_src, null);
@@ -31047,7 +31340,7 @@ fn coerceEnumToUnion(
if (tag_ty.isNonexhaustiveEnum(mod)) {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{
- union_ty.fmt(sema.mod),
+ union_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, tag_ty);
@@ -31066,7 +31359,7 @@ fn coerceEnumToUnion(
const err_msg = msg orelse try sema.errMsg(
inst_src,
"runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
- .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
+ .{ tag_ty.fmt(pt), union_ty.fmt(pt) },
);
msg = err_msg;
@@ -31081,7 +31374,7 @@ fn coerceEnumToUnion(
}
// If the union has all fields 0 bits, the union value is just the enum value.
- if (union_ty.unionHasAllZeroBitFieldTypes(mod)) {
+ if (union_ty.unionHasAllZeroBitFieldTypes(pt)) {
return block.addBitCast(union_ty, enum_tag);
}
@@ -31089,7 +31382,7 @@ fn coerceEnumToUnion(
const msg = try sema.errMsg(
inst_src,
"runtime coercion from enum '{}' to union '{}' which has non-void fields",
- .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
+ .{ tag_ty.fmt(pt), union_ty.fmt(pt) },
);
errdefer msg.destroy(sema.gpa);
@@ -31099,7 +31392,7 @@ fn coerceEnumToUnion(
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
field_name.fmt(ip),
- field_ty.fmt(sema.mod),
+ field_ty.fmt(pt),
});
}
try sema.addDeclaredHereNote(msg, union_ty);
@@ -31116,7 +31409,8 @@ fn coerceAnonStructToUnion(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const inst_ty = sema.typeOf(inst);
const field_info: union(enum) {
@@ -31174,7 +31468,8 @@ fn coerceAnonStructToUnionPtrs(
ptr_anon_struct: Air.Inst.Ref,
anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const union_ty = ptr_union_ty.childType(mod);
const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src);
@@ -31189,7 +31484,8 @@ fn coerceAnonStructToStructPtrs(
ptr_anon_struct: Air.Inst.Ref,
anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const struct_ty = ptr_struct_ty.childType(mod);
const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src);
@@ -31205,7 +31501,8 @@ fn coerceArrayLike(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_ty = sema.typeOf(inst);
const target = mod.getTarget();
@@ -31226,7 +31523,7 @@ fn coerceArrayLike(
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{
- dest_ty.fmt(mod), inst_ty.fmt(mod),
+ dest_ty.fmt(pt), inst_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(dest_ty_src, msg, "destination has length {d}", .{dest_len});
@@ -31270,7 +31567,7 @@ fn coerceArrayLike(
var runtime_src: ?LazySrcLoc = null;
for (element_vals, element_refs, 0..) |*val, *ref, i| {
- const index_ref = Air.internedToRef((try mod.intValue(Type.usize, i)).toIntern());
+ const index_ref = Air.internedToRef((try pt.intValue(Type.usize, i)).toIntern());
const src = inst_src; // TODO better source location
const elem_src = inst_src; // TODO better source location
const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
@@ -31290,7 +31587,7 @@ fn coerceArrayLike(
return block.addAggregateInit(dest_ty, element_refs);
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = element_vals },
} })));
@@ -31305,7 +31602,8 @@ fn coerceTupleToArray(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const inst_ty = sema.typeOf(inst);
const inst_len = inst_ty.arrayLen(mod);
const dest_len = dest_ty.arrayLen(mod);
@@ -31313,7 +31611,7 @@ fn coerceTupleToArray(
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{
- dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
+ dest_ty.fmt(pt), inst_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(dest_ty_src, msg, "destination has length {d}", .{dest_len});
@@ -31355,7 +31653,7 @@ fn coerceTupleToArray(
return block.addAggregateInit(dest_ty, element_refs);
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .elems = element_vals },
} })));
@@ -31370,11 +31668,12 @@ fn coerceTupleToSlicePtrs(
ptr_tuple: Air.Inst.Ref,
tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const slice_info = slice_ty.ptrInfo(mod);
- const array_ty = try mod.arrayType(.{
+ const array_ty = try pt.arrayType(.{
.len = tuple_ty.structFieldCount(mod),
.sentinel = slice_info.sentinel,
.child = slice_info.child,
@@ -31396,7 +31695,8 @@ fn coerceTupleToArrayPtrs(
ptr_tuple: Air.Inst.Ref,
tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const ptr_info = ptr_array_ty.ptrInfo(mod);
const array_ty = Type.fromInterned(ptr_info.child);
@@ -31417,10 +31717,11 @@ fn coerceTupleToStruct(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- try struct_ty.resolveFields(mod);
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveFields(pt);
+ try struct_ty.resolveStructFieldInits(pt);
if (struct_ty.isTupleOrAnonStruct(mod)) {
return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
@@ -31461,7 +31762,7 @@ fn coerceTupleToStruct(
};
const field_init = Value.fromInterned(struct_type.field_inits.get(ip)[struct_field_index]);
- if (!init_val.eql(field_init, struct_field_ty, sema.mod)) {
+ if (!init_val.eql(field_init, struct_field_ty, pt.zcu)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, tuple_field_index);
}
}
@@ -31512,7 +31813,7 @@ fn coerceTupleToStruct(
return block.addAggregateInit(struct_ty, field_refs);
}
- const struct_val = try mod.intern(.{ .aggregate = .{
+ const struct_val = try pt.intern(.{ .aggregate = .{
.ty = struct_ty.toIntern(),
.storage = .{ .elems = field_vals },
} });
@@ -31529,7 +31830,8 @@ fn coerceTupleToTuple(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
@@ -31594,7 +31896,7 @@ fn coerceTupleToTuple(
});
};
- if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), sema.mod)) {
+ if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), pt.zcu)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
@@ -31659,7 +31961,7 @@ fn coerceTupleToTuple(
return block.addAggregateInit(tuple_ty, field_refs);
}
- return Air.internedToRef((try mod.intern(.{ .aggregate = .{
+ return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = tuple_ty.toIntern(),
.storage = .{ .elems = field_vals },
} })));
@@ -31689,17 +31991,19 @@ fn addReferenceEntry(
src: LazySrcLoc,
referenced_unit: AnalUnit,
) !void {
- if (sema.mod.comp.reference_trace == 0) return;
+ const zcu = sema.pt.zcu;
+ if (zcu.comp.reference_trace == 0) return;
const gop = try sema.references.getOrPut(sema.gpa, referenced_unit);
if (gop.found_existing) return;
// TODO: we need to figure out how to model inline calls here.
// They aren't references in the analysis sense, but ought to show up in the reference trace!
// Would representing inline calls in the reference table cause excessive memory usage?
- try sema.mod.addUnitReference(sema.ownerUnit(), referenced_unit, src);
+ try zcu.addUnitReference(sema.ownerUnit(), referenced_unit, src);
}
pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const decl = mod.declPtr(decl_index);
if (decl.analysis == .in_progress) {
@@ -31710,7 +32014,7 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile
return sema.failWithOwnedErrorMsg(null, msg);
}
- mod.ensureDeclAnalyzed(decl_index) catch |err| {
+ pt.ensureDeclAnalyzed(decl_index) catch |err| {
if (sema.owner_func_index != .none) {
ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
} else {
@@ -31721,9 +32025,10 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile
}
fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- mod.ensureFuncBodyAnalyzed(func) catch |err| {
+ pt.ensureFuncBodyAnalyzed(func) catch |err| {
if (sema.owner_func_index != .none) {
ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
} else {
@@ -31734,15 +32039,15 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void
}
fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
- const mod = sema.mod;
- const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque);
- return Value.fromInterned((try mod.intern(.{ .opt = .{
- .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
- .val = if (opt_val) |val| (try mod.getCoerced(
+ const pt = sema.pt;
+ const ptr_anyopaque_ty = try pt.singleConstPtrType(Type.anyopaque);
+ return Value.fromInterned(try pt.intern(.{ .opt = .{
+ .ty = (try pt.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
+ .val = if (opt_val) |val| (try pt.getCoerced(
Value.fromInterned(try sema.refValue(val.toIntern())),
ptr_anyopaque_ty,
)).toIntern() else .none,
- } })));
+ } }));
}
fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref {
@@ -31754,7 +32059,8 @@ fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex
/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps
/// this function with `analyze_fn_body` set to true.
fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index }));
try sema.ensureDeclAnalyzed(decl_index);
@@ -31767,7 +32073,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl
});
// TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type
try sema.declareDependency(.{ .decl_val = decl_index });
- const ptr_ty = try mod.ptrTypeSema(.{
+ const ptr_ty = try pt.ptrTypeSema(.{
.child = decl_val.typeOf(mod).toIntern(),
.flags = .{
.alignment = owner_decl.alignment,
@@ -31778,7 +32084,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl
if (analyze_fn_body) {
try sema.maybeQueueFuncBodyAnalysis(src, decl_index);
}
- return Air.internedToRef((try mod.intern(.{ .ptr = .{
+ return Air.internedToRef((try pt.intern(.{ .ptr = .{
.ty = ptr_ty.toIntern(),
.base_addr = .{ .decl = decl_index },
.byte_offset = 0,
@@ -31786,7 +32092,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl
}
fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) !void {
- const mod = sema.mod;
+ const mod = sema.pt.zcu;
const decl = mod.declPtr(decl_index);
const decl_val = try decl.valueOrFail();
if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return;
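
A final sketch of the pointer-construction pattern used by `analyzeDeclRefInner` above: pointer types and interned decl pointers are created through `pt`. The helper name is an assumption, as is relying on default pointer flags in `ptrTypeSema`; `pt.ptrTypeSema` and the `.ptr` intern key are taken from the hunk.

```zig
// Illustrative sketch (not from the commit): building a typed pointer to a decl
// with the per-thread type/value constructors.
fn exampleDeclPtr(sema: *Sema, decl_index: InternPool.DeclIndex, child: Type) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    // was `mod.ptrTypeSema(...)`; flags are left at their defaults in this sketch
    const ptr_ty = try pt.ptrTypeSema(.{ .child = child.toIntern() });
    return Air.internedToRef((try pt.intern(.{ .ptr = .{
        .ty = ptr_ty.toIntern(),
        .base_addr = .{ .decl = decl_index },
        .byte_offset = 0,
    } })));
}
```
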
@@ -31801,7 +32107,8 @@ fn analyzeRef(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const operand_ty = sema.typeOf(operand);
if (try sema.resolveValue(operand)) |val| {
@@ -31814,14 +32121,14 @@ fn analyzeRef(
try sema.requireRuntimeBlock(block, src, null);
const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local);
- const ptr_type = try mod.ptrTypeSema(.{
+ const ptr_type = try pt.ptrTypeSema(.{
.child = operand_ty.toIntern(),
.flags = .{
.is_const = true,
.address_space = address_space,
},
});
- const mut_ptr_type = try mod.ptrTypeSema(.{
+ const mut_ptr_type = try pt.ptrTypeSema(.{
.child = operand_ty.toIntern(),
.flags = .{ .address_space = address_space },
});
@@ -31839,14 +32146,15 @@ fn analyzeLoad(
ptr: Air.Inst.Ref,
ptr_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ptr_ty = sema.typeOf(ptr);
const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
.Pointer => ptr_ty.childType(mod),
- else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
+ else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}),
};
if (elem_ty.zigTypeTag(mod) == .Opaque) {
- return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(mod)});
+ return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(pt)});
}
if (try sema.typeHasOnePossibleValue(elem_ty)) |opv| {
@@ -31868,7 +32176,7 @@ fn analyzeLoad(
return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
}
return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
- ptr_ty.fmt(sema.mod),
+ ptr_ty.fmt(pt),
});
}
@@ -31882,10 +32190,11 @@ fn analyzeSlicePtr(
slice: Air.Inst.Ref,
slice_ty: Type,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const result_ty = slice_ty.slicePtrFieldType(mod);
if (try sema.resolveValue(slice)) |val| {
- if (val.isUndef(mod)) return mod.undefRef(result_ty);
+ if (val.isUndef(mod)) return pt.undefRef(result_ty);
return Air.internedToRef(val.slicePtr(mod).toIntern());
}
try sema.requireRuntimeBlock(block, slice_src, null);
@@ -31899,11 +32208,12 @@ fn analyzeOptionalSlicePtr(
opt_slice: Air.Inst.Ref,
opt_slice_ty: Type,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const result_ty = opt_slice_ty.optionalChild(mod).slicePtrFieldType(mod);
if (try sema.resolveValue(opt_slice)) |opt_val| {
- if (opt_val.isUndef(mod)) return mod.undefRef(result_ty);
+ if (opt_val.isUndef(mod)) return pt.undefRef(result_ty);
const slice_ptr: InternPool.Index = if (opt_val.optionalValue(mod)) |val|
val.slicePtr(mod).toIntern()
else
@@ -31924,12 +32234,13 @@ fn analyzeSliceLen(
src: LazySrcLoc,
slice_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (try sema.resolveValue(slice_inst)) |slice_val| {
if (slice_val.isUndef(mod)) {
- return mod.undefRef(Type.usize);
+ return pt.undefRef(Type.usize);
}
- return mod.intRef(Type.usize, try slice_val.sliceLen(mod));
+ return pt.intRef(Type.usize, try slice_val.sliceLen(pt));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.slice_len, Type.usize, slice_inst);
@@ -31942,11 +32253,12 @@ fn analyzeIsNull(
operand: Air.Inst.Ref,
invert_logic: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const result_ty = Type.bool;
if (try sema.resolveValue(operand)) |opt_val| {
if (opt_val.isUndef(mod)) {
- return mod.undefRef(result_ty);
+ return pt.undefRef(result_ty);
}
const is_null = opt_val.isNull(mod);
const bool_value = if (invert_logic) !is_null else is_null;
@@ -31972,7 +32284,8 @@ fn analyzePtrIsNonErrComptimeOnly(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ptr_ty = sema.typeOf(operand);
assert(ptr_ty.zigTypeTag(mod) == .Pointer);
const child_ty = ptr_ty.childType(mod);
@@ -31994,7 +32307,8 @@ fn analyzeIsNonErrComptimeOnly(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const operand_ty = sema.typeOf(operand);
const ot = operand_ty.zigTypeTag(mod);
@@ -32014,7 +32328,7 @@ fn analyzeIsNonErrComptimeOnly(
else => {},
}
} else if (operand == .undef) {
- return mod.undefRef(Type.bool);
+ return pt.undefRef(Type.bool);
} else if (@intFromEnum(operand) < InternPool.static_len) {
// None of the ref tags can be errors.
return .bool_true;
@@ -32098,7 +32412,7 @@ fn analyzeIsNonErrComptimeOnly(
if (maybe_operand_val) |err_union| {
if (err_union.isUndef(mod)) {
- return mod.undefRef(Type.bool);
+ return pt.undefRef(Type.bool);
}
if (err_union.getErrorName(mod) == .none) {
return .bool_true;
@@ -32153,13 +32467,14 @@ fn analyzeSlice(
end_src: LazySrcLoc,
by_length: bool,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
// Slice expressions can operate on a variable whose type is an array. This requires
// the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
const ptr_ptr_ty = sema.typeOf(ptr_ptr);
const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
.Pointer => ptr_ptr_ty.childType(mod),
- else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}),
+ else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(pt)}),
};
var array_ty = ptr_ptr_child_ty;
@@ -32210,8 +32525,8 @@ fn analyzeSlice(
msg,
"expected '{}', found '{}'",
.{
- Value.zero_comptime_int.fmtValue(mod, sema),
- start_value.fmtValue(mod, sema),
+ Value.zero_comptime_int.fmtValue(pt, sema),
+ start_value.fmtValue(pt, sema),
},
);
break :msg msg;
@@ -32226,8 +32541,8 @@ fn analyzeSlice(
msg,
"expected '{}', found '{}'",
.{
- Value.one_comptime_int.fmtValue(mod, sema),
- end_value.fmtValue(mod, sema),
+ Value.one_comptime_int.fmtValue(pt, sema),
+ end_value.fmtValue(pt, sema),
},
);
break :msg msg;
@@ -32240,17 +32555,17 @@ fn analyzeSlice(
block,
end_src,
"end index {} out of bounds for slice of single-item pointer",
- .{end_value.fmtValue(mod, sema)},
+ .{end_value.fmtValue(pt, sema)},
);
}
}
- array_ty = try mod.arrayType(.{
+ array_ty = try pt.arrayType(.{
.len = 1,
.child = double_child_ty.toIntern(),
});
const ptr_info = ptr_ptr_child_ty.ptrInfo(mod);
- slice_ty = try mod.ptrType(.{
+ slice_ty = try pt.ptrType(.{
.child = array_ty.toIntern(),
.flags = .{
.alignment = ptr_info.flags.alignment,
@@ -32286,7 +32601,7 @@ fn analyzeSlice(
elem_ty = ptr_ptr_child_ty.childType(mod);
},
},
- else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
+ else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(pt)}),
}
const ptr = if (slice_ty.isSlice(mod))
@@ -32297,7 +32612,7 @@ fn analyzeSlice(
assert(manyptr_ty_key.flags.size == .One);
manyptr_ty_key.child = elem_ty.toIntern();
manyptr_ty_key.flags.size = .Many;
- break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src);
+ break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src);
} else ptr_or_slice;
const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
@@ -32311,7 +32626,7 @@ fn analyzeSlice(
var end_is_len = uncasted_end_opt == .none;
const end = e: {
if (array_ty.zigTypeTag(mod) == .Array) {
- const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
+ const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(mod));
if (!end_is_len) {
const end = if (by_length) end: {
@@ -32320,7 +32635,7 @@ fn analyzeSlice(
break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
- const len_s_val = try mod.intValue(
+ const len_s_val = try pt.intValue(
Type.usize,
array_ty.arrayLenIncludingSentinel(mod),
);
@@ -32335,8 +32650,8 @@ fn analyzeSlice(
end_src,
"end index {} out of bounds for array of length {}{s}",
.{
- end_val.fmtValue(mod, sema),
- len_val.fmtValue(mod, sema),
+ end_val.fmtValue(pt, sema),
+ len_val.fmtValue(pt, sema),
sentinel_label,
},
);
@@ -32366,9 +32681,9 @@ fn analyzeSlice(
return sema.fail(block, src, "slice of undefined", .{});
}
const has_sentinel = slice_ty.sentinel(mod) != null;
- const slice_len = try slice_val.sliceLen(mod);
+ const slice_len = try slice_val.sliceLen(pt);
const len_plus_sent = slice_len + @intFromBool(has_sentinel);
- const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
+ const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent);
if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
const sentinel_label: []const u8 = if (has_sentinel)
" +1 (sentinel)"
@@ -32380,8 +32695,8 @@ fn analyzeSlice(
end_src,
"end index {} out of bounds for slice of length {d}{s}",
.{
- end_val.fmtValue(mod, sema),
- try slice_val.sliceLen(mod),
+ end_val.fmtValue(pt, sema),
+ try slice_val.sliceLen(pt),
sentinel_label,
},
);
@@ -32390,7 +32705,7 @@ fn analyzeSlice(
// If the slice has a sentinel, we consider end_is_len
// is only true if it equals the length WITHOUT the
// sentinel, so we don't add a sentinel type.
- const slice_len_val = try mod.intValue(Type.usize, slice_len);
+ const slice_len_val = try pt.intValue(Type.usize, slice_len);
if (end_val.eql(slice_len_val, Type.usize, mod)) {
end_is_len = true;
}
@@ -32440,21 +32755,21 @@ fn analyzeSlice(
start_src,
"start index {} is larger than end index {}",
.{
- start_val.fmtValue(mod, sema),
- end_val.fmtValue(mod, sema),
+ start_val.fmtValue(pt, sema),
+ end_val.fmtValue(pt, sema),
},
);
}
checked_start_lte_end = true;
if (try sema.resolveValue(new_ptr)) |ptr_val| sentinel_check: {
const expected_sentinel = sentinel orelse break :sentinel_check;
- const start_int = start_val.getUnsignedInt(mod).?;
- const end_int = end_val.getUnsignedInt(mod).?;
+ const start_int = start_val.getUnsignedInt(pt).?;
+ const end_int = end_val.getUnsignedInt(pt).?;
const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
- const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
- const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty);
- const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod);
+ const many_ptr_ty = try pt.manyConstPtrType(elem_ty);
+ const many_ptr_val = try pt.getCoerced(ptr_val, many_ptr_ty);
+ const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, pt);
const res = try sema.pointerDerefExtra(block, src, elem_ptr);
const actual_sentinel = switch (res) {
.runtime_load => break :sentinel_check,
@@ -32463,13 +32778,13 @@ fn analyzeSlice(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout",
- .{ty.fmt(mod)},
+ .{ty.fmt(pt)},
),
.out_of_bounds => |ty| return sema.fail(
block,
end_src,
"slice end index {d} exceeds bounds of containing decl of type '{}'",
- .{ end_int, ty.fmt(mod) },
+ .{ end_int, ty.fmt(pt) },
),
};
@@ -32478,8 +32793,8 @@ fn analyzeSlice(
const msg = try sema.errMsg(src, "value in memory does not match slice sentinel", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "expected '{}', found '{}'", .{
- expected_sentinel.fmtValue(mod, sema),
- actual_sentinel.fmtValue(mod, sema),
+ expected_sentinel.fmtValue(pt, sema),
+ actual_sentinel.fmtValue(pt, sema),
});
break :msg msg;
@@ -32501,7 +32816,7 @@ fn analyzeSlice(
assert(!block.is_comptime);
try sema.requireRuntimeBlock(block, src, runtime_src.?);
const ok = try block.addBinOp(.cmp_lte, start, end);
- if (!sema.mod.comp.formatted_panics) {
+ if (!pt.zcu.comp.formatted_panics) {
try sema.addSafetyCheck(block, src, ok, .start_index_greater_than_end);
} else {
try sema.safetyCheckFormatted(block, src, ok, "panicStartGreaterThanEnd", &.{ start, end });
@@ -32517,10 +32832,10 @@ fn analyzeSlice(
const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C;
if (opt_new_len_val) |new_len_val| {
- const new_len_int = try new_len_val.toUnsignedIntSema(mod);
+ const new_len_int = try new_len_val.toUnsignedIntSema(pt);
- const return_ty = try mod.ptrTypeSema(.{
- .child = (try mod.arrayType(.{
+ const return_ty = try pt.ptrTypeSema(.{
+ .child = (try pt.arrayType(.{
.len = new_len_int,
.sentinel = if (sentinel) |s| s.toIntern() else .none,
.child = elem_ty.toIntern(),
@@ -32546,7 +32861,7 @@ fn analyzeSlice(
bounds_check: {
const actual_len = if (array_ty.zigTypeTag(mod) == .Array)
- try mod.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
+ try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
else if (slice_ty.isSlice(mod)) l: {
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
break :l if (slice_ty.sentinel(mod) == null)
@@ -32570,18 +32885,18 @@ fn analyzeSlice(
};
if (!new_ptr_val.isUndef(mod)) {
- return Air.internedToRef((try mod.getCoerced(new_ptr_val, return_ty)).toIntern());
+ return Air.internedToRef((try pt.getCoerced(new_ptr_val, return_ty)).toIntern());
}
// Special case: @as([]i32, undefined)[x..x]
if (new_len_int == 0) {
- return mod.undefRef(return_ty);
+ return pt.undefRef(return_ty);
}
return sema.fail(block, src, "non-zero length slice of undefined pointer", .{});
}
- const return_ty = try mod.ptrTypeSema(.{
+ const return_ty = try pt.ptrTypeSema(.{
.child = elem_ty.toIntern(),
.sentinel = if (sentinel) |s| s.toIntern() else .none,
.flags = .{
@@ -32604,12 +32919,12 @@ fn analyzeSlice(
// requirement: end <= len
const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
- try mod.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
+ try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
else if (slice_ty.isSlice(mod)) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
// we don't need to add one for sentinels because the
// underlying value data includes the sentinel
- break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod));
+ break :blk try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
}
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
@@ -32657,7 +32972,8 @@ fn cmpNumeric(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
@@ -32696,12 +33012,12 @@ fn cmpNumeric(
}
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
- return mod.undefRef(Type.bool);
+ return pt.undefRef(Type.bool);
}
if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false;
}
- return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema))
+ return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, pt, .sema))
.bool_true
else
.bool_false;
@@ -32770,11 +33086,11 @@ fn cmpNumeric(
// a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
// add/subtract 1.
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
- !(try lhs_val.compareAllWithZeroSema(.gte, mod))
+ !(try lhs_val.compareAllWithZeroSema(.gte, pt))
else
(lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
- !(try rhs_val.compareAllWithZeroSema(.gte, mod))
+ !(try rhs_val.compareAllWithZeroSema(.gte, pt))
else
(rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
@@ -32784,7 +33100,7 @@ fn cmpNumeric(
var lhs_bits: usize = undefined;
if (try sema.resolveValueResolveLazy(lhs)) |lhs_val| {
if (lhs_val.isUndef(mod))
- return mod.undefRef(Type.bool);
+ return pt.undefRef(Type.bool);
if (lhs_val.isNan(mod)) switch (op) {
.neq => return .bool_true,
else => return .bool_false,
@@ -32796,7 +33112,7 @@ fn cmpNumeric(
.lt, .lte => return if (lhs_val.isNegativeInf(mod)) .bool_true else .bool_false,
};
if (!rhs_is_signed) {
- switch (lhs_val.orderAgainstZero(mod)) {
+ switch (lhs_val.orderAgainstZero(pt)) {
.gt => {},
.eq => switch (op) { // LHS = 0, RHS is unsigned
.lte => return .bool_true,
@@ -32818,7 +33134,7 @@ fn cmpNumeric(
}
}
- var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod));
+ var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, pt));
defer bigint.deinit();
if (lhs_val.floatHasFraction(mod)) {
if (lhs_is_signed) {
@@ -32829,7 +33145,7 @@ fn cmpNumeric(
}
lhs_bits = bigint.toConst().bitCountTwosComp();
} else {
- lhs_bits = lhs_val.intBitCountTwosComp(mod);
+ lhs_bits = lhs_val.intBitCountTwosComp(pt);
}
lhs_bits += @intFromBool(!lhs_is_signed and dest_int_is_signed);
} else if (lhs_is_float) {
@@ -32842,7 +33158,7 @@ fn cmpNumeric(
var rhs_bits: usize = undefined;
if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| {
if (rhs_val.isUndef(mod))
- return mod.undefRef(Type.bool);
+ return pt.undefRef(Type.bool);
if (rhs_val.isNan(mod)) switch (op) {
.neq => return .bool_true,
else => return .bool_false,
@@ -32854,7 +33170,7 @@ fn cmpNumeric(
.lt, .lte => return if (rhs_val.isNegativeInf(mod)) .bool_false else .bool_true,
};
if (!lhs_is_signed) {
- switch (rhs_val.orderAgainstZero(mod)) {
+ switch (rhs_val.orderAgainstZero(pt)) {
.gt => {},
.eq => switch (op) { // RHS = 0, LHS is unsigned
.gte => return .bool_true,
@@ -32876,7 +33192,7 @@ fn cmpNumeric(
}
}
- var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod));
+ var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, pt));
defer bigint.deinit();
if (rhs_val.floatHasFraction(mod)) {
if (rhs_is_signed) {
@@ -32887,7 +33203,7 @@ fn cmpNumeric(
}
rhs_bits = bigint.toConst().bitCountTwosComp();
} else {
- rhs_bits = rhs_val.intBitCountTwosComp(mod);
+ rhs_bits = rhs_val.intBitCountTwosComp(pt);
}
rhs_bits += @intFromBool(!rhs_is_signed and dest_int_is_signed);
} else if (rhs_is_float) {
@@ -32901,7 +33217,7 @@ fn cmpNumeric(
const max_bits = @max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
- break :blk try mod.intType(signedness, casted_bits);
+ break :blk try pt.intType(signedness, casted_bits);
};
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
@@ -32920,9 +33236,10 @@ fn compareIntsOnlyPossibleResult(
op: std.math.CompareOperator,
rhs_ty: Type,
) Allocator.Error!?bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const rhs_info = rhs_ty.intInfo(mod);
- const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable;
+ const vs_zero = lhs_val.orderAgainstZeroAdvanced(pt, .sema) catch unreachable;
const is_zero = vs_zero == .eq;
const is_negative = vs_zero == .lt;
const is_positive = vs_zero == .gt;
@@ -32954,7 +33271,7 @@ fn compareIntsOnlyPossibleResult(
};
const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed);
- const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj;
+ const req_bits = lhs_val.intBitCountTwosComp(pt) + sign_adj;
// No sized type can have more than 65535 bits.
// The RHS type operand is either a runtime value or sized (but undefined) constant.
@@ -32981,11 +33298,11 @@ fn compareIntsOnlyPossibleResult(
if (req_bits != rhs_info.bits) break :edge .{ false, false };
- const ty = try mod.intType(
+ const ty = try pt.intType(
if (is_negative) .signed else .unsigned,
@intCast(req_bits),
);
- const pop_count = lhs_val.popCount(ty, mod);
+ const pop_count = lhs_val.popCount(ty, pt);
if (is_negative) {
break :edge .{ pop_count == 1, false };
@@ -33015,7 +33332,8 @@ fn cmpVector(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
assert(lhs_ty.zigTypeTag(mod) == .Vector);
@@ -33026,7 +33344,7 @@ fn cmpVector(
const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src);
- const result_ty = try mod.vectorType(.{
+ const result_ty = try pt.vectorType(.{
.len = lhs_ty.vectorLen(mod),
.child = .bool_type,
});
@@ -33035,7 +33353,7 @@ fn cmpVector(
if (try sema.resolveValue(casted_lhs)) |lhs_val| {
if (try sema.resolveValue(casted_rhs)) |rhs_val| {
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
- return mod.undefRef(result_ty);
+ return pt.undefRef(result_ty);
}
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty);
return Air.internedToRef(cmp_val.toIntern());
@@ -33059,7 +33377,7 @@ fn wrapOptional(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
if (try sema.resolveValue(inst)) |val| {
- return Air.internedToRef((try sema.mod.intern(.{ .opt = .{
+ return Air.internedToRef((try sema.pt.intern(.{ .opt = .{
.ty = dest_ty.toIntern(),
.val = val.toIntern(),
} })));
@@ -33076,11 +33394,12 @@ fn wrapErrorUnionPayload(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const dest_payload_ty = dest_ty.errorUnionPayload(mod);
const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
if (try sema.resolveValue(coerced)) |val| {
- return Air.internedToRef((try mod.intern(.{ .error_union = .{
+ return Air.internedToRef((try pt.intern(.{ .error_union = .{
.ty = dest_ty.toIntern(),
.val = .{ .payload = val.toIntern() },
} })));
@@ -33096,7 +33415,8 @@ fn wrapErrorUnionSet(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const inst_ty = sema.typeOf(inst);
const dest_err_set_ty = dest_ty.errorUnionSet(mod);
@@ -33140,7 +33460,7 @@ fn wrapErrorUnionSet(
else => unreachable,
},
}
- return Air.internedToRef((try mod.intern(.{ .error_union = .{
+ return Air.internedToRef((try pt.intern(.{ .error_union = .{
.ty = dest_ty.toIntern(),
.val = .{ .err_name = expected_name },
} })));
@@ -33158,14 +33478,15 @@ fn unionToTag(
un: Air.Inst.Ref,
un_src: LazySrcLoc,
) !Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| {
return Air.internedToRef(opv.toIntern());
}
if (try sema.resolveValue(un)) |un_val| {
const tag_val = un_val.unionTag(mod).?;
if (tag_val.isUndef(mod))
- return try mod.undefRef(enum_ty);
+ return try pt.undefRef(enum_ty);
return Air.internedToRef(tag_val.toIntern());
}
try sema.requireRuntimeBlock(block, un_src, null);
@@ -33399,7 +33720,7 @@ const PeerResolveResult = union(enum) {
instructions: []const Air.Inst.Ref,
candidate_srcs: PeerTypeCandidateSrc,
) !*Module.ErrorMsg {
- const mod = sema.mod;
+ const pt = sema.pt;
var opt_msg: ?*Module.ErrorMsg = null;
errdefer if (opt_msg) |msg| msg.destroy(sema.gpa);
@@ -33425,7 +33746,7 @@ const PeerResolveResult = union(enum) {
},
.field_error => |field_error| {
const fmt = "struct field '{}' has conflicting types";
- const args = .{field_error.field_name.fmt(&mod.intern_pool)};
+ const args = .{field_error.field_name.fmt(&pt.zcu.intern_pool)};
if (opt_msg) |msg| {
try sema.errNote(src, msg, fmt, args);
} else {
@@ -33457,8 +33778,8 @@ const PeerResolveResult = union(enum) {
const fmt = "incompatible types: '{}' and '{}'";
const args = .{
- conflict_tys[0].fmt(mod),
- conflict_tys[1].fmt(mod),
+ conflict_tys[0].fmt(pt),
+ conflict_tys[1].fmt(pt),
};
const msg = if (opt_msg) |msg| msg: {
try sema.errNote(src, msg, fmt, args);
@@ -33469,8 +33790,8 @@ const PeerResolveResult = union(enum) {
break :msg msg;
};
- if (conflict_srcs[0]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(mod)});
- if (conflict_srcs[1]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(mod)});
+ if (conflict_srcs[0]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(pt)});
+ if (conflict_srcs[1]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(pt)});
// No child error
break;
@@ -33517,7 +33838,8 @@ fn resolvePeerTypesInner(
peer_tys: []?Type,
peer_vals: []?Value,
) !PeerResolveResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
var strat_reason: usize = 0;
@@ -33581,7 +33903,7 @@ fn resolvePeerTypesInner(
.payload => |payload_ip| val_ptr.* = Value.fromInterned(payload_ip),
.err_name => val_ptr.* = null,
},
- .undef => val_ptr.* = Value.fromInterned((try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() }))),
+ .undef => val_ptr.* = Value.fromInterned(try pt.intern(.{ .undef = ty_ptr.*.?.toIntern() })),
else => unreachable,
};
break :blk set_ty;
@@ -33604,7 +33926,7 @@ fn resolvePeerTypesInner(
.success => |ty| ty,
else => |result| return result,
};
- return .{ .success = try mod.errorUnionType(final_set.?, final_payload) };
+ return .{ .success = try pt.errorUnionType(final_set.?, final_payload) };
},
.nullable => {
@@ -33642,7 +33964,7 @@ fn resolvePeerTypesInner(
.success => |ty| ty,
else => |result| return result,
};
- return .{ .success = try mod.optionalType(child_ty.toIntern()) };
+ return .{ .success = try pt.optionalType(child_ty.toIntern()) };
},
.array => {
@@ -33730,7 +34052,7 @@ fn resolvePeerTypesInner(
// There should always be at least one array or vector peer
assert(opt_first_arr_idx != null);
- return .{ .success = try mod.arrayType(.{
+ return .{ .success = try pt.arrayType(.{
.len = len,
.child = elem_ty.toIntern(),
.sentinel = if (sentinel) |sent_val| sent_val.toIntern() else .none,
@@ -33792,7 +34114,7 @@ fn resolvePeerTypesInner(
else => |result| return result,
};
- return .{ .success = try mod.vectorType(.{
+ return .{ .success = try pt.vectorType(.{
.len = @intCast(len.?),
.child = child_ty.toIntern(),
}) };
@@ -33844,8 +34166,8 @@ fn resolvePeerTypesInner(
}).toIntern();
if (ptr_info.sentinel != .none and peer_info.sentinel != .none) {
- const peer_sent = try ip.getCoerced(sema.gpa, ptr_info.sentinel, ptr_info.child);
- const ptr_sent = try ip.getCoerced(sema.gpa, peer_info.sentinel, ptr_info.child);
+ const peer_sent = try ip.getCoerced(sema.gpa, pt.tid, ptr_info.sentinel, ptr_info.child);
+ const ptr_sent = try ip.getCoerced(sema.gpa, pt.tid, peer_info.sentinel, ptr_info.child);
if (ptr_sent == peer_sent) {
ptr_info.sentinel = ptr_sent;
} else {
@@ -33860,12 +34182,12 @@ fn resolvePeerTypesInner(
if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
- Type.fromInterned(ptr_info.child).abiAlignment(mod),
+ Type.fromInterned(ptr_info.child).abiAlignment(pt),
if (peer_info.flags.alignment != .none)
peer_info.flags.alignment
else
- Type.fromInterned(peer_info.child).abiAlignment(mod),
+ Type.fromInterned(peer_info.child).abiAlignment(pt),
);
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
return .{ .conflict = .{
@@ -33888,7 +34210,7 @@ fn resolvePeerTypesInner(
opt_ptr_info = ptr_info;
}
- return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) };
+ return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) };
},
.ptr => {
@@ -34004,7 +34326,7 @@ fn resolvePeerTypesInner(
if (try sema.resolvePairInMemoryCoercible(block, src, cur_arr.elem_ty, peer_arr.elem_ty)) |elem_ty| {
// *[n:x]T + *[n:y]T = *[n]T
if (cur_arr.len == peer_arr.len) {
- ptr_info.child = (try mod.arrayType(.{
+ ptr_info.child = (try pt.arrayType(.{
.len = cur_arr.len,
.child = elem_ty.toIntern(),
})).toIntern();
@@ -34148,12 +34470,12 @@ fn resolvePeerTypesInner(
no_sentinel: {
if (peer_sentinel == .none) break :no_sentinel;
if (cur_sentinel == .none) break :no_sentinel;
- const peer_sent_coerced = try ip.getCoerced(sema.gpa, peer_sentinel, sentinel_ty);
- const cur_sent_coerced = try ip.getCoerced(sema.gpa, cur_sentinel, sentinel_ty);
+ const peer_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, peer_sentinel, sentinel_ty);
+ const cur_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, cur_sentinel, sentinel_ty);
if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel;
// Sentinels match
if (ptr_info.flags.size == .One) switch (ip.indexToKey(ptr_info.child)) {
- .array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
+ .array_type => |array_type| ptr_info.child = (try pt.arrayType(.{
.len = array_type.len,
.child = array_type.child,
.sentinel = cur_sent_coerced,
@@ -34167,7 +34489,7 @@ fn resolvePeerTypesInner(
// Clear existing sentinel
ptr_info.sentinel = .none;
switch (ip.indexToKey(ptr_info.child)) {
- .array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
+ .array_type => |array_type| ptr_info.child = (try pt.arrayType(.{
.len = array_type.len,
.child = array_type.child,
.sentinel = .none,
@@ -34198,7 +34520,7 @@ fn resolvePeerTypesInner(
},
}
- return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) };
+ return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) };
},
.func => {
@@ -34517,7 +34839,7 @@ fn resolvePeerTypesInner(
continue;
};
peer_field_ty.* = ty.structFieldType(field_index, mod);
- peer_field_val.* = if (opt_val) |val| try val.fieldValue(mod, field_index) else null;
+ peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null;
}
// Resolve field type recursively
@@ -34555,9 +34877,9 @@ fn resolvePeerTypesInner(
var comptime_val: ?Value = null;
for (peer_tys) |opt_ty| {
const struct_ty = opt_ty orelse continue;
- try struct_ty.resolveStructFieldInits(mod);
+ try struct_ty.resolveStructFieldInits(pt);
- const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse {
+ const uncoerced_field_val = try struct_ty.structFieldValueComptime(pt, field_index) orelse {
comptime_val = null;
break;
};
@@ -34584,7 +34906,7 @@ fn resolvePeerTypesInner(
field_val.* = if (comptime_val) |v| v.toIntern() else .none;
}
- const final_ty = try ip.getAnonStructType(mod.gpa, .{
+ const final_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{
.types = field_types,
.names = if (is_tuple) &.{} else field_names,
.values = field_vals,
@@ -34628,13 +34950,15 @@ fn maybeMergeErrorSets(sema: *Sema, block: *Block, src: LazySrcLoc, e0: Type, e1
}
fn resolvePairInMemoryCoercible(sema: *Sema, block: *Block, src: LazySrcLoc, ty_a: Type, ty_b: Type) !?Type {
+ const target = sema.pt.zcu.getTarget();
+
// ty_b -> ty_a
- if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, sema.mod.getTarget(), src, src)) {
+ if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, target, src, src)) {
return ty_a;
}
// ty_a -> ty_b
- if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, sema.mod.getTarget(), src, src)) {
+ if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, target, src, src)) {
return ty_b;
}
@@ -34647,7 +34971,8 @@ const ArrayLike = struct {
elem_ty: Type,
};
fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
return switch (ty.zigTypeTag(mod)) {
.Array => .{
.len = ty.arrayLen(mod),
@@ -34676,7 +35001,8 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
}
pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
if (sema.fn_ret_ty_ies) |ies| {
@@ -34687,26 +35013,27 @@ pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void
}
pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const fn_ty_info = mod.typeToFunc(fn_ty).?;
- try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod);
+ try Type.fromInterned(fn_ty_info.return_type).resolveFully(pt);
if (mod.comp.config.any_error_tracing and
Type.fromInterned(fn_ty_info.return_type).isError(mod))
{
// Ensure the type exists so that backends can assume that.
- _ = try mod.getBuiltinType("StackTrace");
+ _ = try pt.getBuiltinType("StackTrace");
}
for (0..fn_ty_info.param_types.len) |i| {
- try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod);
+ try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(pt);
}
}
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
- return val.resolveLazy(sema.arena, sema.mod);
+ return val.resolveLazy(sema.arena, sema.pt);
}
/// Resolve a struct's alignment only without triggering resolution of its layout.
@@ -34716,7 +35043,8 @@ pub fn resolveStructAlignment(
ty: InternPool.Index,
struct_type: InternPool.LoadedStructType,
) SemaError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
@@ -34754,7 +35082,7 @@ pub fn resolveStructAlignment(
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty))
continue;
- const field_align = try mod.structFieldAlignmentAdvanced(
+ const field_align = try pt.structFieldAlignmentAdvanced(
struct_type.fieldAlign(ip, i),
field_ty,
struct_type.layout,
@@ -34767,7 +35095,8 @@ pub fn resolveStructAlignment(
}
pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const struct_type = zcu.typeToStruct(ty) orelse return;
@@ -34776,10 +35105,10 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
if (struct_type.haveLayout(ip))
return;
- try ty.resolveFields(zcu);
+ try ty.resolveFields(pt);
if (struct_type.layout == .@"packed") {
- semaBackingIntType(zcu, struct_type) catch |err| switch (err) {
+ semaBackingIntType(pt, struct_type) catch |err| switch (err) {
error.OutOfMemory, error.AnalysisFail => |e| return e,
error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
};
@@ -34790,7 +35119,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
const msg = try sema.errMsg(
ty.srcLoc(zcu),
"struct '{}' depends on itself",
- .{ty.fmt(zcu)},
+ .{ty.fmt(pt)},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
@@ -34818,7 +35147,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
},
else => return err,
};
- field_align.* = try zcu.structFieldAlignmentAdvanced(
+ field_align.* = try pt.structFieldAlignmentAdvanced(
struct_type.fieldAlign(ip, i),
field_ty,
struct_type.layout,
@@ -34911,7 +35240,8 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
_ = try sema.typeRequiresComptime(ty);
}
-fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) CompileError!void {
+fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructType) CompileError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@@ -34927,7 +35257,7 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = zir,
@@ -34958,7 +35288,7 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi
var accumulator: u64 = 0;
for (0..struct_type.field_types.len) |i| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- accumulator += try field_ty.bitSizeAdvanced(zcu, .sema);
+ accumulator += try field_ty.bitSizeAdvanced(pt, .sema);
}
break :blk accumulator;
};
@@ -35004,7 +35334,7 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi
if (fields_bit_sum > std.math.maxInt(u16)) {
return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
}
- const backing_int_ty = try zcu.intType(.unsigned, @intCast(fields_bit_sum));
+ const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
}
@@ -35012,26 +35342,27 @@ fn semaBackingIntType(zcu: *Zcu, struct_type: InternPool.LoadedStructType) Compi
}
fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (!backing_int_ty.isInt(mod)) {
- return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
+ return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(pt)});
}
- if (backing_int_ty.bitSize(mod) != fields_bit_sum) {
+ if (backing_int_ty.bitSize(pt) != fields_bit_sum) {
return sema.fail(
block,
src,
"backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
- .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum },
+ .{ backing_int_ty.fmt(pt), backing_int_ty.bitSize(pt), fields_bit_sum },
);
}
}
fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- const mod = sema.mod;
- if (!ty.isIndexable(mod)) {
+ const pt = sema.pt;
+ if (!ty.isIndexable(pt.zcu)) {
const msg = msg: {
- const msg = try sema.errMsg(src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "type '{}' does not support indexing", .{ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "operand must be an array, slice, tuple, or vector", .{});
break :msg msg;
@@ -35041,7 +35372,8 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
}
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Pointer) {
switch (ty.ptrSize(mod)) {
.Slice, .Many, .C => return,
@@ -35054,7 +35386,7 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void
}
}
const msg = msg: {
- const msg = try sema.errMsg(src, "type '{}' is not an indexable pointer", .{ty.fmt(sema.mod)});
+ const msg = try sema.errMsg(src, "type '{}' is not an indexable pointer", .{ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "operand must be a slice, a many pointer or a pointer to an array", .{});
break :msg msg;
@@ -35069,9 +35401,9 @@ pub fn resolveUnionAlignment(
ty: Type,
union_type: InternPool.LoadedUnionType,
) SemaError!void {
- const mod = sema.mod;
- const ip = &mod.intern_pool;
- const target = mod.getTarget();
+ const zcu = sema.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
assert(sema.ownerUnit().unwrap().decl == union_type.decl);
@@ -35108,8 +35440,8 @@ pub fn resolveUnionAlignment(
/// This logic must be kept in sync with `Module.getUnionLayout`.
pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
- const zcu = sema.mod;
- const ip = &zcu.intern_pool;
+ const pt = sema.pt;
+ const ip = &pt.zcu.intern_pool;
try sema.resolveTypeFieldsUnion(ty, ip.loadUnionType(ty.ip_index));
@@ -35122,9 +35454,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
const msg = try sema.errMsg(
- ty.srcLoc(zcu),
+ ty.srcLoc(pt.zcu),
"union '{}' depends on itself",
- .{ty.fmt(zcu)},
+ .{ty.fmt(pt)},
);
return sema.failWithOwnedErrorMsg(null, msg);
},
@@ -35143,7 +35475,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
- if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(zcu) == .NoReturn) continue; // TODO: should this affect alignment?
+ if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(pt.zcu) == .NoReturn) continue; // TODO: should this affect alignment?
max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
@@ -35185,7 +35517,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
} else {
// {Payload, Tag}
size += max_size;
- size = switch (zcu.getTarget().ofmt) {
+ size = switch (pt.zcu.getTarget().ofmt) {
.c => max_align,
else => tag_align,
}.forward(size);
@@ -35205,7 +35537,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
const msg = try sema.errMsg(
- ty.srcLoc(zcu),
+ ty.srcLoc(pt.zcu),
"union layout depends on it having runtime bits",
.{},
);
@@ -35213,10 +35545,10 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
}
if (union_type.flagsPtr(ip).assumed_pointer_aligned and
- alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8))))
+ alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8))))
{
const msg = try sema.errMsg(
- ty.srcLoc(zcu),
+ ty.srcLoc(pt.zcu),
"union layout depends on being pointer aligned",
.{},
);
@@ -35229,7 +35561,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
try sema.resolveStructLayout(ty);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const struct_type = mod.typeToStruct(ty).?;
@@ -35244,14 +35577,15 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
for (0..struct_type.field_types.len) |i| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- try field_ty.resolveFully(mod);
+ try field_ty.resolveFully(pt);
}
}
pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
try sema.resolveUnionLayout(ty);
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
@@ -35272,7 +35606,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
union_obj.flagsPtr(ip).status = .fully_resolved_wip;
for (0..union_obj.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- try field_ty.resolveFully(mod);
+ try field_ty.resolveFully(pt);
}
union_obj.flagsPtr(ip).status = .fully_resolved;
}
@@ -35286,7 +35620,8 @@ pub fn resolveTypeFieldsStruct(
ty: InternPool.Index,
struct_type: InternPool.LoadedStructType,
) SemaError!void {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
// If there is no owner decl it means the struct has no fields.
const owner_decl = struct_type.decl.unwrap() orelse return;
@@ -35310,13 +35645,13 @@ pub fn resolveTypeFieldsStruct(
const msg = try sema.errMsg(
Type.fromInterned(ty).srcLoc(zcu),
"struct '{}' depends on itself",
- .{Type.fromInterned(ty).fmt(zcu)},
+ .{Type.fromInterned(ty).fmt(pt)},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
defer struct_type.clearTypesWip(ip);
- semaStructFields(zcu, sema.arena, struct_type) catch |err| switch (err) {
+ semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) {
error.AnalysisFail => {
if (zcu.declPtr(owner_decl).analysis == .complete) {
zcu.declPtr(owner_decl).analysis = .dependency_failure;
@@ -35329,7 +35664,8 @@ pub fn resolveTypeFieldsStruct(
}
pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const struct_type = zcu.typeToStruct(ty) orelse return;
const owner_decl = struct_type.decl.unwrap() orelse return;
@@ -35345,13 +35681,13 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
const msg = try sema.errMsg(
ty.srcLoc(zcu),
"struct '{}' depends on itself",
- .{ty.fmt(zcu)},
+ .{ty.fmt(pt)},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
defer struct_type.clearInitsWip(ip);
- semaStructFieldInits(zcu, sema.arena, struct_type) catch |err| switch (err) {
+ semaStructFieldInits(pt, sema.arena, struct_type) catch |err| switch (err) {
error.AnalysisFail => {
if (zcu.declPtr(owner_decl).analysis == .complete) {
zcu.declPtr(owner_decl).analysis = .dependency_failure;
@@ -35365,7 +35701,8 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
}
pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const owner_decl = zcu.declPtr(union_type.decl);
@@ -35387,7 +35724,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
const msg = try sema.errMsg(
ty.srcLoc(zcu),
"union '{}' depends on itself",
- .{ty.fmt(zcu)},
+ .{ty.fmt(pt)},
);
return sema.failWithOwnedErrorMsg(null, msg);
},
@@ -35401,7 +35738,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
union_type.flagsPtr(ip).status = .field_types_wip;
errdefer union_type.flagsPtr(ip).status = .none;
- semaUnionFields(zcu, sema.arena, union_type) catch |err| switch (err) {
+ semaUnionFields(pt, sema.arena, union_type) catch |err| switch (err) {
error.AnalysisFail => {
if (owner_decl.analysis == .complete) {
owner_decl.analysis = .dependency_failure;
@@ -35422,7 +35759,8 @@ fn resolveInferredErrorSet(
src: LazySrcLoc,
ies_index: InternPool.Index,
) CompileError!InternPool.Index {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const func_index = ip.iesFuncIndex(ies_index);
const func = mod.funcInfo(func_index);
@@ -35482,8 +35820,8 @@ pub fn resolveInferredErrorSetPtr(
src: LazySrcLoc,
ies: *InferredErrorSet,
) CompileError!void {
- const mod = sema.mod;
- const ip = &mod.intern_pool;
+ const pt = sema.pt;
+ const ip = &pt.zcu.intern_pool;
if (ies.resolved != .none) return;
@@ -35505,7 +35843,7 @@ pub fn resolveInferredErrorSetPtr(
}
}
- const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys());
+ const resolved_error_set_ty = try pt.errorSetFromUnsortedNames(ies.errors.keys());
ies.resolved = resolved_error_set_ty.toIntern();
}
@@ -35515,12 +35853,13 @@ fn resolveAdHocInferredErrorSet(
src: LazySrcLoc,
value: InternPool.Index,
) CompileError!InternPool.Index {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
if (new_ty == .none) return value;
- return ip.getCoerced(gpa, value, new_ty);
+ return ip.getCoerced(gpa, pt.tid, value, new_ty);
}
fn resolveAdHocInferredErrorSetTy(
@@ -35530,8 +35869,8 @@ fn resolveAdHocInferredErrorSetTy(
ty: InternPool.Index,
) CompileError!InternPool.Index {
const ies = sema.fn_ret_ty_ies orelse return .none;
- const mod = sema.mod;
- const gpa = sema.gpa;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const error_union_info = switch (ip.indexToKey(ty)) {
.error_union_type => |x| x,
@@ -35541,7 +35880,7 @@ fn resolveAdHocInferredErrorSetTy(
return .none;
try sema.resolveInferredErrorSetPtr(block, src, ies);
- const new_ty = try ip.get(gpa, .{ .error_union_type = .{
+ const new_ty = try pt.intern(.{ .error_union_type = .{
.error_set_type = ies.resolved,
.payload_type = error_union_info.payload_type,
} });
@@ -35554,7 +35893,8 @@ fn resolveInferredErrorSetTy(
src: LazySrcLoc,
ty: InternPool.Index,
) CompileError!InternPool.Index {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
if (ty == .anyerror_type) return ty;
switch (ip.indexToKey(ty)) {
@@ -35614,10 +35954,11 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
}
fn semaStructFields(
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
struct_type: InternPool.LoadedStructType,
) CompileError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const decl_index = struct_type.decl.unwrap() orelse return;
@@ -35630,7 +35971,7 @@ fn semaStructFields(
if (fields_len == 0) switch (struct_type.layout) {
.@"packed" => {
- try semaBackingIntType(zcu, struct_type);
+ try semaBackingIntType(pt, struct_type);
return;
},
.auto, .@"extern" => {
@@ -35644,7 +35985,7 @@ fn semaStructFields(
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = arena,
.code = zir,
@@ -35789,7 +36130,7 @@ fn semaStructFields(
switch (struct_type.layout) {
.@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) {
const msg = msg: {
- const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(zcu)});
+ const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field);
@@ -35801,7 +36142,7 @@ fn semaStructFields(
},
.@"packed" => if (!try sema.validatePackedType(field_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(zcu)});
+ const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);
@@ -35837,10 +36178,11 @@ fn semaStructFields(
// This logic must be kept in sync with `semaStructFields`
fn semaStructFieldInits(
- zcu: *Zcu,
+ pt: Zcu.PerThread,
arena: Allocator,
struct_type: InternPool.LoadedStructType,
) CompileError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@@ -35857,7 +36199,7 @@ fn semaStructFieldInits(
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = arena,
.code = zir,
@@ -35977,10 +36319,11 @@ fn semaStructFieldInits(
try sema.flushExports();
}
-fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void {
+fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const decl_index = union_type.decl;
@@ -36034,7 +36377,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = arena,
.code = zir,
@@ -36081,17 +36424,17 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
// The provided type is an integer type and we must construct the enum tag type here.
int_tag_ty = provided_ty;
if (int_tag_ty.zigTypeTag(zcu) != .Int and int_tag_ty.zigTypeTag(zcu) != .ComptimeInt) {
- return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(zcu)});
+ return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(pt)});
}
if (fields_len > 0) {
- const field_count_val = try zcu.intValue(Type.comptime_int, fields_len - 1);
+ const field_count_val = try pt.intValue(Type.comptime_int, fields_len - 1);
if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
const msg = msg: {
const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{
- int_tag_ty.fmt(zcu),
+ int_tag_ty.fmt(pt),
fields_len - 1,
});
break :msg msg;
@@ -36106,7 +36449,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
union_type.tagTypePtr(ip).* = provided_ty.toIntern();
const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
.enum_type => ip.loadEnumType(provided_ty.toIntern()),
- else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(zcu)}),
+ else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}),
};
// The fields of the union must match the enum exactly.
// A flag per field is used to check for missing and extraneous fields.
@@ -36202,7 +36545,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
const val = if (last_tag_val) |val|
try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined)
else
- try zcu.intValue(int_tag_ty, 0);
+ try pt.intValue(int_tag_ty, 0);
last_tag_val = val;
break :blk val;
@@ -36214,7 +36557,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
.offset = .{ .container_field_value = @intCast(gop.index) },
};
const msg = msg: {
- const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(zcu, &sema)});
+ const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(pt, &sema)});
errdefer msg.destroy(gpa);
try sema.errNote(other_value_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -36244,7 +36587,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
const enum_index = tag_info.nameIndex(ip, field_name) orelse {
return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{
- field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(zcu),
+ field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(pt),
});
};
@@ -36286,7 +36629,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
!try sema.validateExternType(field_ty, .union_field))
{
const msg = msg: {
- const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(zcu)});
+ const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, type_src, field_ty, .union_field);
@@ -36297,7 +36640,7 @@ fn semaUnionFields(zcu: *Zcu, arena: Allocator, union_type: InternPool.LoadedUni
return sema.failWithOwnedErrorMsg(&block_scope, msg);
} else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
const msg = msg: {
- const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(zcu)});
+ const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, type_src, field_ty);
@@ -36366,7 +36709,8 @@ fn generateUnionTagTypeNumbered(
enum_field_vals: []const InternPool.Index,
union_owner_decl: *Module.Decl,
) !InternPool.Index {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
@@ -36390,11 +36734,11 @@ fn generateUnionTagTypeNumbered(
new_decl.owns_tv = true;
new_decl.name_fully_qualified = true;
- const enum_ty = try ip.getGeneratedTagEnumType(gpa, .{
+ const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{
.decl = new_decl_index,
.owner_union_ty = union_owner_decl.val.toIntern(),
.tag_ty = if (enum_field_vals.len == 0)
- (try mod.intType(.unsigned, 0)).toIntern()
+ (try pt.intType(.unsigned, 0)).toIntern()
else
ip.typeOf(enum_field_vals[0]),
.names = enum_field_names,
@@ -36404,7 +36748,7 @@ fn generateUnionTagTypeNumbered(
new_decl.val = Value.fromInterned(enum_ty);
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
return enum_ty;
}
@@ -36414,7 +36758,8 @@ fn generateUnionTagTypeSimple(
enum_field_names: []const InternPool.NullTerminatedString,
union_owner_decl: *Module.Decl,
) !InternPool.Index {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const gpa = sema.gpa;
@@ -36438,13 +36783,13 @@ fn generateUnionTagTypeSimple(
};
errdefer mod.abortAnonDecl(new_decl_index);
- const enum_ty = try ip.getGeneratedTagEnumType(gpa, .{
+ const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{
.decl = new_decl_index,
.owner_union_ty = union_owner_decl.val.toIntern(),
.tag_ty = if (enum_field_names.len == 0)
- (try mod.intType(.unsigned, 0)).toIntern()
+ (try pt.intType(.unsigned, 0)).toIntern()
else
- (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(),
+ (try pt.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(),
.names = enum_field_names,
.values = &.{},
.tag_mode = .auto,
@@ -36454,7 +36799,7 @@ fn generateUnionTagTypeSimple(
new_decl.owns_tv = true;
new_decl.val = Value.fromInterned(enum_ty);
- try mod.finalizeAnonDecl(new_decl_index);
+ try pt.finalizeAnonDecl(new_decl_index);
return enum_ty;
}
@@ -36464,12 +36809,13 @@ fn generateUnionTagTypeSimple(
/// that the types are already resolved.
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
.u0_type,
.i0_type,
- => try zcu.intValue(ty, 0),
+ => try pt.intValue(ty, 0),
.u1_type,
.u8_type,
.i8_type,
@@ -36532,7 +36878,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.anyframe_type => unreachable,
.null_type => Value.null,
.undefined_type => Value.undef,
- .optional_noreturn_type => try zcu.nullValue(ty),
+ .optional_noreturn_type => try pt.nullValue(ty),
.generic_poison_type => error.GenericPoison,
.empty_struct_type => Value.empty_struct,
// values, not types
@@ -36646,16 +36992,16 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
=> switch (ip.indexToKey(ty.toIntern())) {
inline .array_type, .vector_type => |seq_type, seq_tag| {
const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
- if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })));
+ } }));
if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| {
- return Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
- } })));
+ } }));
}
return null;
},
@@ -36663,17 +37009,17 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.struct_type => {
// Resolving the layout first helps to avoid loops.
// If the type has a coherent layout, we can recurse through fields safely.
- try ty.resolveLayout(zcu);
+ try ty.resolveLayout(pt);
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.field_types.len == 0) {
// In this case the struct has no fields at all and
// therefore has one possible value.
- return Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })));
+ } }));
}
const field_vals = try sema.arena.alloc(
@@ -36682,7 +37028,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
);
for (field_vals, 0..) |*field_val, i| {
if (struct_type.fieldIsComptime(ip, i)) {
- try ty.resolveStructFieldInits(zcu);
+ try ty.resolveStructFieldInits(pt);
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
@@ -36694,10 +37040,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
- return Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })));
+ } }));
},
.anon_struct_type => |tuple| {
@@ -36707,28 +37053,28 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
// In this case the struct has all comptime-known fields and
// therefore has one possible value.
// TODO: write something like getCoercedInts to avoid needing to dupe
- return Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values.get(ip)) },
- } })));
+ } }));
},
.union_type => {
// Resolving the layout first helps to avoid loops.
// If the type has a coherent layout, we can recurse through fields safely.
- try ty.resolveLayout(zcu);
+ try ty.resolveLayout(pt);
const union_obj = ip.loadUnionType(ty.toIntern());
const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse
return null;
if (union_obj.field_types.len == 0) {
- const only = try zcu.intern(.{ .empty_enum_value = ty.toIntern() });
+ const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse
return null;
- const only = try zcu.intern(.{ .un = .{
+ const only = try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = val_val.toIntern(),
@@ -36743,7 +37089,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
if (enum_type.tag_ty == .comptime_int_type) return null;
if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
- const only = try zcu.intern(.{ .enum_tag = .{
+ const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
@@ -36753,18 +37099,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return null;
},
.auto, .explicit => {
- if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(zcu)) return null;
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null;
return Value.fromInterned(switch (enum_type.names.len) {
- 0 => try zcu.intern(.{ .empty_enum_value = ty.toIntern() }),
- 1 => try zcu.intern(.{ .enum_tag = .{
+ 0 => try pt.intern(.{ .empty_enum_value = ty.toIntern() }),
+ 1 => try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = if (enum_type.values.len == 0)
- (try zcu.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
+ (try pt.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
- try zcu.intern_pool.getCoercedInts(
+ try ip.getCoercedInts(
zcu.gpa,
- zcu.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int,
+ pt.tid,
+ ip.indexToKey(enum_type.values.get(ip)[0]).int,
enum_type.tag_ty,
),
} }),
@@ -36782,7 +37129,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
/// Returns the type of the AIR instruction.
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
- return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool);
+ return sema.getTmpAir().typeOf(inst, &sema.pt.zcu.intern_pool);
}
pub fn getTmpAir(sema: Sema) Air {
@@ -36838,12 +37185,13 @@ fn analyzeComptimeAlloc(
var_type: Type,
alignment: Alignment,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
// Needed to make an anon decl with type `var_type` (the `finish()` call below).
_ = try sema.typeHasOnePossibleValue(var_type);
- const ptr_type = try mod.ptrTypeSema(.{
+ const ptr_type = try pt.ptrTypeSema(.{
.child = var_type.toIntern(),
.flags = .{
.alignment = alignment,
@@ -36853,7 +37201,7 @@ fn analyzeComptimeAlloc(
const alloc = try sema.newComptimeAlloc(block, var_type, alignment);
- return Air.internedToRef((try mod.intern(.{ .ptr = .{
+ return Air.internedToRef((try pt.intern(.{ .ptr = .{
.ty = ptr_type.toIntern(),
.base_addr = .{ .comptime_alloc = alloc },
.byte_offset = 0,
@@ -36896,13 +37244,14 @@ pub fn analyzeAsAddressSpace(
air_ref: Air.Inst.Ref,
ctx: AddressSpaceContext,
) !std.builtin.AddressSpace {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const coerced = try sema.coerce(block, Type.fromInterned(.address_space_type), air_ref, src);
const addrspace_val = try sema.resolveConstDefinedValue(block, src, coerced, .{
.needed_comptime_reason = "address space must be comptime-known",
});
const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_val);
- const target = sema.mod.getTarget();
+ const target = pt.zcu.getTarget();
const arch = target.cpu.arch;
const is_nv = arch == .nvptx or arch == .nvptx64;
@@ -36946,7 +37295,8 @@ pub fn analyzeAsAddressSpace(
/// Returns `null` if the pointer contents cannot be loaded at comptime.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
// TODO: audit use sites to eliminate this coercion
- const coerced_ptr_val = try sema.mod.getCoerced(ptr_val, ptr_ty);
+ const pt = sema.pt;
+ const coerced_ptr_val = try pt.getCoerced(ptr_val, ptr_ty);
switch (try sema.pointerDerefExtra(block, src, coerced_ptr_val)) {
.runtime_load => return null,
.val => |v| return v,
@@ -36954,13 +37304,13 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
block,
src,
"comptime dereference requires '{}' to have a well-defined layout",
- .{ty.fmt(sema.mod)},
+ .{ty.fmt(pt)},
),
.out_of_bounds => |ty| return sema.fail(
block,
src,
"dereference of '{}' exceeds bounds of containing decl of type '{}'",
- .{ ptr_ty.fmt(sema.mod), ty.fmt(sema.mod) },
+ .{ ptr_ty.fmt(pt), ty.fmt(pt) },
),
}
}
@@ -36973,10 +37323,10 @@ const DerefResult = union(enum) {
};
fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value) CompileError!DerefResult {
- const zcu = sema.mod;
- const ip = &zcu.intern_pool;
+ const pt = sema.pt;
+ const ip = &pt.zcu.intern_pool;
switch (try sema.loadComptimePtr(block, src, ptr_val)) {
- .success => |mv| return .{ .val = try mv.intern(zcu, sema.arena) },
+ .success => |mv| return .{ .val = try mv.intern(pt, sema.arena) },
.runtime_load => return .runtime_load,
.undef => return sema.failWithUseOfUndef(block, src),
.err_payload => |err_name| return sema.fail(block, src, "attempt to unwrap error: {}", .{err_name.fmt(ip)}),
@@ -37001,7 +37351,8 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
/// a type has zero bits, which can cause a "foo depends on itself" compile error.
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => ty,
@@ -37031,27 +37382,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
/// `generic_poison` will return false.
/// May return false negatives when structs and unions are having their field types resolved.
pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool {
- return ty.comptimeOnlyAdvanced(sema.mod, .sema);
+ return ty.comptimeOnlyAdvanced(sema.pt, .sema);
}
pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool {
- return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) {
+ return ty.hasRuntimeBitsAdvanced(sema.pt, false, .sema) catch |err| switch (err) {
error.NeedLazy => unreachable,
else => |e| return e,
};
}
pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 {
- try ty.resolveLayout(sema.mod);
- return ty.abiSize(sema.mod);
+ const pt = sema.pt;
+ try ty.resolveLayout(pt);
+ return ty.abiSize(pt);
}
pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment {
- return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar;
+ return (try ty.abiAlignmentAdvanced(sema.pt, .sema)).scalar;
}
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
- return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema);
+ return ty.fnHasRuntimeBitsAdvanced(sema.pt, .sema);
}
fn unionFieldIndex(
@@ -37061,9 +37413,10 @@ fn unionFieldIndex(
field_name: InternPool.NullTerminatedString,
field_src: LazySrcLoc,
) !u32 {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- try union_ty.resolveFields(mod);
+ try union_ty.resolveFields(pt);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name);
@@ -37077,9 +37430,10 @@ fn structFieldIndex(
field_name: InternPool.NullTerminatedString,
field_src: LazySrcLoc,
) !u32 {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
- try struct_ty.resolveFields(mod);
+ try struct_ty.resolveFields(pt);
if (struct_ty.isAnonStruct(mod)) {
return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
} else {
@@ -37096,7 +37450,8 @@ fn anonStructFieldIndex(
field_name: InternPool.NullTerminatedString,
field_src: LazySrcLoc,
) !u32 {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(struct_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
@@ -37106,20 +37461,21 @@ fn anonStructFieldIndex(
else => unreachable,
}
return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
- field_name.fmt(ip), struct_ty.fmt(sema.mod),
+ field_name.fmt(ip), struct_ty.fmt(pt),
});
}
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
+ const pt = sema.pt;
var overflow: usize = undefined;
return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
error.Overflow => {
- const is_vec = ty.isVector(sema.mod);
+ const is_vec = ty.isVector(pt.zcu);
overflow_idx.* = if (is_vec) overflow else 0;
- const safe_ty = if (is_vec) try sema.mod.vectorType(.{
- .len = ty.vectorLen(sema.mod),
+ const safe_ty = if (is_vec) try pt.vectorType(.{
+ .len = ty.vectorLen(pt.zcu),
.child = .comptime_int_type,
}) else Type.comptime_int;
return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
@@ -37132,13 +37488,14 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize)
}
fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
error.Overflow => {
overflow_idx.* = i;
@@ -37148,34 +37505,34 @@ fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
};
scalar.* = val.toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
return sema.intAddScalar(lhs, rhs, ty);
}
fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
- const mod = sema.mod;
+ const pt = sema.pt;
if (scalar_ty.toIntern() != .comptime_int_type) {
const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty);
- if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
+ if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow;
return res.wrapped_result;
}
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.add(lhs_bigint, rhs_bigint);
- return mod.intValue_big(scalar_ty, result_bigint.toConst());
+ return pt.intValue_big(scalar_ty, result_bigint.toConst());
}
/// Supports both floats and ints; handles undefined.
@@ -37185,15 +37542,16 @@ fn numberAddWrapScalar(
rhs: Value,
ty: Type,
) !Value {
- const mod = sema.mod;
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return mod.undefValue(ty);
+ const pt = sema.pt;
+ const mod = pt.zcu;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty);
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intAdd(lhs, rhs, ty, undefined);
}
if (ty.isAnyFloat()) {
- return Value.floatAdd(lhs, rhs, ty, sema.arena, mod);
+ return Value.floatAdd(lhs, rhs, ty, sema.arena, pt);
}
const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty);
@@ -37203,13 +37561,14 @@ fn numberAddWrapScalar(
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
+ const pt = sema.pt;
var overflow: usize = undefined;
return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
error.Overflow => {
- const is_vec = ty.isVector(sema.mod);
+ const is_vec = ty.isVector(pt.zcu);
overflow_idx.* = if (is_vec) overflow else 0;
- const safe_ty = if (is_vec) try sema.mod.vectorType(.{
- .len = ty.vectorLen(sema.mod),
+ const safe_ty = if (is_vec) try pt.vectorType(.{
+ .len = ty.vectorLen(pt.zcu),
.child = .comptime_int_type,
}) else Type.comptime_int;
return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
@@ -37222,13 +37581,13 @@ fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize)
}
fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
- const mod = sema.mod;
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ const pt = sema.pt;
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(sema.mod, i);
- const rhs_elem = try rhs.elemValue(sema.mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
error.Overflow => {
overflow_idx.* = i;
@@ -37238,34 +37597,34 @@ fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
};
scalar.* = val.toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
return sema.intSubScalar(lhs, rhs, ty);
}
fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
- const mod = sema.mod;
+ const pt = sema.pt;
if (scalar_ty.toIntern() != .comptime_int_type) {
const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty);
- if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
+ if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow;
return res.wrapped_result;
}
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.sub(lhs_bigint, rhs_bigint);
- return mod.intValue_big(scalar_ty, result_bigint.toConst());
+ return pt.intValue_big(scalar_ty, result_bigint.toConst());
}
/// Supports both floats and ints; handles undefined.
@@ -37275,15 +37634,16 @@ fn numberSubWrapScalar(
rhs: Value,
ty: Type,
) !Value {
- const mod = sema.mod;
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return mod.undefValue(ty);
+ const pt = sema.pt;
+ const mod = pt.zcu;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty);
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intSub(lhs, rhs, ty, undefined);
}
if (ty.isAnyFloat()) {
- return Value.floatSub(lhs, rhs, ty, sema.arena, mod);
+ return Value.floatSub(lhs, rhs, ty, sema.arena, pt);
}
const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty);
@@ -37296,28 +37656,29 @@ fn intSubWithOverflow(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const vec_len = ty.vectorLen(mod);
const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
const scalar_ty = ty.scalarType(mod);
for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
- const lhs_elem = try lhs.elemValue(sema.mod, i);
- const rhs_elem = try rhs.elemValue(sema.mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return Value.OverflowArithmeticResult{
- .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+ .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } }))),
- .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ } })),
+ .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } }))),
+ } })),
};
}
return sema.intSubWithOverflowScalar(lhs, rhs, ty);
@@ -37329,29 +37690,30 @@ fn intSubWithOverflowScalar(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
return .{
- .overflow_bit = try mod.undefValue(Type.u1),
- .wrapped_result = try mod.undefValue(ty),
+ .overflow_bit = try pt.undefValue(Type.u1),
+ .wrapped_result = try pt.undefValue(ty),
};
}
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
+ const wrapped_result = try pt.intValue_big(ty, result_bigint.toConst());
return Value.OverflowArithmeticResult{
- .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
+ .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
.wrapped_result = wrapped_result,
};
}
@@ -37367,17 +37729,18 @@ fn intFromFloat(
int_ty: Type,
mode: IntFromFloatMode,
) CompileError!Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (float_ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(sema.mod, i);
+ const elem_val = try val.elemValue(pt, i);
scalar.* = (try sema.intFromFloatScalar(block, src, elem_val, int_ty.scalarType(mod), mode)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = int_ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
return sema.intFromFloatScalar(block, src, val, int_ty, mode);
}
@@ -37415,7 +37778,8 @@ fn intFromFloatScalar(
int_ty: Type,
mode: IntFromFloatMode,
) CompileError!Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (val.isUndef(mod)) return sema.failWithUseOfUndef(block, src);
@@ -37423,32 +37787,32 @@ fn intFromFloatScalar(
block,
src,
"fractional component prevents float value '{}' from coercion to type '{}'",
- .{ val.fmtValue(mod, sema), int_ty.fmt(mod) },
+ .{ val.fmtValue(pt, sema), int_ty.fmt(pt) },
);
- const float = val.toFloat(f128, mod);
+ const float = val.toFloat(f128, pt);
if (std.math.isNan(float)) {
return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
- int_ty.fmt(sema.mod),
+ int_ty.fmt(pt),
});
}
if (std.math.isInf(float)) {
return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{
- int_ty.fmt(sema.mod),
+ int_ty.fmt(pt),
});
}
var big_int = try float128IntPartToBigInt(sema.arena, float);
defer big_int.deinit();
- const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst());
+ const cti_result = try pt.intValue_big(Type.comptime_int, big_int.toConst());
if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
- val.fmtValue(sema.mod, sema), int_ty.fmt(sema.mod),
+ val.fmtValue(pt, sema), int_ty.fmt(pt),
});
}
- return mod.getCoerced(cti_result, int_ty);
+ return pt.getCoerced(cti_result, int_ty);
}
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
@@ -37461,7 +37825,8 @@ fn intFitsInType(
ty: Type,
vector_index: ?*usize,
) CompileError!bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.toIntern() == .comptime_int_type) return true;
const info = ty.intInfo(mod);
switch (val.toIntern()) {
@@ -37528,22 +37893,23 @@ fn intFitsInType(
}
fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
- const mod = sema.mod;
- if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false;
- const end_val = try mod.intValue(tag_ty, end);
+ const pt = sema.pt;
+ if (!(try int_val.compareAllWithZeroSema(.gte, pt))) return false;
+ const end_val = try pt.intValue(tag_ty, end);
if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false;
return true;
}
/// Asserts the type is an enum.
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const enum_type = mod.intern_pool.loadEnumType(ty.toIntern());
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false;
- const int_coerced = try mod.getCoerced(int, Type.fromInterned(enum_type.tag_ty));
+ const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty));
return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null;
}
@@ -37554,28 +37920,29 @@ fn intAddWithOverflow(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const vec_len = ty.vectorLen(mod);
const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
const scalar_ty = ty.scalarType(mod);
for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
- const lhs_elem = try lhs.elemValue(sema.mod, i);
- const rhs_elem = try rhs.elemValue(sema.mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return Value.OverflowArithmeticResult{
- .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+ .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } }))),
- .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ } })),
+ .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } }))),
+ } })),
};
}
return sema.intAddWithOverflowScalar(lhs, rhs, ty);
@@ -37587,29 +37954,30 @@ fn intAddWithOverflowScalar(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
return .{
- .overflow_bit = try mod.undefValue(Type.u1),
- .wrapped_result = try mod.undefValue(ty),
+ .overflow_bit = try pt.undefValue(Type.u1),
+ .wrapped_result = try pt.undefValue(ty),
};
}
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- const result = try mod.intValue_big(ty, result_bigint.toConst());
+ const result = try pt.intValue_big(ty, result_bigint.toConst());
return Value.OverflowArithmeticResult{
- .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
+ .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
.wrapped_result = result,
};
}
@@ -37625,12 +37993,13 @@ fn compareAll(
rhs: Value,
ty: Type,
) CompileError!bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen(mod)) : (i += 1) {
- const lhs_elem = try lhs.elemValue(sema.mod, i);
- const rhs_elem = try rhs.elemValue(sema.mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
return false;
}
@@ -37648,13 +38017,13 @@ fn compareScalar(
rhs: Value,
ty: Type,
) CompileError!bool {
- const mod = sema.mod;
- const coerced_lhs = try mod.getCoerced(lhs, ty);
- const coerced_rhs = try mod.getCoerced(rhs, ty);
+ const pt = sema.pt;
+ const coerced_lhs = try pt.getCoerced(lhs, ty);
+ const coerced_rhs = try pt.getCoerced(rhs, ty);
switch (op) {
.eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty),
.neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)),
- else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema),
+ else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, pt, .sema),
}
}
@@ -37664,7 +38033,7 @@ fn valuesEqual(
rhs: Value,
ty: Type,
) CompileError!bool {
- return lhs.eql(rhs, ty, sema.mod);
+ return lhs.eql(rhs, ty, sema.pt.zcu);
}
/// Asserts the values are comparable vectors of type `ty`.
@@ -37675,29 +38044,30 @@ fn compareVector(
rhs: Value,
ty: Type,
) !Value {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
assert(ty.zigTypeTag(mod) == .Vector);
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(sema.mod, i);
- const rhs_elem = try rhs.elemValue(sema.mod, i);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
scalar.* = Value.makeBool(res_bool).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ .ty = (try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
/// Merge lhs with rhs.
/// Asserts that lhs and rhs are both error sets and are resolved.
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
- const mod = sema.mod;
- const ip = &mod.intern_pool;
+ const pt = sema.pt;
+ const ip = &pt.zcu.intern_pool;
const arena = sema.arena;
- const lhs_names = lhs.errorSetNames(mod);
- const rhs_names = rhs.errorSetNames(mod);
+ const lhs_names = lhs.errorSetNames(pt.zcu);
+ const rhs_names = rhs.errorSetNames(pt.zcu);
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(arena, lhs_names.len);
@@ -37708,7 +38078,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
try names.put(arena, rhs_names.get(ip)[rhs_index], {});
}
- return mod.errorSetFromUnsortedNames(names.keys());
+ return pt.errorSetFromUnsortedNames(names.keys());
}
/// Avoids crashing the compiler when asking if inferred allocations are noreturn.
@@ -37718,7 +38088,7 @@ fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool {
.inferred_alloc, .inferred_alloc_comptime => return false,
else => {},
};
- return sema.typeOf(ref).isNoReturn(sema.mod);
+ return sema.typeOf(ref).isNoReturn(sema.pt.zcu);
}
/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type.
@@ -37727,11 +38097,12 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
.inferred_alloc, .inferred_alloc_comptime => return false,
else => {},
};
- return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
+ return sema.typeOf(ref).zigTypeTag(sema.pt.zcu) == tag;
}
pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
- if (!sema.mod.comp.debug_incremental) return;
+ const zcu = sema.pt.zcu;
+ if (!zcu.comp.debug_incremental) return;
// Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields
// of a type and they use `@This()`. This dependency would be unnecessary, and in fact would
@@ -37747,11 +38118,11 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
else
.{ .decl = sema.owner_decl_index },
);
- try sema.mod.intern_pool.addDependency(sema.gpa, depender, dependee);
+ try zcu.intern_pool.addDependency(sema.gpa, depender, dependee);
}
fn isComptimeMutablePtr(sema: *Sema, val: Value) bool {
- return switch (sema.mod.intern_pool.indexToKey(val.toIntern())) {
+ return switch (sema.pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.slice => |slice| sema.isComptimeMutablePtr(Value.fromInterned(slice.ptr)),
.ptr => |ptr| switch (ptr.base_addr) {
.anon_decl, .decl, .int => false,
@@ -37766,7 +38137,7 @@ fn isComptimeMutablePtr(sema: *Sema, val: Value) bool {
fn checkRuntimeValue(sema: *Sema, ptr: Air.Inst.Ref) bool {
const val = ptr.toInterned() orelse return true;
- return !Value.fromInterned(val).canMutateComptimeVarState(sema.mod);
+ return !Value.fromInterned(val).canMutateComptimeVarState(sema.pt.zcu);
}
fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Air.Inst.Ref) CompileError!void {
@@ -37781,7 +38152,8 @@ fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Ai
/// Returns true if any value contained in `val` is undefined.
fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool {
- const mod = sema.mod;
+ const pt = sema.pt;
+ const mod = pt.zcu;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => true,
.simple_value => |v| v == .undefined,
@@ -37807,13 +38179,14 @@ fn sliceToIpString(
slice_val: Value,
reason: NeededComptimeReason,
) CompileError!InternPool.NullTerminatedString {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const slice_ty = slice_val.typeOf(zcu);
assert(slice_ty.isSlice(zcu));
assert(slice_ty.childType(zcu).toIntern() == .u8_type);
const array_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
const array_ty = array_val.typeOf(zcu);
- return array_val.toIpString(array_ty, zcu);
+ return array_val.toIpString(array_ty, pt);
}
/// Given a slice value, attempts to dereference it into a comptime-known array.
@@ -37840,7 +38213,8 @@ fn maybeDerefSliceAsArray(
src: LazySrcLoc,
slice_val: Value,
) CompileError!?Value {
- const zcu = sema.mod;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
assert(slice_val.typeOf(zcu).isSlice(zcu));
const slice = switch (ip.indexToKey(slice_val.toIntern())) {
@@ -37849,19 +38223,19 @@ fn maybeDerefSliceAsArray(
else => unreachable,
};
const elem_ty = Type.fromInterned(slice.ty).childType(zcu);
- const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu);
- const array_ty = try zcu.arrayType(.{
+ const len = try Value.fromInterned(slice.len).toUnsignedIntSema(pt);
+ const array_ty = try pt.arrayType(.{
.child = elem_ty.toIntern(),
.len = len,
});
- const ptr_ty = try zcu.ptrTypeSema(p: {
+ const ptr_ty = try pt.ptrTypeSema(p: {
var p = Type.fromInterned(slice.ty).ptrInfo(zcu);
p.flags.size = .One;
p.child = array_ty.toIntern();
p.sentinel = .none;
break :p p;
});
- const casted_ptr = try zcu.getCoerced(Value.fromInterned(slice.ptr), ptr_ty);
+ const casted_ptr = try pt.getCoerced(Value.fromInterned(slice.ptr), ptr_ty);
return sema.pointerDeref(block, src, casted_ptr, ptr_ty);
}
@@ -37879,7 +38253,7 @@ fn analyzeUnreachable(sema: *Sema, block: *Block, src: LazySrcLoc, safety_check:
pub fn flushExports(sema: *Sema) !void {
if (sema.exports.items.len == 0) return;
- const zcu = sema.mod;
+ const zcu = sema.pt.zcu;
const gpa = zcu.gpa;
const unit = sema.ownerUnit();
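The Sema.zig hunks above repeat one mechanical pattern: where a function used to fetch `const mod = sema.mod;`, it now fetches `const pt = sema.pt;` and derives `const mod = pt.zcu;`, passing `pt` to anything that creates or interns values (`pt.intern`, `pt.intValue`, `pt.getCoerced`, `pt.vectorType`) and `pt.zcu` to read-only queries (`zigTypeTag`, `vectorLen`, `isUndef`). A minimal sketch of that call-site shape, using a hypothetical helper (not part of this commit) built only from calls that appear in the hunks above:

fn exampleIntSplat(sema: *Sema, ty: Type, n: u64) !Value {
    const pt = sema.pt; // per-thread handle: owns value creation and interning
    const zcu = pt.zcu; // shared compilation state: read-only type queries
    // Assumes `ty` is an integer type or a vector of integers.
    const scalar = try pt.intValue(ty.scalarType(zcu), n);
    if (!ty.isVector(zcu)) return scalar;
    const elems = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
    @memset(elems, scalar.toIntern());
    return Value.fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = elems },
    } }));
}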
src/Type.zig
@@ -136,16 +136,16 @@ pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt
pub const Formatter = std.fmt.Formatter(format2);
-pub fn fmt(ty: Type, module: *Module) Formatter {
+pub fn fmt(ty: Type, pt: Zcu.PerThread) Formatter {
return .{ .data = .{
.ty = ty,
- .module = module,
+ .pt = pt,
} };
}
const FormatContext = struct {
ty: Type,
- module: *Module,
+ pt: Zcu.PerThread,
};
fn format2(
@@ -156,7 +156,7 @@ fn format2(
) !void {
comptime assert(unused_format_string.len == 0);
_ = options;
- return print(ctx.ty, writer, ctx.module);
+ return print(ctx.ty, writer, ctx.pt);
}
pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) {
@@ -178,7 +178,8 @@ pub fn dump(
/// Prints a name suitable for `@typeName`.
/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
-pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
+pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error!void {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
@@ -193,8 +194,8 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (info.sentinel != .none) switch (info.flags.size) {
.One, .C => unreachable,
- .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
- .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
+ .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt, null)}),
+ .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt, null)}),
} else switch (info.flags.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
@@ -208,7 +209,7 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
const alignment = if (info.flags.alignment != .none)
info.flags.alignment
else
- Type.fromInterned(info.child).abiAlignment(mod);
+ Type.fromInterned(info.child).abiAlignment(pt);
try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
@@ -230,39 +231,39 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (info.flags.is_volatile) try writer.writeAll("volatile ");
if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero ");
- try print(Type.fromInterned(info.child), writer, mod);
+ try print(Type.fromInterned(info.child), writer, pt);
return;
},
.array_type => |array_type| {
if (array_type.sentinel == .none) {
try writer.print("[{d}]", .{array_type.len});
- try print(Type.fromInterned(array_type.child), writer, mod);
+ try print(Type.fromInterned(array_type.child), writer, pt);
} else {
try writer.print("[{d}:{}]", .{
array_type.len,
- Value.fromInterned(array_type.sentinel).fmtValue(mod, null),
+ Value.fromInterned(array_type.sentinel).fmtValue(pt, null),
});
- try print(Type.fromInterned(array_type.child), writer, mod);
+ try print(Type.fromInterned(array_type.child), writer, pt);
}
return;
},
.vector_type => |vector_type| {
try writer.print("@Vector({d}, ", .{vector_type.len});
- try print(Type.fromInterned(vector_type.child), writer, mod);
+ try print(Type.fromInterned(vector_type.child), writer, pt);
try writer.writeAll(")");
return;
},
.opt_type => |child| {
try writer.writeByte('?');
- return print(Type.fromInterned(child), writer, mod);
+ return print(Type.fromInterned(child), writer, pt);
},
.error_union_type => |error_union_type| {
- try print(Type.fromInterned(error_union_type.error_set_type), writer, mod);
+ try print(Type.fromInterned(error_union_type.error_set_type), writer, pt);
try writer.writeByte('!');
if (error_union_type.payload_type == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(Type.fromInterned(error_union_type.payload_type), writer, mod);
+ try print(Type.fromInterned(error_union_type.payload_type), writer, pt);
}
return;
},
@@ -355,10 +356,10 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)});
}
- try print(Type.fromInterned(field_ty), writer, mod);
+ try print(Type.fromInterned(field_ty), writer, pt);
if (val != .none) {
- try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)});
+ try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt, null)});
}
}
try writer.writeAll("}");
@@ -395,7 +396,7 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (param_ty == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(Type.fromInterned(param_ty), writer, mod);
+ try print(Type.fromInterned(param_ty), writer, pt);
}
}
if (fn_info.is_var_args) {
@@ -413,13 +414,13 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (fn_info.return_type == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(Type.fromInterned(fn_info.return_type), writer, mod);
+ try print(Type.fromInterned(fn_info.return_type), writer, pt);
}
},
.anyframe_type => |child| {
if (child == .none) return writer.writeAll("anyframe");
try writer.writeAll("anyframe->");
- return print(Type.fromInterned(child), writer, mod);
+ return print(Type.fromInterned(child), writer, pt);
},
// values, not types
@@ -475,10 +476,11 @@ const RuntimeBitsError = SemaError || error{NeedLazy};
/// may return false positives.
pub fn hasRuntimeBitsAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
ignore_comptime_only: bool,
strat: ResolveStratLazy,
) RuntimeBitsError!bool {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
// False because it is a comptime-only type.
@@ -490,16 +492,16 @@ pub fn hasRuntimeBitsAdvanced(
// to comptime-only types do not, with the exception of function pointers.
if (ignore_comptime_only) return true;
return switch (strat) {
- .sema => !try ty.comptimeOnlyAdvanced(mod, .sema),
- .eager => !ty.comptimeOnly(mod),
+ .sema => !try ty.comptimeOnlyAdvanced(pt, .sema),
+ .eager => !ty.comptimeOnly(pt),
.lazy => error.NeedLazy,
};
},
.anyframe_type => true,
.array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and
- try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
.vector_type => |vector_type| return vector_type.len > 0 and
- try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
.opt_type => |child| {
const child_ty = Type.fromInterned(child);
if (child_ty.isNoReturn(mod)) {
@@ -508,8 +510,8 @@ pub fn hasRuntimeBitsAdvanced(
}
if (ignore_comptime_only) return true;
return switch (strat) {
- .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema),
- .eager => !child_ty.comptimeOnly(mod),
+ .sema => !try child_ty.comptimeOnlyAdvanced(pt, .sema),
+ .eager => !child_ty.comptimeOnly(pt),
.lazy => error.NeedLazy,
};
},
@@ -580,14 +582,14 @@ pub fn hasRuntimeBitsAdvanced(
return true;
}
switch (strat) {
- .sema => try ty.resolveFields(mod),
+ .sema => try ty.resolveFields(pt),
.eager => assert(struct_type.haveFieldTypes(ip)),
.lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
}
for (0..struct_type.field_types.len) |i| {
if (struct_type.comptime_bits.getBit(ip, i)) continue;
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -596,7 +598,7 @@ pub fn hasRuntimeBitsAdvanced(
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
- if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
+ if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) return true;
}
return false;
},
@@ -617,21 +619,21 @@ pub fn hasRuntimeBitsAdvanced(
// tag_ty will be `none` if this union's tag type is not resolved yet,
// in which case we want control flow to continue down below.
if (tag_ty != .none and
- try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
{
return true;
}
},
}
switch (strat) {
- .sema => try ty.resolveFields(mod),
+ .sema => try ty.resolveFields(pt),
.eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
.lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
return error.NeedLazy,
}
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
- if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -639,7 +641,7 @@ pub fn hasRuntimeBitsAdvanced(
},
.opaque_type => true,
- .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
// values, not types
.undef,
@@ -777,41 +779,41 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
};
}
-pub fn hasRuntimeBits(ty: Type, mod: *Module) bool {
- return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable;
+pub fn hasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
+ return hasRuntimeBitsAdvanced(ty, pt, false, .eager) catch unreachable;
}
-pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
- return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable;
+pub fn hasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool {
+ return hasRuntimeBitsAdvanced(ty, pt, true, .eager) catch unreachable;
}
-pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool {
- return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable;
+pub fn fnHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
+ return ty.fnHasRuntimeBitsAdvanced(pt, .normal) catch unreachable;
}
/// Determines whether a function type has runtime bits, i.e. whether a
/// function with this type can exist at runtime.
/// Asserts that `ty` is a function type.
-pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
- const fn_info = mod.typeToFunc(ty).?;
+pub fn fnHasRuntimeBitsAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool {
+ const fn_info = pt.zcu.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
if (fn_info.cc == .Inline) return false;
- return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat);
+ return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(pt, strat);
}
-pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
- switch (ty.zigTypeTag(mod)) {
- .Fn => return ty.fnHasRuntimeBits(mod),
- else => return ty.hasRuntimeBits(mod),
+pub fn isFnOrHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
+ switch (ty.zigTypeTag(pt.zcu)) {
+ .Fn => return ty.fnHasRuntimeBits(pt),
+ else => return ty.hasRuntimeBits(pt),
}
}
/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
-pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
- return switch (ty.zigTypeTag(mod)) {
+pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool {
+ return switch (ty.zigTypeTag(pt.zcu)) {
.Fn => true,
- else => return ty.hasRuntimeBitsIgnoreComptime(mod),
+ else => return ty.hasRuntimeBitsIgnoreComptime(pt),
};
}
@@ -820,24 +822,24 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool {
}
/// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
-pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
- return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable;
+pub fn ptrAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
+ return ptrAlignmentAdvanced(ty, pt, .normal) catch unreachable;
}
-pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment {
- return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn ptrAlignmentAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment {
+ return switch (pt.zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| {
if (ptr_type.flags.alignment != .none)
return ptr_type.flags.alignment;
if (strat == .sema) {
- const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema);
+ const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .sema);
return res.scalar;
}
- return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+ return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar;
},
- .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat),
+ .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(pt, strat),
else => unreachable,
};
}
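The Type.zig hunks make the mirror-image change at the declaration site: public helpers swap `mod: *Module` for `pt: Zcu.PerThread`, derive the `Zcu` with `const mod = pt.zcu;` when they only read it, and thread `pt` into anything that may intern a lazy value. A short sketch written against the new signatures (the helper itself is hypothetical; the calls it makes are the updated `zigTypeTag`, `hasRuntimeBits`, and `abiSize` shown in this file):

pub fn exampleAbiBitCount(ty: Type, pt: Zcu.PerThread) u64 {
    const mod = pt.zcu; // read-only queries keep taking the Zcu
    if (ty.zigTypeTag(mod) == .Fn) return 0; // function bodies carry no data bits
    if (!ty.hasRuntimeBits(pt)) return 0; // bit-presence checks now thread `pt`
    return ty.abiSize(pt) * 8; // size queries take `pt` so lazy values can be interned on demand
}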
@@ -851,16 +853,16 @@ pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace {
}
/// Never returns `none`. Asserts that all necessary type resolution is already done.
-pub fn abiAlignment(ty: Type, mod: *Module) Alignment {
- return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+pub fn abiAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
+ return (ty.abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar;
}
/// May capture a reference to `ty`.
/// Returned value has type `comptime_int`.
-pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
- switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
+pub fn lazyAbiAlignment(ty: Type, pt: Zcu.PerThread) !Value {
+ switch (try ty.abiAlignmentAdvanced(pt, .lazy)) {
.val => |val| return val,
- .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
+ .scalar => |x| return pt.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
}
}
@@ -907,38 +909,39 @@ pub const ResolveStrat = enum {
/// necessary, possibly returning a CompileError.
pub fn abiAlignmentAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiAlignmentAdvanced {
+ const mod = pt.zcu;
const target = mod.getTarget();
const use_llvm = mod.comp.config.use_llvm;
const ip = &mod.intern_pool;
switch (ty.toIntern()) {
- .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" },
+ .empty_struct_type => return .{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
- if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
+ if (int_type.bits == 0) return .{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) };
},
.ptr_type, .anyframe_type => {
return .{ .scalar = ptrAbiAlignment(target) };
},
.array_type => |array_type| {
- return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat);
+ return Type.fromInterned(array_type.child).abiAlignmentAdvanced(pt, strat);
},
.vector_type => |vector_type| {
if (vector_type.len == 0) return .{ .scalar = .@"1" };
switch (mod.comp.getZigBackend()) {
else => {
- const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema));
+ const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, .sema));
if (elem_bits == 0) return .{ .scalar = .@"1" };
const bytes = ((elem_bits * vector_type.len) + 7) / 8;
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return .{ .scalar = Alignment.fromByteUnits(alignment) };
},
.stage2_c => {
- return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat);
+ return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(pt, strat);
},
.stage2_x86_64 => {
if (vector_type.child == .bool_type) {
@@ -949,7 +952,7 @@ pub fn abiAlignmentAdvanced(
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return .{ .scalar = Alignment.fromByteUnits(alignment) };
}
- const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
if (elem_bytes == 0) return .{ .scalar = .@"1" };
const bytes = elem_bytes * vector_type.len;
if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
@@ -959,12 +962,12 @@ pub fn abiAlignmentAdvanced(
}
},
- .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
- .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)),
+ .opt_type => return ty.abiAlignmentAdvancedOptional(pt, strat),
+ .error_union_type => |info| return ty.abiAlignmentAdvancedErrorUnion(pt, strat, Type.fromInterned(info.payload_type)),
.error_set_type, .inferred_error_set_type => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
+ if (bits == 0) return .{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(bits, target, use_llvm) };
},
@@ -1012,10 +1015,7 @@ pub fn abiAlignmentAdvanced(
},
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
- else => {
- const u80_ty: Type = .{ .ip_index = .u80_type };
- return .{ .scalar = abiAlignment(u80_ty, mod) };
- },
+ else => return .{ .scalar = Type.u80.abiAlignment(pt) },
},
.f128 => switch (target.c_type_bit_size(.longdouble)) {
128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
@@ -1024,7 +1024,7 @@ pub fn abiAlignmentAdvanced(
.anyerror, .adhoc_inferred_error_set => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
+ if (bits == 0) return .{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(bits, target, use_llvm) };
},
@@ -1044,22 +1044,22 @@ pub fn abiAlignmentAdvanced(
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") {
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))),
+ } })),
},
.eager => {},
}
- return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) };
+ return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(pt) };
}
if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) {
.eager => unreachable, // struct alignment not resolved
- .sema => try ty.resolveStructAlignment(mod),
- .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{
+ .sema => try ty.resolveStructAlignment(pt),
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })) },
@@ -1071,15 +1071,15 @@ pub fn abiAlignmentAdvanced(
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
- switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) {
+ switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(pt, strat)) {
.scalar => |field_align| big_align = big_align.max(field_align),
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
.sema => unreachable, // passed to abiAlignmentAdvanced above
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
},
}
}
@@ -1090,18 +1090,18 @@ pub fn abiAlignmentAdvanced(
if (union_type.flagsPtr(ip).alignment == .none) switch (strat) {
.eager => unreachable, // union layout not resolved
- .sema => try ty.resolveUnionAlignment(mod),
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .sema => try ty.resolveUnionAlignment(pt),
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
};
return .{ .scalar = union_type.flagsPtr(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => return .{
- .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod),
+ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(pt),
},
// values, not types
@@ -1131,91 +1131,92 @@ pub fn abiAlignmentAdvanced(
fn abiAlignmentAdvancedErrorUnion(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
payload_ty: Type,
) SemaError!AbiAlignmentAdvanced {
// This code needs to be kept in sync with the equivalent switch prong
// in abiSizeAdvanced.
- const code_align = abiAlignment(Type.anyerror, mod);
+ const code_align = Type.anyerror.abiAlignment(pt);
switch (strat) {
.eager, .sema => {
- if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
})) {
return .{ .scalar = code_align };
}
return .{ .scalar = code_align.max(
- (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar,
+ (try payload_ty.abiAlignmentAdvanced(pt, strat)).scalar,
) };
},
.lazy => {
- switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) {
+ switch (try payload_ty.abiAlignmentAdvanced(pt, strat)) {
.scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
.val => {},
}
- return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) };
+ } })) };
},
}
}
fn abiAlignmentAdvancedOptional(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiAlignmentAdvanced {
+ const mod = pt.zcu;
const target = mod.getTarget();
const child_type = ty.optionalChild(mod);
switch (child_type.zigTypeTag(mod)) {
.Pointer => return .{ .scalar = ptrAbiAlignment(target) },
- .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
+ .ErrorSet => return Type.anyerror.abiAlignmentAdvanced(pt, strat),
.NoReturn => return .{ .scalar = .@"1" },
else => {},
}
switch (strat) {
.eager, .sema => {
- if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ if (!(child_type.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
})) {
return .{ .scalar = .@"1" };
}
- return child_type.abiAlignmentAdvanced(mod, strat);
+ return child_type.abiAlignmentAdvanced(pt, strat);
},
- .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
+ .lazy => switch (try child_type.abiAlignmentAdvanced(pt, strat)) {
.scalar => |x| return .{ .scalar = x.max(.@"1") },
- .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
},
}
}
/// May capture a reference to `ty`.
-pub fn lazyAbiSize(ty: Type, mod: *Module) !Value {
- switch (try ty.abiSizeAdvanced(mod, .lazy)) {
+pub fn lazyAbiSize(ty: Type, pt: Zcu.PerThread) !Value {
+ switch (try ty.abiSizeAdvanced(pt, .lazy)) {
.val => |val| return val,
- .scalar => |x| return mod.intValue(Type.comptime_int, x),
+ .scalar => |x| return pt.intValue(Type.comptime_int, x),
}
}
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
-pub fn abiSize(ty: Type, mod: *Module) u64 {
- return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar;
+pub fn abiSize(ty: Type, pt: Zcu.PerThread) u64 {
+ return (abiSizeAdvanced(ty, pt, .eager) catch unreachable).scalar;
}
const AbiSizeAdvanced = union(enum) {
@@ -1231,38 +1232,39 @@ const AbiSizeAdvanced = union(enum) {
/// necessary, possibly returning a CompileError.
pub fn abiSizeAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiSizeAdvanced {
+ const mod = pt.zcu;
const target = mod.getTarget();
const use_llvm = mod.comp.config.use_llvm;
const ip = &mod.intern_pool;
switch (ty.toIntern()) {
- .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 },
+ .empty_struct_type => return .{ .scalar = 0 },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
- if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) };
+ if (int_type.bits == 0) return .{ .scalar = 0 };
+ return .{ .scalar = intAbiSize(int_type.bits, target, use_llvm) };
},
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
},
- .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
+ .anyframe_type => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.array_type => |array_type| {
const len = array_type.lenIncludingSentinel();
if (len == 0) return .{ .scalar = 0 };
- switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) {
+ switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(pt, strat)) {
.scalar => |elem_size| return .{ .scalar = len * elem_size },
.val => switch (strat) {
.sema, .eager => unreachable,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
},
}
},
@@ -1270,71 +1272,71 @@ pub fn abiSizeAdvanced(
const sub_strat: ResolveStrat = switch (strat) {
.sema => .sema,
.eager => .normal,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
};
- const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
+ const alignment = switch (try ty.abiAlignmentAdvanced(pt, strat)) {
.scalar => |x| x,
- .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
};
const total_bytes = switch (mod.comp.getZigBackend()) {
else => total_bytes: {
- const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat);
+ const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, sub_strat);
const total_bits = elem_bits * vector_type.len;
break :total_bytes (total_bits + 7) / 8;
},
.stage2_c => total_bytes: {
- const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
break :total_bytes elem_bytes * vector_type.len;
},
.stage2_x86_64 => total_bytes: {
if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
- const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
break :total_bytes elem_bytes * vector_type.len;
},
};
- return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) };
+ return .{ .scalar = alignment.forward(total_bytes) };
},
- .opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
+ .opt_type => return ty.abiSizeAdvancedOptional(pt, strat),
.error_set_type, .inferred_error_set_type => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) };
+ if (bits == 0) return .{ .scalar = 0 };
+ return .{ .scalar = intAbiSize(bits, target, use_llvm) };
},
.error_union_type => |error_union_type| {
const payload_ty = Type.fromInterned(error_union_type.payload_type);
// This code needs to be kept in sync with the equivalent switch prong
// in abiAlignmentAdvanced.
- const code_size = abiSize(Type.anyerror, mod);
- if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ const code_size = Type.anyerror.abiSize(pt);
+ if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
})) {
// Same as anyerror.
- return AbiSizeAdvanced{ .scalar = code_size };
+ return .{ .scalar = code_size };
}
- const code_align = abiAlignment(Type.anyerror, mod);
- const payload_align = abiAlignment(payload_ty, mod);
- const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) {
+ const code_align = Type.anyerror.abiAlignment(pt);
+ const payload_align = payload_ty.abiAlignment(pt);
+ const payload_size = switch (try payload_ty.abiSizeAdvanced(pt, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
},
};
@@ -1350,7 +1352,7 @@ pub fn abiSizeAdvanced(
size += code_size;
size = payload_align.forward(size);
}
- return AbiSizeAdvanced{ .scalar = size };
+ return .{ .scalar = size };
},
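As a worked example of the error-union layout above (a sketch, not part of the commit; assumes the default 16-bit error set so that anyerror is 2 bytes): the size is the error code plus the payload, rounded up to the larger of the two alignments.

    const std = @import("std");

    test "error union size is code plus payload, rounded to the larger alignment" {
        try std.testing.expectEqual(@as(usize, 2), @sizeOf(anyerror));
        // 2 (code) + 1 (payload) = 3, forwarded to the 2-byte code alignment -> 4.
        try std.testing.expectEqual(@as(usize, 4), @sizeOf(anyerror!u8));
    }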
.func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) {
@@ -1362,34 +1364,31 @@ pub fn abiSizeAdvanced(
.float_mode,
.reduce_op,
.call_modifier,
- => return AbiSizeAdvanced{ .scalar = 1 },
+ => return .{ .scalar = 1 },
- .f16 => return AbiSizeAdvanced{ .scalar = 2 },
- .f32 => return AbiSizeAdvanced{ .scalar = 4 },
- .f64 => return AbiSizeAdvanced{ .scalar = 8 },
- .f128 => return AbiSizeAdvanced{ .scalar = 16 },
+ .f16 => return .{ .scalar = 2 },
+ .f32 => return .{ .scalar = 4 },
+ .f64 => return .{ .scalar = 8 },
+ .f128 => return .{ .scalar = 16 },
.f80 => switch (target.c_type_bit_size(.longdouble)) {
- 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
- else => {
- const u80_ty: Type = .{ .ip_index = .u80_type };
- return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) };
- },
+ 80 => return .{ .scalar = target.c_type_byte_size(.longdouble) },
+ else => return .{ .scalar = Type.u80.abiSize(pt) },
},
.usize,
.isize,
- => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
-
- .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) },
- .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) },
- .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) },
- .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) },
- .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) },
- .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) },
- .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) },
- .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) },
- .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) },
- .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
+ => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
+
+ .c_char => return .{ .scalar = target.c_type_byte_size(.char) },
+ .c_short => return .{ .scalar = target.c_type_byte_size(.short) },
+ .c_ushort => return .{ .scalar = target.c_type_byte_size(.ushort) },
+ .c_int => return .{ .scalar = target.c_type_byte_size(.int) },
+ .c_uint => return .{ .scalar = target.c_type_byte_size(.uint) },
+ .c_long => return .{ .scalar = target.c_type_byte_size(.long) },
+ .c_ulong => return .{ .scalar = target.c_type_byte_size(.ulong) },
+ .c_longlong => return .{ .scalar = target.c_type_byte_size(.longlong) },
+ .c_ulonglong => return .{ .scalar = target.c_type_byte_size(.ulonglong) },
+ .c_longdouble => return .{ .scalar = target.c_type_byte_size(.longdouble) },
.anyopaque,
.void,
@@ -1399,12 +1398,12 @@ pub fn abiSizeAdvanced(
.null,
.undefined,
.enum_literal,
- => return AbiSizeAdvanced{ .scalar = 0 },
+ => return .{ .scalar = 0 },
.anyerror, .adhoc_inferred_error_set => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) };
+ if (bits == 0) return .{ .scalar = 0 };
+ return .{ .scalar = intAbiSize(bits, target, use_llvm) };
},
.prefetch_options => unreachable, // missing call to resolveTypeFields
@@ -1418,22 +1417,22 @@ pub fn abiSizeAdvanced(
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy => switch (struct_type.layout) {
.@"packed" => {
if (struct_type.backingIntType(ip).* == .none) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))),
+ } })),
};
},
.auto, .@"extern" => {
if (!struct_type.haveLayout(ip)) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))),
+ } })),
};
},
},
@@ -1441,7 +1440,7 @@ pub fn abiSizeAdvanced(
}
switch (struct_type.layout) {
.@"packed" => return .{
- .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod),
+ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(pt),
},
.auto, .@"extern" => {
assert(struct_type.haveLayout(ip));
@@ -1451,25 +1450,25 @@ pub fn abiSizeAdvanced(
},
.anon_struct_type => |tuple| {
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy, .eager => {},
}
const field_count = tuple.types.len;
if (field_count == 0) {
- return AbiSizeAdvanced{ .scalar = 0 };
+ return .{ .scalar = 0 };
}
- return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
+ return .{ .scalar = ty.structFieldOffset(field_count, pt) };
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))),
+ } })),
},
.eager => {},
}
@@ -1478,7 +1477,7 @@ pub fn abiSizeAdvanced(
return .{ .scalar = union_type.size(ip).* };
},
.opaque_type => unreachable, // no size available
- .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) },
+ .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) },
// values, not types
.undef,
@@ -1507,36 +1506,37 @@ pub fn abiSizeAdvanced(
fn abiSizeAdvancedOptional(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiSizeAdvanced {
+ const mod = pt.zcu;
const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn(mod)) {
- return AbiSizeAdvanced{ .scalar = 0 };
+ return .{ .scalar = 0 };
}
- if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ if (!(child_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
- })) return AbiSizeAdvanced{ .scalar = 1 };
+ })) return .{ .scalar = 1 };
if (ty.optionalReprIsPayload(mod)) {
- return abiSizeAdvanced(child_ty, mod, strat);
+ return child_ty.abiSizeAdvanced(pt, strat);
}
- const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) {
+ const payload_size = switch (try child_ty.abiSizeAdvanced(pt, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
},
};
@@ -1544,8 +1544,8 @@ fn abiSizeAdvancedOptional(
// field and a boolean as the second. Since the child type's abi alignment is
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
- return AbiSizeAdvanced{
- .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size,
+ return .{
+ .scalar = (child_ty.abiAlignment(pt).toByteUnits() orelse 0) + payload_size,
};
}
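The layout comment above can be checked directly; a minimal sketch (not part of the commit, assumes a target where u64 has 8-byte ABI alignment): the wrapping bool costs one full alignment unit of the child type.

    const std = @import("std");

    test "optional ABI size is child alignment plus payload size" {
        // abiAlignment(u64) == 8 and abiSize(u64) == 8, so abiSize(?u64) == 8 + 8.
        try std.testing.expectEqual(@as(usize, 8), @alignOf(u64));
        try std.testing.expectEqual(@as(usize, 16), @sizeOf(?u64));
    }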
@@ -1675,15 +1675,16 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
};
}
-pub fn bitSize(ty: Type, mod: *Module) u64 {
- return bitSizeAdvanced(ty, mod, .normal) catch unreachable;
+pub fn bitSize(ty: Type, pt: Zcu.PerThread) u64 {
+ return bitSizeAdvanced(ty, pt, .normal) catch unreachable;
}
pub fn bitSizeAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStrat,
) SemaError!u64 {
+ const mod = pt.zcu;
const target = mod.getTarget();
const ip = &mod.intern_pool;
@@ -1702,22 +1703,22 @@ pub fn bitSizeAdvanced(
if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max(
- (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0,
- (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar,
+ (try elem_ty.abiAlignmentAdvanced(pt, strat_lazy)).scalar.toByteUnits() orelse 0,
+ (try elem_ty.abiSizeAdvanced(pt, strat_lazy)).scalar,
);
if (elem_size == 0) return 0;
- const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat);
+ const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, strat);
return (len - 1) * 8 * elem_size + elem_bit_size;
},
.vector_type => |vector_type| {
const child_ty = Type.fromInterned(vector_type.child);
- const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat);
+ const elem_bit_size = try child_ty.bitSizeAdvanced(pt, strat);
return elem_bit_size * vector_type.len;
},
.opt_type => {
// Optionals and error unions are not packed so their bitsize
// includes padding bits.
- return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.error_set_type, .inferred_error_set_type => return mod.errorSetBits(),
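The array_type and vector_type prongs above boil down to simple arithmetic; a sketch with concrete numbers (not part of the commit):

    const std = @import("std");

    test "array and vector bit sizes" {
        // array_type: (len - 1) * 8 * elem_size + elem_bit_size
        //           = (3 - 1) * 8 * 1 + 7 = 23 bits for [3]u7.
        try std.testing.expectEqual(@as(u64, 23), @bitSizeOf([3]u7));
        // vector_type: elem_bit_size * len = 3 * 4 = 12 bits for @Vector(4, u3).
        try std.testing.expectEqual(@as(u64, 12), @bitSizeOf(@Vector(4, u3)));
    }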
@@ -1725,7 +1726,7 @@ pub fn bitSizeAdvanced(
.error_union_type => {
// Optionals and error unions are not packed so their bitsize
// includes padding bits.
- return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) {
@@ -1783,42 +1784,42 @@ pub fn bitSizeAdvanced(
const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .@"packed";
if (strat == .sema) {
- try ty.resolveFields(mod);
- if (is_packed) try ty.resolveLayout(mod);
+ try ty.resolveFields(pt);
+ if (is_packed) try ty.resolveLayout(pt);
}
if (is_packed) {
- return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat);
+ return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(pt, strat);
}
- return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.anon_struct_type => {
- if (strat == .sema) try ty.resolveFields(mod);
- return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
+ if (strat == .sema) try ty.resolveFields(pt);
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .@"packed";
if (strat == .sema) {
- try ty.resolveFields(mod);
- if (is_packed) try ty.resolveLayout(mod);
+ try ty.resolveFields(pt);
+ if (is_packed) try ty.resolveLayout(pt);
}
if (!is_packed) {
- return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
}
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
var size: u64 = 0;
for (0..union_type.field_types.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
- size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat));
+ size = @max(size, try Type.fromInterned(field_ty).bitSizeAdvanced(pt, strat));
}
return size;
},
.opaque_type => unreachable,
- .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat),
+ .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).bitSizeAdvanced(pt, strat),
// values, not types
.undef,
@@ -1870,7 +1871,7 @@ pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
/// Asserts `ty` is a pointer.
pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size {
- return ptrSizeOrNull(ty, mod).?;
+ return ty.ptrSizeOrNull(mod).?;
}
/// Returns `null` if `ty` is not a pointer.
@@ -2105,29 +2106,28 @@ pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
return mod.unionTagFieldIndex(union_obj, enum_tag);
}
-pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool {
- const ip = &mod.intern_pool;
- const union_obj = mod.typeToUnion(ty).?;
+pub fn unionHasAllZeroBitFieldTypes(ty: Type, pt: Zcu.PerThread) bool {
+ const ip = &pt.zcu.intern_pool;
+ const union_obj = pt.zcu.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
- if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false;
+ if (Type.fromInterned(field_ty).hasRuntimeBits(pt)) return false;
}
return true;
}
/// Returns the type used for backing storage of this union during comptime operations.
/// Asserts the type is either an extern or packed union.
-pub fn unionBackingType(ty: Type, mod: *Module) !Type {
- return switch (ty.containerLayout(mod)) {
- .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
- .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
+pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
+ return switch (ty.containerLayout(pt.zcu)) {
+ .@"extern" => try pt.arrayType(.{ .len = ty.abiSize(pt), .child = .u8_type }),
+ .@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(pt))),
.auto => unreachable,
};
}
-pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout {
- const ip = &mod.intern_pool;
- const union_obj = ip.loadUnionType(ty.toIntern());
- return mod.getUnionLayout(union_obj);
+pub fn unionGetLayout(ty: Type, pt: Zcu.PerThread) Module.UnionLayout {
+ const union_obj = pt.zcu.intern_pool.loadUnionType(ty.toIntern());
+ return pt.getUnionLayout(union_obj);
}
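For unionBackingType above, a small illustration (a sketch, not part of the commit): an extern union is backed by a byte array of its ABI size, a packed union by an unsigned integer of its bit size.

    const std = @import("std");

    test "union backing storage sizes" {
        // extern: ABI size is max(4, 6) forwarded to the 4-byte alignment of u32 -> 8,
        // so the comptime backing type would be [8]u8.
        const E = extern union { a: u32, b: [6]u8 };
        try std.testing.expectEqual(@as(usize, 8), @sizeOf(E));
        // packed: the backing integer has the union's bit size, i.e. u16 here.
        const P = packed union { a: u16, b: u8 };
        try std.testing.expectEqual(@as(u16, 16), @bitSizeOf(P));
    }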
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
@@ -2509,7 +2509,8 @@ pub fn isNumeric(ty: Type, mod: *const Module) bool {
/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
/// resolves field types rather than asserting they are already resolved.
-pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
+pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
+ const mod = pt.zcu;
var ty = starting_type;
const ip = &mod.intern_pool;
while (true) switch (ty.toIntern()) {
@@ -2518,7 +2519,7 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) {
- return try mod.intValue(ty, 0);
+ return try pt.intValue(ty, 0);
} else {
return null;
}
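The int_type prong above is the simplest onePossibleValue case; the same fact, stated as a standalone sketch (not part of the commit):

    const std = @import("std");

    test "a zero-bit integer has exactly one possible value" {
        // u0 can only hold 0, so it needs no runtime storage.
        const only: u0 = 0;
        try std.testing.expectEqual(@as(u0, 0), only);
        try std.testing.expectEqual(@as(usize, 0), @sizeOf(u0));
    }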
@@ -2534,21 +2535,21 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
inline .array_type, .vector_type => |seq_type, seq_tag| {
const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
- if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })));
- if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| {
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ } }));
+ if (try Type.fromInterned(seq_type.child).onePossibleValue(pt)) |opv| {
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
- } })));
+ } }));
}
return null;
},
.opt_type => |child| {
if (child == .noreturn_type) {
- return try mod.nullValue(ty);
+ return try pt.nullValue(ty);
} else {
return null;
}
@@ -2615,17 +2616,17 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
continue;
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- if (try field_ty.onePossibleValue(mod)) |field_opv| {
+ if (try field_ty.onePossibleValue(pt)) |field_opv| {
field_val.* = field_opv.toIntern();
} else return null;
}
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })));
+ } }));
},
.anon_struct_type => |tuple| {
@@ -2637,24 +2638,24 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
// TODO: write something like getCoercedInts to avoid needing to dupe
const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip));
defer mod.gpa.free(duped_values);
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = duped_values },
- } })));
+ } }));
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
- const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
+ const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(pt)) orelse
return null;
if (union_obj.field_types.len == 0) {
- const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
+ const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
const only_field_ty = union_obj.field_types.get(ip)[0];
- const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse
+ const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(pt)) orelse
return null;
- const only = try mod.intern(.{ .un = .{
+ const only = try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = val_val.toIntern(),
@@ -2668,8 +2669,8 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
- const only = try mod.intern(.{ .enum_tag = .{
+ if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(pt)) |int_opv| {
+ const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
@@ -2679,18 +2680,18 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
return null;
},
.auto, .explicit => {
- if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null;
switch (enum_type.names.len) {
0 => {
- const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
+ const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
},
1 => {
if (enum_type.values.len == 0) {
- const only = try mod.intern(.{ .enum_tag = .{
+ const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
- .int = try mod.intern(.{ .int = .{
+ .int = try pt.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
@@ -2733,13 +2734,14 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
/// During semantic analysis, instead call `Sema.typeRequiresComptime` which
/// resolves field types rather than asserting they are already resolved.
-pub fn comptimeOnly(ty: Type, mod: *Module) bool {
- return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable;
+pub fn comptimeOnly(ty: Type, pt: Zcu.PerThread) bool {
+ return ty.comptimeOnlyAdvanced(pt, .normal) catch unreachable;
}
/// `generic_poison` will return false.
/// May return false negatives when structs and unions are having their field types resolved.
-pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
+pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
.empty_struct_type => false,
@@ -2749,19 +2751,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
.ptr_type => |ptr_type| {
const child_ty = Type.fromInterned(ptr_type.child);
switch (child_ty.zigTypeTag(mod)) {
- .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat),
+ .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(pt, strat),
.Opaque => return false,
- else => return child_ty.comptimeOnlyAdvanced(mod, strat),
+ else => return child_ty.comptimeOnlyAdvanced(pt, strat),
}
},
.anyframe_type => |child| {
if (child == .none) return false;
- return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat);
+ return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat);
},
- .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat),
- .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat),
- .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat),
- .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat),
+ .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(pt, strat),
+ .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(pt, strat),
+ .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat),
+ .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(pt, strat),
.error_set_type,
.inferred_error_set_type,
@@ -2836,13 +2838,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
struct_type.flagsPtr(ip).requires_comptime = .wip;
errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
for (0..struct_type.field_types.len) |i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i];
- if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
// Note that this does not cause the layout to
// be considered resolved. Comptime-only types
// still maintain a layout of their
@@ -2861,7 +2863,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
- if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true;
+ if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) return true;
}
return false;
},
@@ -2880,11 +2882,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
- if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
union_type.flagsPtr(ip).requires_comptime = .yes;
return true;
}
@@ -2898,7 +2900,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
.opaque_type => false,
- .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat),
+ .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(pt, strat),
// values, not types
.undef,
@@ -2930,10 +2932,10 @@ pub fn isVector(ty: Type, mod: *const Module) bool {
}
/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
-pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 {
- if (!ty.isVector(zcu)) return 0;
- const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
- return v.len * Type.fromInterned(v.child).bitSize(zcu);
+pub fn totalVectorBits(ty: Type, pt: Zcu.PerThread) u64 {
+ if (!ty.isVector(pt.zcu)) return 0;
+ const v = pt.zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
+ return v.len * Type.fromInterned(v.child).bitSize(pt);
}
pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
@@ -3013,23 +3015,25 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex {
}
// Works for vectors and vectors of integers.
-pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const mod = pt.zcu;
+ const scalar = try minIntScalar(ty.scalarType(mod), pt, dest_ty.scalarType(mod));
+ return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
- } }))) else scalar;
+ } })) else scalar;
}
/// Asserts that the type is an integer.
-pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
- if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
- if (info.bits == 0) return mod.intValue(dest_ty, -1);
+ if (info.signedness == .unsigned) return pt.intValue(dest_ty, 0);
+ if (info.bits == 0) return pt.intValue(dest_ty, -1);
if (std.math.cast(u6, info.bits - 1)) |shift| {
const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
+ return pt.intValue(dest_ty, n);
}
var res = try std.math.big.int.Managed.init(mod.gpa);
@@ -3037,31 +3041,32 @@ pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
- return mod.intValue_big(dest_ty, res.toConst());
+ return pt.intValue_big(dest_ty, res.toConst());
}
// Works for vectors and vectors of integers.
/// The returned Value will have type dest_ty.
-pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const mod = pt.zcu;
+ const scalar = try maxIntScalar(ty.scalarType(mod), pt, dest_ty.scalarType(mod));
+ return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
- } }))) else scalar;
+ } })) else scalar;
}
/// The returned Value will have type dest_ty.
-pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const info = ty.intInfo(mod);
+pub fn maxIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const info = ty.intInfo(pt.zcu);
switch (info.bits) {
0 => return switch (info.signedness) {
- .signed => try mod.intValue(dest_ty, -1),
- .unsigned => try mod.intValue(dest_ty, 0),
+ .signed => try pt.intValue(dest_ty, -1),
+ .unsigned => try pt.intValue(dest_ty, 0),
},
1 => return switch (info.signedness) {
- .signed => try mod.intValue(dest_ty, 0),
- .unsigned => try mod.intValue(dest_ty, 1),
+ .signed => try pt.intValue(dest_ty, 0),
+ .unsigned => try pt.intValue(dest_ty, 1),
},
else => {},
}
@@ -3069,20 +3074,20 @@ pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
.signed => {
const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
+ return pt.intValue(dest_ty, n);
},
.unsigned => {
const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
+ return pt.intValue(dest_ty, n);
},
};
- var res = try std.math.big.int.Managed.init(mod.gpa);
+ var res = try std.math.big.int.Managed.init(pt.zcu.gpa);
defer res.deinit();
try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
- return mod.intValue_big(dest_ty, res.toConst());
+ return pt.intValue_big(dest_ty, res.toConst());
}
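The 63 - shift trick used by minIntScalar and maxIntScalar above avoids a big-int allocation for any integer type of 64 bits or fewer; a sketch of the arithmetic (not part of the commit):

    const std = @import("std");

    test "min/max int via the 63 - shift trick" {
        const bits = 8; // an i8
        const shift: u6 = @intCast(bits - 1);
        const max = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
        const min = @as(i64, std.math.minInt(i64)) >> (63 - shift);
        try std.testing.expectEqual(@as(i64, 127), max);
        try std.testing.expectEqual(@as(i64, -128), min);
    }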
/// Asserts the type is an enum or a union.
@@ -3188,26 +3193,26 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
};
}
-pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
- return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable;
+pub fn structFieldAlign(ty: Type, index: usize, pt: Zcu.PerThread) Alignment {
+ return ty.structFieldAlignAdvanced(index, pt, .normal) catch unreachable;
}
-pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment {
- const ip = &zcu.intern_pool;
+pub fn structFieldAlignAdvanced(ty: Type, index: usize, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment {
+ const ip = &pt.zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .@"packed");
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
- return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
+ return pt.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
},
.anon_struct_type => |anon_struct| {
- return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
+ return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
- return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
+ return pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
},
else => unreachable,
}
@@ -3233,7 +3238,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
}
}
-pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
+pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Value {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
@@ -3242,13 +3248,13 @@ pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
assert(struct_type.haveFieldInits(ip));
return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
} else {
- return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod);
+ return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt);
}
},
.anon_struct_type => |tuple| {
const val = tuple.values.get(ip)[index];
if (val == .none) {
- return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod);
+ return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt);
} else {
return Value.fromInterned(val);
}
@@ -3272,7 +3278,8 @@ pub const FieldOffset = struct {
};
/// Supports structs and unions.
-pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
+pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
@@ -3287,17 +3294,17 @@ pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
var big_align: Alignment = .none;
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) {
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) {
// comptime field
if (i == index) return offset;
continue;
}
- const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
big_align = big_align.max(field_align);
offset = field_align.forward(offset);
if (i == index) return offset;
- offset += Type.fromInterned(field_ty).abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(pt);
}
offset = big_align.max(.@"1").forward(offset);
return offset;
@@ -3307,7 +3314,7 @@ pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip))
return 0;
- const layout = mod.getUnionLayout(union_type);
+ const layout = pt.getUnionLayout(union_type);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// {Tag, Payload}
return layout.payload_align.forward(layout.tag_size);
@@ -3421,12 +3428,13 @@ pub fn optEuBaseType(ty: Type, mod: *Module) Type {
};
}
-pub fn toUnsigned(ty: Type, mod: *Module) !Type {
+pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
+ const mod = pt.zcu;
return switch (ty.zigTypeTag(mod)) {
- .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
- .Vector => try mod.vectorType(.{
+ .Int => pt.intType(.unsigned, ty.intInfo(mod).bits),
+ .Vector => try pt.vectorType(.{
.len = ty.vectorLen(mod),
- .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
+ .child = (try ty.childType(mod).toUnsigned(pt)).toIntern(),
}),
else => unreachable,
};
@@ -3492,7 +3500,7 @@ pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
return .{ cur_ty, cur_len };
}
-pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
+pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, pt: Zcu.PerThread) union(enum) {
/// The result is a bit-pointer with the same value and a new packed offset.
bit_ptr: InternPool.Key.PtrType.PackedOffset,
/// The result is a standard pointer.
@@ -3505,6 +3513,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
} {
comptime assert(Type.packed_struct_layout_version == 2);
+ const zcu = pt.zcu;
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
const field_ty = struct_ty.structFieldType(field_idx, zcu);
@@ -3515,7 +3524,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
if (i == field_idx) {
bit_offset = running_bits;
}
- running_bits += @intCast(f_ty.bitSize(zcu));
+ running_bits += @intCast(f_ty.bitSize(pt));
}
const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0)
@@ -3532,9 +3541,9 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
- if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
+ if (res_bit_offset % 8 == 0 and field_ty.bitSize(pt) == field_ty.abiSize(pt) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
const byte_offset = res_bit_offset / 8;
- const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
+ const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(pt).toByteUnits().?));
return .{ .byte_ptr = .{
.offset = byte_offset,
.alignment = new_align,
@@ -3547,34 +3556,35 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
} };
}
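The running_bits loop above assigns each packed field the bit offset of everything declared before it; a standalone sketch (not part of the commit):

    const std = @import("std");

    test "packed struct field bit offsets accumulate in declaration order" {
        const S = packed struct { a: u3, b: u5, c: u8 };
        try std.testing.expectEqual(@as(u16, 0), @bitOffsetOf(S, "a"));
        try std.testing.expectEqual(@as(u16, 3), @bitOffsetOf(S, "b"));
        try std.testing.expectEqual(@as(u16, 8), @bitOffsetOf(S, "c"));
    }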
-pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void {
+pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
+ .simple_type => |simple_type| return resolveSimpleType(simple_type, pt),
else => {},
}
switch (ty.zigTypeTag(zcu)) {
.Struct => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
- try field_ty.resolveLayout(zcu);
+ try field_ty.resolveLayout(pt);
},
- .struct_type => return ty.resolveStructInner(zcu, .layout),
+ .struct_type => return ty.resolveStructInner(pt, .layout),
else => unreachable,
},
- .Union => return ty.resolveUnionInner(zcu, .layout),
+ .Union => return ty.resolveUnionInner(pt, .layout),
.Array => {
if (ty.arrayLenIncludingSentinel(zcu) == 0) return;
const elem_ty = ty.childType(zcu);
- return elem_ty.resolveLayout(zcu);
+ return elem_ty.resolveLayout(pt);
},
.Optional => {
const payload_ty = ty.optionalChild(zcu);
- return payload_ty.resolveLayout(zcu);
+ return payload_ty.resolveLayout(pt);
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(zcu);
- return payload_ty.resolveLayout(zcu);
+ return payload_ty.resolveLayout(pt);
},
.Fn => {
const info = zcu.typeToFunc(ty).?;
@@ -3585,16 +3595,16 @@ pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void {
}
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
- try Type.fromInterned(param_ty).resolveLayout(zcu);
+ try Type.fromInterned(param_ty).resolveLayout(pt);
}
- try Type.fromInterned(info.return_type).resolveLayout(zcu);
+ try Type.fromInterned(info.return_type).resolveLayout(pt);
},
else => {},
}
}
-pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void {
- const ip = &zcu.intern_pool;
+pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ const ip = &pt.zcu.intern_pool;
const ty_ip = ty.toIntern();
switch (ty_ip) {
@@ -3680,22 +3690,23 @@ pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void {
.type_struct,
.type_struct_packed,
.type_struct_packed_inits,
- => return ty.resolveStructInner(zcu, .fields),
+ => return ty.resolveStructInner(pt, .fields),
- .type_union => return ty.resolveUnionInner(zcu, .fields),
+ .type_union => return ty.resolveUnionInner(pt, .fields),
- .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu),
+ .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, pt),
else => {},
},
}
}
-pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void {
+pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
+ .simple_type => |simple_type| return resolveSimpleType(simple_type, pt),
else => {},
}
@@ -3719,52 +3730,53 @@ pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void {
.EnumLiteral,
=> {},
- .Pointer => return ty.childType(zcu).resolveFully(zcu),
- .Array => return ty.childType(zcu).resolveFully(zcu),
- .Optional => return ty.optionalChild(zcu).resolveFully(zcu),
- .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu),
+ .Pointer => return ty.childType(zcu).resolveFully(pt),
+ .Array => return ty.childType(zcu).resolveFully(pt),
+ .Optional => return ty.optionalChild(zcu).resolveFully(pt),
+ .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(pt),
.Fn => {
const info = zcu.typeToFunc(ty).?;
if (info.is_generic) return;
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
- try Type.fromInterned(param_ty).resolveFully(zcu);
+ try Type.fromInterned(param_ty).resolveFully(pt);
}
- try Type.fromInterned(info.return_type).resolveFully(zcu);
+ try Type.fromInterned(info.return_type).resolveFully(pt);
},
.Struct => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
- try field_ty.resolveFully(zcu);
+ try field_ty.resolveFully(pt);
},
- .struct_type => return ty.resolveStructInner(zcu, .full),
+ .struct_type => return ty.resolveStructInner(pt, .full),
else => unreachable,
},
- .Union => return ty.resolveUnionInner(zcu, .full),
+ .Union => return ty.resolveUnionInner(pt, .full),
}
}
-pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void {
+pub fn resolveStructFieldInits(ty: Type, pt: Zcu.PerThread) SemaError!void {
// TODO: stop calling this for tuples!
- _ = zcu.typeToStruct(ty) orelse return;
- return ty.resolveStructInner(zcu, .inits);
+ _ = pt.zcu.typeToStruct(ty) orelse return;
+ return ty.resolveStructInner(pt, .inits);
}
-pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void {
- return ty.resolveStructInner(zcu, .alignment);
+pub fn resolveStructAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ return ty.resolveStructInner(pt, .alignment);
}
-pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void {
- return ty.resolveUnionInner(zcu, .alignment);
+pub fn resolveUnionAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ return ty.resolveUnionInner(pt, .alignment);
}
/// `ty` must be a struct.
fn resolveStructInner(
ty: Type,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
resolution: enum { fields, inits, alignment, layout, full },
) SemaError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const struct_obj = zcu.typeToStruct(ty).?;
@@ -3777,7 +3789,7 @@ fn resolveStructInner(
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = undefined, // This ZIR will not be used.
@@ -3804,9 +3816,10 @@ fn resolveStructInner(
/// `ty` must be a union.
fn resolveUnionInner(
ty: Type,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
resolution: enum { fields, alignment, layout, full },
) SemaError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const union_obj = zcu.typeToUnion(ty).?;
@@ -3819,7 +3832,7 @@ fn resolveUnionInner(
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = undefined, // This ZIR will not be used.
@@ -3845,7 +3858,7 @@ fn resolveUnionInner(
/// Fully resolves a simple type. This is usually a nop, but for builtin types with
/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
/// resolve the type.
-fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
+fn resolveSimpleType(simple_type: InternPool.SimpleType, pt: Zcu.PerThread) Allocator.Error!void {
const builtin_type_name: []const u8 = switch (simple_type) {
.atomic_order => "AtomicOrder",
.atomic_rmw_op => "AtomicRmwOp",
@@ -3861,7 +3874,7 @@ fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Er
else => return,
};
// This will fully resolve the type.
- _ = try zcu.getBuiltinType(builtin_type_name);
+ _ = try pt.getBuiltinType(builtin_type_name);
}
/// Returns the type of a pointer to an element.
@@ -3874,7 +3887,8 @@ fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Er
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `Sema.analyzePtrArithmetic`.
/// May perform type resolution and return a transitive `error.AnalysisFail`.
-pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
+pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
+ const zcu = pt.zcu;
const ptr_info = ptr_ty.ptrInfo(zcu);
const elem_ty = ptr_ty.elemType2(zcu);
const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
@@ -3887,14 +3901,14 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
alignment: Alignment = .none,
vector_index: VI = .none,
} = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
- const elem_bits = elem_ty.bitSize(zcu);
+ const elem_bits = elem_ty.bitSize(pt);
if (elem_bits == 0) break :blk .{};
const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
if (!is_packed) break :blk .{};
break :blk .{
.host_size = @intCast(parent_ty.arrayLen(zcu)),
- .alignment = parent_ty.abiAlignment(zcu),
+ .alignment = parent_ty.abiAlignment(pt),
.vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
};
} else .{};
@@ -3908,7 +3922,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
- const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar;
+ const elem_size = (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar;
const addend = if (offset) |off| elem_size * off else elem_size;
// The resulting pointer is aligned to the lcd between the offset (an
@@ -3921,7 +3935,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
assert(new_align != .none);
break :a new_align;
};
- return zcu.ptrTypeSema(.{
+ return pt.ptrTypeSema(.{
.child = elem_ty.toIntern(),
.flags = .{
.alignment = alignment,
@@ -3944,6 +3958,7 @@ pub const @"u16": Type = .{ .ip_index = .u16_type };
pub const @"u29": Type = .{ .ip_index = .u29_type };
pub const @"u32": Type = .{ .ip_index = .u32_type };
pub const @"u64": Type = .{ .ip_index = .u64_type };
+pub const @"u80": Type = .{ .ip_index = .u80_type };
pub const @"u128": Type = .{ .ip_index = .u128_type };
pub const @"i8": Type = .{ .ip_index = .i8_type };
src/Value.zig
@@ -40,10 +40,10 @@ pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) {
return .{ .data = val };
}
-pub fn fmtValue(val: Value, mod: *Module, opt_sema: ?*Sema) std.fmt.Formatter(print_value.format) {
+pub fn fmtValue(val: Value, pt: Zcu.PerThread, opt_sema: ?*Sema) std.fmt.Formatter(print_value.format) {
return .{ .data = .{
.val = val,
- .mod = mod,
+ .pt = pt,
.opt_sema = opt_sema,
.depth = 3,
} };
@@ -55,15 +55,16 @@ pub fn fmtValueFull(ctx: print_value.FormatContext) std.fmt.Formatter(print_valu
/// Converts `val` to a null-terminated string stored in the InternPool.
/// Asserts `val` is an array of `u8`
-pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString {
+pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
+ const mod = pt.zcu;
assert(ty.zigTypeTag(mod) == .Array);
assert(ty.childType(mod).toIntern() == .u8_type);
const ip = &mod.intern_pool;
switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(mod), ip),
- .elems => return arrayToIpString(val, ty.arrayLen(mod), mod),
+ .elems => return arrayToIpString(val, ty.arrayLen(mod), pt),
.repeated_elem => |elem| {
- const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(mod));
+ const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt));
const len: usize = @intCast(ty.arrayLen(mod));
try ip.string_bytes.appendNTimes(mod.gpa, byte, len);
return ip.getOrPutTrailingString(mod.gpa, len, .no_embedded_nulls);
@@ -73,16 +74,17 @@ pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminated
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
-pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
+pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) ![]u8 {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
return switch (ip.indexToKey(val.toIntern())) {
.enum_literal => |enum_literal| allocator.dupe(u8, enum_literal.toSlice(ip)),
- .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(mod), allocator, mod),
+ .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(pt), allocator, pt),
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| try allocator.dupe(u8, bytes.toSlice(ty.arrayLenIncludingSentinel(mod), ip)),
- .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
+ .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, pt),
.repeated_elem => |elem| {
- const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(mod));
+ const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt));
const result = try allocator.alloc(u8, @intCast(ty.arrayLen(mod)));
@memset(result, byte);
return result;
@@ -92,16 +94,17 @@ pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module
};
}
-fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
+fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, pt: Zcu.PerThread) ![]u8 {
const result = try allocator.alloc(u8, @intCast(len));
for (result, 0..) |*elem, i| {
- const elem_val = try val.elemValue(mod, i);
- elem.* = @intCast(elem_val.toUnsignedInt(mod));
+ const elem_val = try val.elemValue(pt, i);
+ elem.* = @intCast(elem_val.toUnsignedInt(pt));
}
return result;
}
-fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString {
+fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
+ const mod = pt.zcu;
const gpa = mod.gpa;
const ip = &mod.intern_pool;
const len: usize = @intCast(len_u64);
@@ -110,9 +113,9 @@ fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTermi
// I don't think elemValue has the possibility to affect ip.string_bytes. Let's
// assert just to be sure.
const prev = ip.string_bytes.items.len;
- const elem_val = try val.elemValue(mod, i);
+ const elem_val = try val.elemValue(pt, i);
assert(ip.string_bytes.items.len == prev);
- const byte: u8 = @intCast(elem_val.toUnsignedInt(mod));
+ const byte: u8 = @intCast(elem_val.toUnsignedInt(pt));
ip.string_bytes.appendAssumeCapacity(byte);
}
return ip.getOrPutTrailingString(gpa, len, .no_embedded_nulls);
@@ -133,14 +136,14 @@ pub fn toType(self: Value) Type {
return Type.fromInterned(self.toIntern());
}
-pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
- const ip = &mod.intern_pool;
+pub fn intFromEnum(val: Value, ty: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const ip = &pt.zcu.intern_pool;
const enum_ty = ip.typeOf(val.toIntern());
return switch (ip.indexToKey(enum_ty)) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_literal => |enum_literal| {
- const field_index = ty.enumFieldIndex(enum_literal, mod).?;
+ const field_index = ty.enumFieldIndex(enum_literal, pt.zcu).?;
switch (ip.indexToKey(ty.toIntern())) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => return val,
@@ -150,13 +153,13 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
return Value.fromInterned(enum_type.values.get(ip)[field_index]);
} else {
// Field index and integer values are the same.
- return mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index);
+ return pt.intValue(Type.fromInterned(enum_type.tag_ty), field_index);
}
},
else => unreachable,
}
},
- .enum_type => try mod.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)),
+ .enum_type => try pt.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)),
else => unreachable,
};
}
@@ -164,38 +167,38 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
pub const ResolveStrat = Type.ResolveStrat;
/// Asserts the value is an integer.
-pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst {
- return val.toBigIntAdvanced(space, mod, .normal) catch unreachable;
+pub fn toBigInt(val: Value, space: *BigIntSpace, pt: Zcu.PerThread) BigIntConst {
+ return val.toBigIntAdvanced(space, pt, .normal) catch unreachable;
}
/// Asserts the value is an integer.
pub fn toBigIntAdvanced(
val: Value,
space: *BigIntSpace,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStrat,
) Module.CompileError!BigIntConst {
return switch (val.toIntern()) {
.bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
.bool_true => BigIntMutable.init(&space.limbs, 1).toConst(),
.null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
- else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ else => switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => int.storage.toBigInt(space),
.lazy_align, .lazy_size => |ty| {
- if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod);
+ if (strat == .sema) try Type.fromInterned(ty).resolveLayout(pt);
const x = switch (int.storage) {
else => unreachable,
- .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
- .lazy_size => Type.fromInterned(ty).abiSize(mod),
+ .lazy_align => Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0,
+ .lazy_size => Type.fromInterned(ty).abiSize(pt),
};
return BigIntMutable.init(&space.limbs, x).toConst();
},
},
- .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat),
+ .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, pt, strat),
.opt, .ptr => BigIntMutable.init(
&space.limbs,
- (try val.getUnsignedIntAdvanced(mod, strat)).?,
+ (try val.getUnsignedIntAdvanced(pt, strat)).?,
).toConst(),
else => unreachable,
},
@@ -229,13 +232,14 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
-pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 {
- return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable;
+pub fn getUnsignedInt(val: Value, pt: Zcu.PerThread) ?u64 {
+ return getUnsignedIntAdvanced(val, pt, .normal) catch unreachable;
}
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
-pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 {
+pub fn getUnsignedIntAdvanced(val: Value, pt: Zcu.PerThread, strat: ResolveStrat) !?u64 {
+ const mod = pt.zcu;
return switch (val.toIntern()) {
.undef => unreachable,
.bool_false => 0,
@@ -246,22 +250,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u
.big_int => |big_int| big_int.to(u64) catch null,
.u64 => |x| x,
.i64 => |x| std.math.cast(u64, x),
- .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0,
- .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar,
+ .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, strat.toLazy())).scalar.toByteUnits() orelse 0,
+ .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(pt, strat.toLazy())).scalar,
},
.ptr => |ptr| switch (ptr.base_addr) {
.int => ptr.byte_offset,
.field => |field| {
- const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null;
+ const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(pt, strat)) orelse return null;
const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
- if (strat == .sema) try struct_ty.resolveLayout(mod);
- return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset;
+ if (strat == .sema) try struct_ty.resolveLayout(pt);
+ return base_addr + struct_ty.structFieldOffset(@intCast(field.index), pt) + ptr.byte_offset;
},
else => null,
},
.opt => |opt| switch (opt.val) {
.none => 0,
- else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat),
+ else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(pt, strat),
},
else => null,
},
@@ -269,27 +273,27 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u
}
/// Asserts the value is an integer and it fits in a u64
-pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 {
- return getUnsignedInt(val, zcu).?;
+pub fn toUnsignedInt(val: Value, pt: Zcu.PerThread) u64 {
+ return getUnsignedInt(val, pt).?;
}
/// Asserts the value is an integer and it fits in a u64
-pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 {
- return (try getUnsignedIntAdvanced(val, zcu, .sema)).?;
+pub fn toUnsignedIntSema(val: Value, pt: Zcu.PerThread) !u64 {
+ return (try getUnsignedIntAdvanced(val, pt, .sema)).?;
}
/// Asserts the value is an integer and it fits in a i64
-pub fn toSignedInt(val: Value, mod: *Module) i64 {
+pub fn toSignedInt(val: Value, pt: Zcu.PerThread) i64 {
return switch (val.toIntern()) {
.bool_false => 0,
.bool_true => 1,
- else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ else => switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.to(i64) catch unreachable,
.i64 => |x| x,
.u64 => |x| @intCast(x),
- .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
- .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)),
+ .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0),
+ .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(pt)),
},
else => unreachable,
},
@@ -321,16 +325,17 @@ fn ptrHasIntAddr(val: Value, mod: *Module) bool {
///
/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
/// the end of the value in memory.
-pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
+pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) error{
ReinterpretDeclRef,
IllDefinedMemoryLayout,
Unimplemented,
OutOfMemory,
}!void {
+ const mod = pt.zcu;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef(mod)) {
- const size: usize = @intCast(ty.abiSize(mod));
+ const size: usize = @intCast(ty.abiSize(pt));
@memset(buffer[0..size], 0xaa);
return;
}
@@ -346,41 +351,41 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
var bigint_buffer: BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_buffer, mod);
+ const bigint = val.toBigInt(&bigint_buffer, pt);
bigint.writeTwosComplement(buffer[0..byte_count], endian);
},
.Float => switch (ty.floatBits(target)) {
- 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, mod)), endian),
- 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, mod)), endian),
- 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, mod)), endian),
- 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, mod)), endian),
- 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, mod)), endian),
+ 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, pt)), endian),
+ 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, pt)), endian),
+ 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, pt)), endian),
+ 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, pt)), endian),
+ 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, pt)), endian),
else => unreachable,
},
.Array => {
const len = ty.arrayLen(mod);
const elem_ty = ty.childType(mod);
- const elem_size: usize = @intCast(elem_ty.abiSize(mod));
+ const elem_size: usize = @intCast(elem_ty.abiSize(pt));
var elem_i: usize = 0;
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
- const elem_val = try val.elemValue(mod, elem_i);
- try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
+ const elem_val = try val.elemValue(pt, elem_i);
+ try elem_val.writeToMemory(elem_ty, pt, buffer[buf_off..]);
buf_off += elem_size;
}
},
.Vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
- return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+ return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
},
.Struct => {
const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
switch (struct_type.layout) {
.auto => return error.IllDefinedMemoryLayout,
.@"extern" => for (0..struct_type.field_types.len) |field_index| {
- const off: usize = @intCast(ty.structFieldOffset(field_index, mod));
+ const off: usize = @intCast(ty.structFieldOffset(field_index, pt));
const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes.at(field_index, ip);
@@ -390,11 +395,11 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
.repeated_elem => |elem| elem,
});
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- try writeToMemory(field_val, field_ty, mod, buffer[off..]);
+ try writeToMemory(field_val, field_ty, pt, buffer[off..]);
},
.@"packed" => {
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
- return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+ return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
},
}
},
@@ -421,34 +426,34 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
const union_obj = mod.typeToUnion(ty).?;
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]);
- const field_val = try val.fieldValue(mod, field_index);
- const byte_count: usize = @intCast(field_type.abiSize(mod));
- return writeToMemory(field_val, field_type, mod, buffer[0..byte_count]);
+ const field_val = try val.fieldValue(pt, field_index);
+ const byte_count: usize = @intCast(field_type.abiSize(pt));
+ return writeToMemory(field_val, field_type, pt, buffer[0..byte_count]);
} else {
- const backing_ty = try ty.unionBackingType(mod);
- const byte_count: usize = @intCast(backing_ty.abiSize(mod));
- return writeToMemory(val.unionValue(mod), backing_ty, mod, buffer[0..byte_count]);
+ const backing_ty = try ty.unionBackingType(pt);
+ const byte_count: usize = @intCast(backing_ty.abiSize(pt));
+ return writeToMemory(val.unionValue(mod), backing_ty, pt, buffer[0..byte_count]);
}
},
.@"packed" => {
- const backing_ty = try ty.unionBackingType(mod);
- const byte_count: usize = @intCast(backing_ty.abiSize(mod));
- return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ const backing_ty = try ty.unionBackingType(pt);
+ const byte_count: usize = @intCast(backing_ty.abiSize(pt));
+ return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
},
},
.Pointer => {
if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout;
if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef;
- return val.writeToMemory(Type.usize, mod, buffer);
+ return val.writeToMemory(Type.usize, pt, buffer);
},
.Optional => {
if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout;
const child = ty.optionalChild(mod);
const opt_val = val.optionalValue(mod);
if (opt_val) |some| {
- return some.writeToMemory(child, mod, buffer);
+ return some.writeToMemory(child, pt, buffer);
} else {
- return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer);
+ return writeToMemory(try pt.intValue(Type.usize, 0), Type.usize, pt, buffer);
}
},
else => return error.Unimplemented,
@@ -462,15 +467,16 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
pub fn writeToPackedMemory(
val: Value,
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
buffer: []u8,
bit_offset: usize,
) error{ ReinterpretDeclRef, OutOfMemory }!void {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef(mod)) {
- const bit_size: usize = @intCast(ty.bitSize(mod));
+ const bit_size: usize = @intCast(ty.bitSize(pt));
if (bit_size != 0) {
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
}
@@ -494,30 +500,30 @@ pub fn writeToPackedMemory(
const bits = ty.intInfo(mod).bits;
if (bits == 0) return;
- switch (ip.indexToKey((try val.intFromEnum(ty, mod)).toIntern()).int.storage) {
+ switch (ip.indexToKey((try val.intFromEnum(ty, pt)).toIntern()).int.storage) {
inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
.big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
.lazy_align => |lazy_align| {
- const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits() orelse 0;
+ const num = Type.fromInterned(lazy_align).abiAlignment(pt).toByteUnits() orelse 0;
std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
},
.lazy_size => |lazy_size| {
- const num = Type.fromInterned(lazy_size).abiSize(mod);
+ const num = Type.fromInterned(lazy_size).abiSize(pt);
std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
},
}
},
.Float => switch (ty.floatBits(target)) {
- 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, mod)), endian),
- 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, mod)), endian),
- 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, mod)), endian),
- 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, mod)), endian),
- 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, mod)), endian),
+ 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, pt)), endian),
+ 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, pt)), endian),
+ 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, pt)), endian),
+ 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, pt)), endian),
+ 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, pt)), endian),
else => unreachable,
},
.Vector => {
const elem_ty = ty.childType(mod);
- const elem_bit_size: u16 = @intCast(elem_ty.bitSize(mod));
+ const elem_bit_size: u16 = @intCast(elem_ty.bitSize(pt));
const len: usize = @intCast(ty.arrayLen(mod));
var bits: u16 = 0;
@@ -525,8 +531,8 @@ pub fn writeToPackedMemory(
while (elem_i < len) : (elem_i += 1) {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .big) len - elem_i - 1 else elem_i;
- const elem_val = try val.elemValue(mod, tgt_elem_i);
- try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
+ const elem_val = try val.elemValue(pt, tgt_elem_i);
+ try elem_val.writeToPackedMemory(elem_ty, pt, buffer, bit_offset + bits);
bits += elem_bit_size;
}
},
@@ -543,8 +549,8 @@ pub fn writeToPackedMemory(
.repeated_elem => |elem| elem,
});
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- const field_bits: u16 = @intCast(field_ty.bitSize(mod));
- try field_val.writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
+ const field_bits: u16 = @intCast(field_ty.bitSize(pt));
+ try field_val.writeToPackedMemory(field_ty, pt, buffer, bit_offset + bits);
bits += field_bits;
}
},
@@ -556,11 +562,11 @@ pub fn writeToPackedMemory(
if (val.unionTag(mod)) |union_tag| {
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- const field_val = try val.fieldValue(mod, field_index);
- return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
+ const field_val = try val.fieldValue(pt, field_index);
+ return field_val.writeToPackedMemory(field_type, pt, buffer, bit_offset);
} else {
- const backing_ty = try ty.unionBackingType(mod);
- return val.unionValue(mod).writeToPackedMemory(backing_ty, mod, buffer, bit_offset);
+ const backing_ty = try ty.unionBackingType(pt);
+ return val.unionValue(mod).writeToPackedMemory(backing_ty, pt, buffer, bit_offset);
}
},
}
@@ -568,16 +574,16 @@ pub fn writeToPackedMemory(
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef;
- return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
+ return val.writeToPackedMemory(Type.usize, pt, buffer, bit_offset);
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
const child = ty.optionalChild(mod);
const opt_val = val.optionalValue(mod);
if (opt_val) |some| {
- return some.writeToPackedMemory(child, mod, buffer, bit_offset);
+ return some.writeToPackedMemory(child, pt, buffer, bit_offset);
} else {
- return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset);
+ return writeToPackedMemory(try pt.intValue(Type.usize, 0), Type.usize, pt, buffer, bit_offset);
}
},
else => @panic("TODO implement writeToPackedMemory for more types"),
@@ -590,7 +596,7 @@ pub fn writeToPackedMemory(
/// the end of the value in memory.
pub fn readFromMemory(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
buffer: []const u8,
arena: Allocator,
) error{
@@ -598,6 +604,7 @@ pub fn readFromMemory(
Unimplemented,
OutOfMemory,
}!Value {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
@@ -642,7 +649,7 @@ pub fn readFromMemory(
- return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty);
+ return pt.getCoerced(try pt.intValue_big(int_ty, bigint.toConst()), ty);
}
},
- .Float => return Value.fromInterned((try mod.intern(.{ .float = .{
+ .Float => return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @bitCast(std.mem.readInt(u16, buffer[0..2], endian)) },
@@ -652,25 +659,25 @@ pub fn readFromMemory(
128 => .{ .f128 = @bitCast(std.mem.readInt(u128, buffer[0..16], endian)) },
else => unreachable,
},
- } }))),
+ } })),
.Array => {
const elem_ty = ty.childType(mod);
- const elem_size = elem_ty.abiSize(mod);
+ const elem_size = elem_ty.abiSize(pt);
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod)));
var offset: usize = 0;
for (elems) |*elem| {
- elem.* = (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).toIntern();
+ elem.* = (try readFromMemory(elem_ty, pt, buffer[offset..], arena)).toIntern();
offset += @intCast(elem_size);
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
- } })));
+ } }));
},
.Vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
+ const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
- return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
+ return readFromPackedMemory(ty, pt, buffer[0..byte_count], 0, arena);
},
.Struct => {
@@ -683,16 +690,16 @@ pub fn readFromMemory(
for (field_vals, 0..) |*field_val, i| {
const field_ty = Type.fromInterned(field_types.get(ip)[i]);
- const off: usize = @intCast(ty.structFieldOffset(i, mod));
+ const off: usize = @intCast(ty.structFieldOffset(i, pt));
- const sz: usize = @intCast(field_ty.abiSize(mod));
+ const sz: usize = @intCast(field_ty.abiSize(pt));
- field_val.* = (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).toIntern();
+ field_val.* = (try readFromMemory(field_ty, pt, buffer[off..(off + sz)], arena)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })));
+ } }));
},
.@"packed" => {
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
+ const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
- return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
+ return readFromPackedMemory(ty, pt, buffer[0..byte_count], 0, arena);
},
}
@@ -704,49 +711,49 @@ pub fn readFromMemory(
const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
const name = mod.global_error_set.keys()[@intCast(index)];
- return Value.fromInterned((try mod.intern(.{ .err = .{
+ return Value.fromInterned(try pt.intern(.{ .err = .{
.ty = ty.toIntern(),
.name = name,
- } })));
+ } }));
},
.Union => switch (ty.containerLayout(mod)) {
.auto => return error.IllDefinedMemoryLayout,
.@"extern" => {
- const union_size = ty.abiSize(mod);
+ const union_size = ty.abiSize(pt);
- const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type });
- const val = (try readFromMemory(array_ty, mod, buffer, arena)).toIntern();
+ const array_ty = try pt.arrayType(.{ .len = union_size, .child = .u8_type });
+ const val = (try readFromMemory(array_ty, pt, buffer, arena)).toIntern();
- return Value.fromInterned((try mod.intern(.{ .un = .{
+ return Value.fromInterned(try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = val,
- } })));
+ } }));
},
.@"packed" => {
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
+ const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
- return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
+ return readFromPackedMemory(ty, pt, buffer[0..byte_count], 0, arena);
},
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
- const int_val = try readFromMemory(Type.usize, mod, buffer, arena);
+ const int_val = try readFromMemory(Type.usize, pt, buffer, arena);
- return Value.fromInterned((try mod.intern(.{ .ptr = .{
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = ty.toIntern(),
.base_addr = .int,
- .byte_offset = int_val.toUnsignedInt(mod),
- } })));
+ .byte_offset = int_val.toUnsignedInt(pt),
+ } }));
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
const child_ty = ty.optionalChild(mod);
- const child_val = try readFromMemory(child_ty, mod, buffer, arena);
+ const child_val = try readFromMemory(child_ty, pt, buffer, arena);
- return Value.fromInterned((try mod.intern(.{ .opt = .{
+ return Value.fromInterned(try pt.intern(.{ .opt = .{
.ty = ty.toIntern(),
- .val = switch (child_val.orderAgainstZero(mod)) {
+ .val = switch (child_val.orderAgainstZero(pt)) {
.lt => unreachable,
.eq => .none,
.gt => child_val.toIntern(),
},
- } })));
+ } }));
},
else => return error.Unimplemented,
}
@@ -758,7 +765,7 @@ pub fn readFromMemory(
/// big-endian packed memory layouts start at the end of the buffer.
pub fn readFromPackedMemory(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
buffer: []const u8,
bit_offset: usize,
arena: Allocator,
@@ -766,6 +773,7 @@ pub fn readFromPackedMemory(
IllDefinedMemoryLayout,
OutOfMemory,
}!Value {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
@@ -783,35 +791,35 @@ pub fn readFromPackedMemory(
}
},
.Int => {
- if (buffer.len == 0) return mod.intValue(ty, 0);
+ if (buffer.len == 0) return pt.intValue(ty, 0);
const int_info = ty.intInfo(mod);
const bits = int_info.bits;
- if (bits == 0) return mod.intValue(ty, 0);
+ if (bits == 0) return pt.intValue(ty, 0);
// Fast path for integers <= u64
if (bits <= 64) switch (int_info.signedness) {
// Use different backing types for unsigned vs signed to avoid the need to go via
// a larger type like `i128`.
- .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
- .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
+ .unsigned => return pt.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
+ .signed => return pt.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
};
// Slow path, we have to construct a big-int
- const abi_size: usize = @intCast(ty.abiSize(mod));
+ const abi_size: usize = @intCast(ty.abiSize(pt));
const Limb = std.math.big.Limb;
const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
const limbs_buffer = try arena.alloc(Limb, limb_count);
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
- return mod.intValue_big(ty, bigint.toConst());
+ return pt.intValue_big(ty, bigint.toConst());
},
.Enum => {
const int_ty = ty.intTagType(mod);
- const int_val = try Value.readFromPackedMemory(int_ty, mod, buffer, bit_offset, arena);
- return mod.getCoerced(int_val, ty);
+ const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, arena);
+ return pt.getCoerced(int_val, ty);
},
- .Float => return Value.fromInterned((try mod.intern(.{ .float = .{
+ .Float => return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian)) },
@@ -821,23 +829,23 @@ pub fn readFromPackedMemory(
128 => .{ .f128 = @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian)) },
else => unreachable,
},
- } }))),
+ } })),
.Vector => {
const elem_ty = ty.childType(mod);
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod)));
var bits: u16 = 0;
- const elem_bit_size: u16 = @intCast(elem_ty.bitSize(mod));
+ const elem_bit_size: u16 = @intCast(elem_ty.bitSize(pt));
for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i;
- elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).toIntern();
+ elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
bits += elem_bit_size;
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
- } })));
+ } }));
},
.Struct => {
// Sema is supposed to have emitted a compile error already for Auto layout structs,
@@ -847,43 +855,43 @@ pub fn readFromPackedMemory(
const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
for (field_vals, 0..) |*field_val, i| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- const field_bits: u16 = @intCast(field_ty.bitSize(mod));
- field_val.* = (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).toIntern();
+ const field_bits: u16 = @intCast(field_ty.bitSize(pt));
+ field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
bits += field_bits;
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })));
+ } }));
},
.Union => switch (ty.containerLayout(mod)) {
.auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
.@"packed" => {
- const backing_ty = try ty.unionBackingType(mod);
- const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern();
- return Value.fromInterned((try mod.intern(.{ .un = .{
+ const backing_ty = try ty.unionBackingType(pt);
+ const val = (try readFromPackedMemory(backing_ty, pt, buffer, bit_offset, arena)).toIntern();
+ return Value.fromInterned(try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = val,
- } })));
+ } }));
},
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
- const int_val = try readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
- return Value.fromInterned(try mod.intern(.{ .ptr = .{
+ const int_val = try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena);
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = ty.toIntern(),
.base_addr = .int,
- .byte_offset = int_val.toUnsignedInt(mod),
+ .byte_offset = int_val.toUnsignedInt(pt),
} }));
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
const child_ty = ty.optionalChild(mod);
- const child_val = try readFromPackedMemory(child_ty, mod, buffer, bit_offset, arena);
- return Value.fromInterned(try mod.intern(.{ .opt = .{
+ const child_val = try readFromPackedMemory(child_ty, pt, buffer, bit_offset, arena);
+ return Value.fromInterned(try pt.intern(.{ .opt = .{
.ty = ty.toIntern(),
- .val = switch (child_val.orderAgainstZero(mod)) {
+ .val = switch (child_val.orderAgainstZero(pt)) {
.lt => unreachable,
.eq => .none,
.gt => child_val.toIntern(),
@@ -895,8 +903,8 @@ pub fn readFromPackedMemory(
}
/// Asserts that the value is a float or an integer.
-pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
- return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn toFloat(val: Value, comptime T: type, pt: Zcu.PerThread) T {
+ return switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.int => |int| switch (int.storage) {
.big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)),
inline .u64, .i64 => |x| {
@@ -905,8 +913,8 @@ pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
}
return @floatFromInt(x);
},
- .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
- .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)),
+ .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0),
+ .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(pt)),
},
.float => |float| switch (float.storage) {
inline else => |x| @floatCast(x),
@@ -934,29 +942,30 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
}
}
-pub fn clz(val: Value, ty: Type, mod: *Module) u64 {
+pub fn clz(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_buf, mod);
- return bigint.clz(ty.intInfo(mod).bits);
+ const bigint = val.toBigInt(&bigint_buf, pt);
+ return bigint.clz(ty.intInfo(pt.zcu).bits);
}
-pub fn ctz(val: Value, ty: Type, mod: *Module) u64 {
+pub fn ctz(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_buf, mod);
- return bigint.ctz(ty.intInfo(mod).bits);
+ const bigint = val.toBigInt(&bigint_buf, pt);
+ return bigint.ctz(ty.intInfo(pt.zcu).bits);
}
-pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
+pub fn popCount(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_buf, mod);
- return @intCast(bigint.popCount(ty.intInfo(mod).bits));
+ const bigint = val.toBigInt(&bigint_buf, pt);
+ return @intCast(bigint.popCount(ty.intInfo(pt.zcu).bits));
}
-pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
+pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, mod);
+ const operand_bigint = val.toBigInt(&buffer, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
@@ -965,17 +974,18 @@ pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
-pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
+pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
// Bit count must be evenly divisible by 8
assert(info.bits % 8 == 0);
var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, mod);
+ const operand_bigint = val.toBigInt(&buffer, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
@@ -984,33 +994,33 @@ pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
-pub fn intBitCountTwosComp(self: Value, mod: *Module) usize {
+pub fn intBitCountTwosComp(self: Value, pt: Zcu.PerThread) usize {
var buffer: BigIntSpace = undefined;
- const big_int = self.toBigInt(&buffer, mod);
+ const big_int = self.toBigInt(&buffer, pt);
return big_int.bitCountTwosComp();
}
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
-pub fn floatCast(val: Value, dest_ty: Type, zcu: *Zcu) !Value {
- const target = zcu.getTarget();
- if (val.isUndef(zcu)) return zcu.undefValue(dest_ty);
- return Value.fromInterned((try zcu.intern(.{ .float = .{
+pub fn floatCast(val: Value, dest_ty: Type, pt: Zcu.PerThread) !Value {
+ const target = pt.zcu.getTarget();
+ if (val.isUndef(pt.zcu)) return pt.undefValue(dest_ty);
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = dest_ty.toIntern(),
.storage = switch (dest_ty.floatBits(target)) {
- 16 => .{ .f16 = val.toFloat(f16, zcu) },
- 32 => .{ .f32 = val.toFloat(f32, zcu) },
- 64 => .{ .f64 = val.toFloat(f64, zcu) },
- 80 => .{ .f80 = val.toFloat(f80, zcu) },
- 128 => .{ .f128 = val.toFloat(f128, zcu) },
+ 16 => .{ .f16 = val.toFloat(f16, pt) },
+ 32 => .{ .f32 = val.toFloat(f32, pt) },
+ 64 => .{ .f64 = val.toFloat(f64, pt) },
+ 80 => .{ .f80 = val.toFloat(f80, pt) },
+ 128 => .{ .f128 = val.toFloat(f128, pt) },
else => unreachable,
},
- } })));
+ } }));
}
/// Asserts the value is a float
@@ -1023,19 +1033,19 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool {
};
}
-pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order {
- return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable;
+pub fn orderAgainstZero(lhs: Value, pt: Zcu.PerThread) std.math.Order {
+ return orderAgainstZeroAdvanced(lhs, pt, .normal) catch unreachable;
}
pub fn orderAgainstZeroAdvanced(
lhs: Value,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStrat,
) Module.CompileError!std.math.Order {
return switch (lhs.toIntern()) {
.bool_false => .eq,
.bool_true => .gt,
- else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) {
+ else => switch (pt.zcu.intern_pool.indexToKey(lhs.toIntern())) {
.ptr => |ptr| if (ptr.byte_offset > 0) .gt else switch (ptr.base_addr) {
.decl, .comptime_alloc, .comptime_field => .gt,
.int => .eq,
@@ -1046,7 +1056,7 @@ pub fn orderAgainstZeroAdvanced(
inline .u64, .i64 => |x| std.math.order(x, 0),
.lazy_align => .gt, // alignment is never 0
.lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced(
- mod,
+ pt,
false,
strat.toLazy(),
) catch |err| switch (err) {
@@ -1054,7 +1064,7 @@ pub fn orderAgainstZeroAdvanced(
else => |e| return e,
}) .gt else .eq,
},
- .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat),
+ .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(pt, strat),
.float => |float| switch (float.storage) {
inline else => |x| std.math.order(x, 0),
},
@@ -1064,14 +1074,14 @@ pub fn orderAgainstZeroAdvanced(
}
/// Asserts the value is comparable.
-pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order {
- return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable;
+pub fn order(lhs: Value, rhs: Value, pt: Zcu.PerThread) std.math.Order {
+ return orderAdvanced(lhs, rhs, pt, .normal) catch unreachable;
}
/// Asserts the value is comparable.
-pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order {
- const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat);
- const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat);
+pub fn orderAdvanced(lhs: Value, rhs: Value, pt: Zcu.PerThread, strat: ResolveStrat) !std.math.Order {
+ const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(pt, strat);
+ const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(pt, strat);
switch (lhs_against_zero) {
.lt => if (rhs_against_zero != .lt) return .lt,
.eq => return rhs_against_zero.invert(),
@@ -1083,34 +1093,34 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat)
.gt => {},
}
- if (lhs.isFloat(mod) or rhs.isFloat(mod)) {
- const lhs_f128 = lhs.toFloat(f128, mod);
- const rhs_f128 = rhs.toFloat(f128, mod);
+ if (lhs.isFloat(pt.zcu) or rhs.isFloat(pt.zcu)) {
+ const lhs_f128 = lhs.toFloat(f128, pt);
+ const rhs_f128 = rhs.toFloat(f128, pt);
return std.math.order(lhs_f128, rhs_f128);
}
var lhs_bigint_space: BigIntSpace = undefined;
var rhs_bigint_space: BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, pt, strat);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, pt, strat);
return lhs_bigint.order(rhs_bigint);
}
/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
-pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool {
- return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable;
+pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, pt: Zcu.PerThread) bool {
+ return compareHeteroAdvanced(lhs, op, rhs, pt, .normal) catch unreachable;
}
pub fn compareHeteroAdvanced(
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStrat,
) !bool {
- if (lhs.pointerDecl(mod)) |lhs_decl| {
- if (rhs.pointerDecl(mod)) |rhs_decl| {
+ if (lhs.pointerDecl(pt.zcu)) |lhs_decl| {
+ if (rhs.pointerDecl(pt.zcu)) |rhs_decl| {
switch (op) {
.eq => return lhs_decl == rhs_decl,
.neq => return lhs_decl != rhs_decl,
@@ -1123,31 +1133,32 @@ pub fn compareHeteroAdvanced(
else => {},
}
}
- } else if (rhs.pointerDecl(mod)) |_| {
+ } else if (rhs.pointerDecl(pt.zcu)) |_| {
switch (op) {
.eq => return false,
.neq => return true,
else => {},
}
}
- return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op);
+ return (try orderAdvanced(lhs, rhs, pt, strat)).compare(op);
}
/// Asserts the values are comparable. Both operands have type `ty`.
/// For vectors, returns true if comparison is true for ALL elements.
-pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool {
+pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, pt: Zcu.PerThread) !bool {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const scalar_ty = ty.scalarType(mod);
for (0..ty.vectorLen(mod)) |i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) {
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, pt)) {
return false;
}
}
return true;
}
- return compareScalar(lhs, op, rhs, ty, mod);
+ return compareScalar(lhs, op, rhs, ty, pt);
}
/// Asserts the values are comparable. Both operands have type `ty`.
@@ -1156,12 +1167,12 @@ pub fn compareScalar(
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) bool {
return switch (op) {
- .eq => lhs.eql(rhs, ty, mod),
- .neq => !lhs.eql(rhs, ty, mod),
- else => compareHetero(lhs, op, rhs, mod),
+ .eq => lhs.eql(rhs, ty, pt.zcu),
+ .neq => !lhs.eql(rhs, ty, pt.zcu),
+ else => compareHetero(lhs, op, rhs, pt),
};
}
@@ -1170,24 +1181,25 @@ pub fn compareScalar(
/// Returns `false` if the value or any vector element is undefined.
///
/// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
-pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool {
- return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable;
+pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, pt: Zcu.PerThread) bool {
+ return compareAllWithZeroAdvancedExtra(lhs, op, pt, .normal) catch unreachable;
}
pub fn compareAllWithZeroSema(
lhs: Value,
op: std.math.CompareOperator,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
) Module.CompileError!bool {
- return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema);
+ return compareAllWithZeroAdvancedExtra(lhs, op, pt, .sema);
}
pub fn compareAllWithZeroAdvancedExtra(
lhs: Value,
op: std.math.CompareOperator,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStrat,
) Module.CompileError!bool {
+ const mod = pt.zcu;
if (lhs.isInf(mod)) {
switch (op) {
.neq => return true,
@@ -1206,14 +1218,14 @@ pub fn compareAllWithZeroAdvancedExtra(
if (!std.math.order(byte, 0).compare(op)) break false;
} else true,
.elems => |elems| for (elems) |elem| {
- if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false;
+ if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, pt, strat)) break false;
} else true,
- .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat),
+ .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, pt, strat),
},
.undef => return false,
else => {},
}
- return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op);
+ return (try orderAgainstZeroAdvanced(lhs, pt, strat)).compare(op);
}
pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@@ -1275,21 +1287,22 @@ pub fn slicePtr(val: Value, mod: *Module) Value {
/// Gets the `len` field of a slice value as a `u64`.
/// Resolves the length using `Sema` if necessary.
-pub fn sliceLen(val: Value, zcu: *Zcu) !u64 {
- return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu);
+pub fn sliceLen(val: Value, pt: Zcu.PerThread) !u64 {
+ return Value.fromInterned(pt.zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(pt);
}
/// Asserts the value is an aggregate, and returns the element value at the given index.
-pub fn elemValue(val: Value, zcu: *Zcu, index: usize) Allocator.Error!Value {
+pub fn elemValue(val: Value, pt: Zcu.PerThread, index: usize) Allocator.Error!Value {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.undef => |ty| {
- return Value.fromInterned(try zcu.intern(.{ .undef = Type.fromInterned(ty).childType(zcu).toIntern() }));
+ return Value.fromInterned(try pt.intern(.{ .undef = Type.fromInterned(ty).childType(zcu).toIntern() }));
},
.aggregate => |aggregate| {
const len = ip.aggregateTypeLen(aggregate.ty);
if (index < len) return Value.fromInterned(switch (aggregate.storage) {
- .bytes => |bytes| try zcu.intern(.{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
@@ -1330,17 +1343,17 @@ pub fn sliceArray(
start: usize,
end: usize,
) error{OutOfMemory}!Value {
- const mod = sema.mod;
- const ip = &mod.intern_pool;
- return Value.fromInterned(try mod.intern(.{
+ const pt = sema.pt;
+ const ip = &pt.zcu.intern_pool;
+ return Value.fromInterned(try pt.intern(.{
.aggregate = .{
- .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
- .array_type => |array_type| try mod.arrayType(.{
+ .ty = switch (pt.zcu.intern_pool.indexToKey(pt.zcu.intern_pool.typeOf(val.toIntern()))) {
+ .array_type => |array_type| try pt.arrayType(.{
.len = @intCast(end - start),
.child = array_type.child,
.sentinel = if (end == array_type.len) array_type.sentinel else .none,
}),
- .vector_type => |vector_type| try mod.vectorType(.{
+ .vector_type => |vector_type| try pt.vectorType(.{
.len = @intCast(end - start),
.child = vector_type.child,
}),
@@ -1363,13 +1376,14 @@ pub fn sliceArray(
}));
}
-pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value {
+pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value {
+ const mod = pt.zcu;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
- .undef => |ty| Value.fromInterned((try mod.intern(.{
+ .undef => |ty| Value.fromInterned(try pt.intern(.{
.undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(),
- }))),
+ })),
.aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
- .bytes => |bytes| try mod.intern(.{ .int = .{
+ .bytes => |bytes| try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes.at(index, &mod.intern_pool) },
} }),
@@ -1483,40 +1497,49 @@ pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type,
};
}
-pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value {
+pub fn floatFromIntAdvanced(
+ val: Value,
+ arena: Allocator,
+ int_ty: Type,
+ float_ty: Type,
+ pt: Zcu.PerThread,
+ strat: ResolveStrat,
+) !Value {
+ const mod = pt.zcu;
if (int_ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod));
const scalar_ty = float_ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, pt, strat)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatFromIntScalar(val, float_ty, mod, strat);
+ return floatFromIntScalar(val, float_ty, pt, strat);
}
-pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value {
+pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Value {
+ const mod = pt.zcu;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
- .undef => try mod.undefValue(float_ty),
+ .undef => try pt.undefValue(float_ty),
.int => |int| switch (int.storage) {
.big_int => |big_int| {
const float = bigIntToFloat(big_int.limbs, big_int.positive);
- return mod.floatValue(float_ty, float);
+ return pt.floatValue(float_ty, float);
},
- inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
- .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod),
- .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod),
+ inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, pt),
+ .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(pt, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, pt),
+ .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(pt, strat.toLazy())).scalar, float_ty, pt),
},
else => unreachable,
};
}
-fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
- const target = mod.getTarget();
+fn floatFromIntInner(x: anytype, dest_ty: Type, pt: Zcu.PerThread) !Value {
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
16 => .{ .f16 = @floatFromInt(x) },
32 => .{ .f32 = @floatFromInt(x) },
@@ -1525,10 +1548,10 @@ fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
128 => .{ .f128 = @floatFromInt(x) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = dest_ty.toIntern(),
.storage = storage,
- } })));
+ } }));
}
fn calcLimbLenFloat(scalar: anytype) usize {
@@ -1551,22 +1574,22 @@ pub fn intAddSat(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intAddSatScalar(lhs, rhs, ty, arena, mod);
+ return intAddSatScalar(lhs, rhs, ty, arena, pt);
}
/// Supports integers only; asserts neither operand is undefined.
@@ -1575,24 +1598,24 @@ pub fn intAddSatScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- assert(!lhs.isUndef(mod));
- assert(!rhs.isUndef(mod));
+ assert(!lhs.isUndef(pt.zcu));
+ assert(!rhs.isUndef(pt.zcu));
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
/// Supports (vectors of) integers only; asserts neither operand is undefined.
@@ -1601,22 +1624,22 @@ pub fn intSubSat(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intSubSatScalar(lhs, rhs, ty, arena, mod);
+ return intSubSatScalar(lhs, rhs, ty, arena, pt);
}
/// Supports integers only; asserts neither operand is undefined.
@@ -1625,24 +1648,24 @@ pub fn intSubSatScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- assert(!lhs.isUndef(mod));
- assert(!rhs.isUndef(mod));
+ assert(!lhs.isUndef(pt.zcu));
+ assert(!rhs.isUndef(pt.zcu));
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
pub fn intMulWithOverflow(
@@ -1650,32 +1673,33 @@ pub fn intMulWithOverflow(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !OverflowArithmeticResult {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const vec_len = ty.vectorLen(mod);
const overflowed_data = try arena.alloc(InternPool.Index, vec_len);
const result_data = try arena.alloc(InternPool.Index, vec_len);
const scalar_ty = ty.scalarType(mod);
for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return OverflowArithmeticResult{
- .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+ .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } }))),
- .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ } })),
+ .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } }))),
+ } })),
};
}
- return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod);
+ return intMulWithOverflowScalar(lhs, rhs, ty, arena, pt);
}
pub fn intMulWithOverflowScalar(
@@ -1683,21 +1707,22 @@ pub fn intMulWithOverflowScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !OverflowArithmeticResult {
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
return .{
- .overflow_bit = try mod.undefValue(Type.u1),
- .wrapped_result = try mod.undefValue(ty),
+ .overflow_bit = try pt.undefValue(Type.u1),
+ .wrapped_result = try pt.undefValue(ty),
};
}
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -1715,8 +1740,8 @@ pub fn intMulWithOverflowScalar(
}
return OverflowArithmeticResult{
- .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
- .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
+ .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
+ .wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()),
};
}
@@ -1726,22 +1751,23 @@ pub fn numberMulWrap(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return numberMulWrapScalar(lhs, rhs, ty, arena, mod);
+ return numberMulWrapScalar(lhs, rhs, ty, arena, pt);
}
/// Supports both floats and ints; handles undefined.
@@ -1750,19 +1776,20 @@ pub fn numberMulWrapScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
if (ty.zigTypeTag(mod) == .ComptimeInt) {
- return intMul(lhs, rhs, ty, undefined, arena, mod);
+ return intMul(lhs, rhs, ty, undefined, arena, pt);
}
if (ty.isAnyFloat()) {
- return floatMul(lhs, rhs, ty, arena, mod);
+ return floatMul(lhs, rhs, ty, arena, pt);
}
- const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod);
+ const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, pt);
return overflow_result.wrapped_result;
}
@@ -1772,22 +1799,22 @@ pub fn intMulSat(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intMulSatScalar(lhs, rhs, ty, arena, mod);
+ return intMulSatScalar(lhs, rhs, ty, arena, pt);
}
/// Supports (vectors of) integers only; asserts neither operand is undefined.
@@ -1796,17 +1823,17 @@ pub fn intMulSatScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- assert(!lhs.isUndef(mod));
- assert(!rhs.isUndef(mod));
+ assert(!lhs.isUndef(pt.zcu));
+ assert(!rhs.isUndef(pt.zcu));
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
@max(
@@ -1822,53 +1849,55 @@ pub fn intMulSatScalar(
);
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
/// Supports both floats and ints; handles undefined.
-pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef;
- if (lhs.isNan(mod)) return rhs;
- if (rhs.isNan(mod)) return lhs;
+pub fn numberMax(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value {
+ if (lhs.isUndef(pt.zcu) or rhs.isUndef(pt.zcu)) return undef;
+ if (lhs.isNan(pt.zcu)) return rhs;
+ if (rhs.isNan(pt.zcu)) return lhs;
- return switch (order(lhs, rhs, mod)) {
+ return switch (order(lhs, rhs, pt)) {
.lt => rhs,
.gt, .eq => lhs,
};
}
/// Supports both floats and ints; handles undefined.
-pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef;
- if (lhs.isNan(mod)) return rhs;
- if (rhs.isNan(mod)) return lhs;
+pub fn numberMin(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value {
+ if (lhs.isUndef(pt.zcu) or rhs.isUndef(pt.zcu)) return undef;
+ if (lhs.isNan(pt.zcu)) return rhs;
+ if (rhs.isNan(pt.zcu)) return lhs;
- return switch (order(lhs, rhs, mod)) {
+ return switch (order(lhs, rhs, pt)) {
.lt => lhs,
.gt, .eq => rhs,
};
}
/// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return bitwiseNotScalar(val, ty, arena, mod);
+ return bitwiseNotScalar(val, ty, arena, pt);
}
/// operands must be integers; handles undefined.
-pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (val.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
+pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
+ if (val.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());
const info = ty.intInfo(mod);
@@ -1880,7 +1909,7 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !V
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var val_space: Value.BigIntSpace = undefined;
- const val_bigint = val.toBigInt(&val_space, mod);
+ const val_bigint = val.toBigInt(&val_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -1888,29 +1917,31 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !V
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
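Illustrative sketch, not part of this diff: bitNotWrap stays within the operand's bit width, just as the runtime `~` operator does for a narrow unsigned type.

const std = @import("std");

test "bitwise not wraps within the type's bit width" {
    const x: u4 = 0b0101;
    // The complement is computed in 4 bits, so the result is 0b1010, not a wider value.
    try std.testing.expectEqual(@as(u4, 0b1010), ~x);
}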
/// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return bitwiseAndScalar(lhs, rhs, ty, allocator, mod);
+ return bitwiseAndScalar(lhs, rhs, ty, allocator, pt);
}
/// operands must be integers; handles undefined.
-pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, zcu: *Zcu) !Value {
+pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const zcu = pt.zcu;
// If one operand is defined, we turn the other into `0xAA` so the bitwise AND can
// still zero out some bits.
// TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
@@ -1919,9 +1950,9 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
const rhs_undef = orig_rhs.isUndef(zcu);
break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
0b00 => .{ orig_lhs, orig_rhs },
- 0b01 => .{ orig_lhs, try intValueAa(ty, arena, zcu) },
- 0b10 => .{ try intValueAa(ty, arena, zcu), orig_rhs },
- 0b11 => return zcu.undefValue(ty),
+ 0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
+ 0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
+ 0b11 => return pt.undefValue(ty),
};
};
@@ -1931,8 +1962,8 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
- const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -1940,12 +1971,13 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
- return zcu.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
/// Given an integer or boolean type, creates a value of that type with the bit pattern 0xAA.
/// This is used to convert undef values into 0xAA when performing e.g. bitwise operations.
-fn intValueAa(ty: Type, arena: Allocator, zcu: *Zcu) !Value {
+fn intValueAa(ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const zcu = pt.zcu;
if (ty.toIntern() == .bool_type) return Value.true;
const info = ty.intInfo(zcu);
@@ -1958,68 +1990,71 @@ fn intValueAa(ty: Type, arena: Allocator, zcu: *Zcu) !Value {
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.readTwosComplement(buf, info.bits, zcu.getTarget().cpu.arch.endian(), info.signedness);
- return zcu.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
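Illustrative sketch, not part of this diff: why ANDing a defined operand with the arbitrary 0xAA stand-in for undef still yields known zero bits.

const std = @import("std");

test "a defined operand ANDed with the 0xAA stand-in still clears bits" {
    const defined: u8 = 0x0F;
    const undef_stand_in: u8 = 0xAA; // arbitrary defined pattern used in place of undef
    // Every zero bit of the defined operand forces a zero bit in the result.
    try std.testing.expectEqual(@as(u8, 0x0A), defined & undef_stand_in);
}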
/// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
+ return bitwiseNandScalar(lhs, rhs, ty, arena, pt);
}
/// operands must be integers; handles undefined.
-pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
+pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));
- const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
- const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);
- return bitwiseXor(anded, all_ones, ty, arena, mod);
+ const anded = try bitwiseAnd(lhs, rhs, ty, arena, pt);
+ const all_ones = if (ty.isSignedInt(mod)) try pt.intValue(ty, -1) else try ty.maxIntScalar(pt, ty);
+ return bitwiseXor(anded, all_ones, ty, arena, pt);
}
/// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return bitwiseOrScalar(lhs, rhs, ty, allocator, mod);
+ return bitwiseOrScalar(lhs, rhs, ty, allocator, pt);
}
/// operands must be integers; handles undefined.
-pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, zcu: *Zcu) !Value {
+pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
 // If one operand is defined, we turn the other into `0xAA` so the bitwise OR can
 // still set some bits to a known value.
// TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
const lhs: Value, const rhs: Value = make_defined: {
- const lhs_undef = orig_lhs.isUndef(zcu);
- const rhs_undef = orig_rhs.isUndef(zcu);
+ const lhs_undef = orig_lhs.isUndef(pt.zcu);
+ const rhs_undef = orig_rhs.isUndef(pt.zcu);
break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
0b00 => .{ orig_lhs, orig_rhs },
- 0b01 => .{ orig_lhs, try intValueAa(ty, arena, zcu) },
- 0b10 => .{ try intValueAa(ty, arena, zcu), orig_rhs },
- 0b11 => return zcu.undefValue(ty),
+ 0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
+ 0b10 => .{ try intValueAa(ty, arena, pt), orig_rhs },
+ 0b11 => return pt.undefValue(ty),
};
};
@@ -2029,46 +2064,48 @@ pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloca
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
- const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
@max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
- return zcu.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
/// operands must be (vectors of) integers; handles undefined scalars.
-pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return bitwiseXorScalar(lhs, rhs, ty, allocator, mod);
+ return bitwiseXorScalar(lhs, rhs, ty, allocator, pt);
}
/// operands must be integers; handles undefined.
-pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
+pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -2076,22 +2113,22 @@ pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod:
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
-pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value {
+pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
var overflow: usize = undefined;
- return intDivInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) {
+ return intDivInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) {
error.Overflow => {
- const is_vec = ty.isVector(mod);
+ const is_vec = ty.isVector(pt.zcu);
overflow_idx.* = if (is_vec) overflow else 0;
- const safe_ty = if (is_vec) try mod.vectorType(.{
- .len = ty.vectorLen(mod),
+ const safe_ty = if (is_vec) try pt.vectorType(.{
+ .len = ty.vectorLen(pt.zcu),
.child = .comptime_int_type,
}) else Type.comptime_int;
- return intDivInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) {
+ return intDivInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) {
error.Overflow => unreachable,
else => |e| return e,
};
@@ -2100,14 +2137,14 @@ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator
};
}
-fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) {
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt) catch |err| switch (err) {
error.Overflow => {
overflow_idx.* = i;
return error.Overflow;
@@ -2116,21 +2153,21 @@ fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator
};
scalar.* = val.toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intDivScalar(lhs, rhs, ty, allocator, mod);
+ return intDivScalar(lhs, rhs, ty, allocator, pt);
}
-pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2147,38 +2184,38 @@ pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod:
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
if (ty.toIntern() != .comptime_int_type) {
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) {
return error.Overflow;
}
}
- return mod.intValue_big(ty, result_q.toConst());
+ return pt.intValue_big(ty, result_q.toConst());
}
-pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intDivFloorScalar(lhs, rhs, ty, allocator, mod);
+ return intDivFloorScalar(lhs, rhs, ty, allocator, pt);
}
-pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2194,33 +2231,33 @@ pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator,
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
- return mod.intValue_big(ty, result_q.toConst());
+ return pt.intValue_big(ty, result_q.toConst());
}
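Illustrative sketch, not part of this diff: intDivScalar uses truncating division while intDivFloorScalar uses flooring division; the two only differ when the operand signs differ.

const std = @import("std");

test "divTrunc and divFloor differ for mixed-sign operands" {
    try std.testing.expectEqual(@as(i32, -2), @divTrunc(@as(i32, -7), 3)); // rounds toward zero
    try std.testing.expectEqual(@as(i32, -3), @divFloor(@as(i32, -7), 3)); // rounds toward -inf
}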
-pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intModScalar(lhs, rhs, ty, allocator, mod);
+ return intModScalar(lhs, rhs, ty, allocator, pt);
}
-pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2236,7 +2273,7 @@ pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod:
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
- return mod.intValue_big(ty, result_r.toConst());
+ return pt.intValue_big(ty, result_r.toConst());
}
/// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
@@ -2268,85 +2305,86 @@ pub fn isNegativeInf(val: Value, mod: *const Module) bool {
};
}
-pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
- if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
- const scalar_ty = float_type.scalarType(mod);
+pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
+ const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatRemScalar(lhs, rhs, float_type, mod);
+ return floatRemScalar(lhs, rhs, float_type, pt);
}
-pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value {
- const target = mod.getTarget();
+pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) },
- 32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) },
- 64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) },
- 80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) },
- 128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @rem(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @rem(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @rem(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @rem(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @rem(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
- if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
- const scalar_ty = float_type.scalarType(mod);
+pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
+ const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatModScalar(lhs, rhs, float_type, mod);
+ return floatModScalar(lhs, rhs, float_type, pt);
}
-pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value {
- const target = mod.getTarget();
+pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @mod(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) },
- 32 => .{ .f32 = @mod(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) },
- 64 => .{ .f64 = @mod(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) },
- 80 => .{ .f80 = @mod(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) },
- 128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @mod(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @mod(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @mod(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @mod(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @mod(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
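Illustrative sketch, not part of this diff: floatRemScalar and floatModScalar wrap `@rem` and `@mod` respectively, which differ in whose sign the result follows.

const std = @import("std");

test "@rem follows the dividend's sign, @mod follows the divisor's" {
    try std.testing.expectEqual(@as(f64, -1.0), @rem(@as(f64, -7.0), 3.0));
    try std.testing.expectEqual(@as(f64, 2.0), @mod(@as(f64, -7.0), 3.0));
}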
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
-pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value {
+pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
var overflow: usize = undefined;
- return intMulInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) {
+ return intMulInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) {
error.Overflow => {
const is_vec = ty.isVector(mod);
overflow_idx.* = if (is_vec) overflow else 0;
- const safe_ty = if (is_vec) try mod.vectorType(.{
+ const safe_ty = if (is_vec) try pt.vectorType(.{
.len = ty.vectorLen(mod),
.child = .comptime_int_type,
}) else Type.comptime_int;
- return intMulInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) {
+ return intMulInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) {
error.Overflow => unreachable,
else => |e| return e,
};
@@ -2355,14 +2393,15 @@ pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator
};
}
-fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value {
+fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) {
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt) catch |err| switch (err) {
error.Overflow => {
overflow_idx.* = i;
return error.Overflow;
@@ -2371,26 +2410,26 @@ fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator
};
scalar.* = val.toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intMulScalar(lhs, rhs, ty, allocator, mod);
+ return intMulScalar(lhs, rhs, ty, allocator, pt);
}
-pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
if (ty.toIntern() != .comptime_int_type) {
- const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, mod);
- if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
+ const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, pt);
+ if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow;
return res.wrapped_result;
}
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2402,23 +2441,24 @@ pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod:
);
defer allocator.free(limbs_buffer);
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
-pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
+pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intTruncScalar(val, ty, allocator, signedness, bits, mod);
+ return intTruncScalar(val, ty, allocator, signedness, bits, pt);
}
/// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
@@ -2428,22 +2468,22 @@ pub fn intTruncBitsAsValue(
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: Value,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- const bits_elem = try bits.elemValue(mod, i);
- scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(mod)), mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ const bits_elem = try bits.elemValue(pt, i);
+ scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(pt)), pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(mod)), mod);
+ return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(pt)), pt);
}
pub fn intTruncScalar(
@@ -2452,14 +2492,15 @@ pub fn intTruncScalar(
allocator: Allocator,
signedness: std.builtin.Signedness,
bits: u16,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
) !Value {
- if (bits == 0) return zcu.intValue(ty, 0);
+ const zcu = pt.zcu;
+ if (bits == 0) return pt.intValue(ty, 0);
- if (val.isUndef(zcu)) return zcu.undefValue(ty);
+ if (val.isUndef(zcu)) return pt.undefValue(ty);
var val_space: Value.BigIntSpace = undefined;
- const val_bigint = val.toBigInt(&val_space, zcu);
+ const val_bigint = val.toBigInt(&val_space, pt);
const limbs = try allocator.alloc(
std.math.big.Limb,
@@ -2468,32 +2509,33 @@ pub fn intTruncScalar(
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.truncate(val_bigint, signedness, bits);
- return zcu.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
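Illustrative sketch, not part of this diff: truncating to a narrower bit width keeps only the low bits, mirroring what the runtime `@truncate` builtin does.

const std = @import("std");

test "truncation keeps only the low bits" {
    const wide: u16 = 0x1FF;
    // Only the low 8 bits survive the truncation to u8.
    try std.testing.expectEqual(@as(u8, 0xFF), @as(u8, @truncate(wide)));
}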
-pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return shlScalar(lhs, rhs, ty, allocator, mod);
+ return shlScalar(lhs, rhs, ty, allocator, pt);
}
-pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const shift: usize = @intCast(rhs.toUnsignedInt(mod));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const shift: usize = @intCast(rhs.toUnsignedInt(pt));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2505,11 +2547,11 @@ pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M
};
result_bigint.shiftLeft(lhs_bigint, shift);
if (ty.toIntern() != .comptime_int_type) {
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(pt.zcu);
result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits);
}
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
pub fn shlWithOverflow(
@@ -2517,32 +2559,32 @@ pub fn shlWithOverflow(
rhs: Value,
ty: Type,
allocator: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !OverflowArithmeticResult {
- if (ty.zigTypeTag(mod) == .Vector) {
- const vec_len = ty.vectorLen(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const vec_len = ty.vectorLen(pt.zcu);
const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
const result_data = try allocator.alloc(InternPool.Index, vec_len);
- const scalar_ty = ty.scalarType(mod);
+ const scalar_ty = ty.scalarType(pt.zcu);
for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt);
of.* = of_math_result.overflow_bit.toIntern();
scalar.* = of_math_result.wrapped_result.toIntern();
}
return OverflowArithmeticResult{
- .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+ .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } }))),
- .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ } })),
+ .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } }))),
+ } })),
};
}
- return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod);
+ return shlWithOverflowScalar(lhs, rhs, ty, allocator, pt);
}
pub fn shlWithOverflowScalar(
@@ -2550,12 +2592,12 @@ pub fn shlWithOverflowScalar(
rhs: Value,
ty: Type,
allocator: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !OverflowArithmeticResult {
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const shift: usize = @intCast(rhs.toUnsignedInt(mod));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const shift: usize = @intCast(rhs.toUnsignedInt(pt));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2571,8 +2613,8 @@ pub fn shlWithOverflowScalar(
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
- .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
- .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
+ .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)),
+ .wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()),
};
}
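Illustrative sketch, not part of this diff: the overflow-bit-plus-wrapped-result pair built above corresponds to Zig's runtime `@shlWithOverflow` builtin.

const std = @import("std");

test "shift left with overflow reports the lost bit and the wrapped result" {
    const res = @shlWithOverflow(@as(u8, 0b1100_0000), 1);
    try std.testing.expectEqual(@as(u8, 0b1000_0000), res[0]); // wrapped result
    try std.testing.expectEqual(@as(u1, 1), res[1]); // a bit was shifted out
}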
@@ -2581,22 +2623,22 @@ pub fn shlSat(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return shlSatScalar(lhs, rhs, ty, arena, mod);
+ return shlSatScalar(lhs, rhs, ty, arena, pt);
}
pub fn shlSatScalar(
@@ -2604,15 +2646,15 @@ pub fn shlSatScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
- const info = ty.intInfo(mod);
+ const info = ty.intInfo(pt.zcu);
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const shift: usize = @intCast(rhs.toUnsignedInt(mod));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const shift: usize = @intCast(rhs.toUnsignedInt(pt));
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits) + 1,
@@ -2623,7 +2665,7 @@ pub fn shlSatScalar(
.len = undefined,
};
result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
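Illustrative sketch, not part of this diff: shiftLeftSat clamps to the type's bounds instead of wrapping, like the runtime saturating shift operator `<<|`.

const std = @import("std");

test "saturating shift left clamps instead of wrapping" {
    const x: u8 = 0b0100_0000;
    // 0x40 << 3 is 0x200, which does not fit in a u8, so the result saturates to 255.
    try std.testing.expectEqual(@as(u8, 255), x <<| 3);
}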
pub fn shlTrunc(
@@ -2631,22 +2673,22 @@ pub fn shlTrunc(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return shlTruncScalar(lhs, rhs, ty, arena, mod);
+ return shlTruncScalar(lhs, rhs, ty, arena, pt);
}
pub fn shlTruncScalar(
@@ -2654,46 +2696,46 @@ pub fn shlTruncScalar(
rhs: Value,
ty: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- const shifted = try lhs.shl(rhs, ty, arena, mod);
- const int_info = ty.intInfo(mod);
- const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod);
+ const shifted = try lhs.shl(rhs, ty, arena, pt);
+ const int_info = ty.intInfo(pt.zcu);
+ const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, pt);
return truncated;
}
-pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
- const scalar_ty = ty.scalarType(mod);
+pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+ if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
+ const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return shrScalar(lhs, rhs, ty, allocator, mod);
+ return shrScalar(lhs, rhs, ty, allocator, pt);
}
-pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
+pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
- const shift: usize = @intCast(rhs.toUnsignedInt(mod));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
+ const shift: usize = @intCast(rhs.toUnsignedInt(pt));
const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
if (result_limbs == 0) {
// The shift is enough to remove all the bits from the number, which means the
// result is 0 or -1 depending on the sign.
if (lhs_bigint.positive) {
- return mod.intValue(ty, 0);
+ return pt.intValue(ty, 0);
} else {
- return mod.intValue(ty, -1);
+ return pt.intValue(ty, -1);
}
}
@@ -2707,48 +2749,45 @@ pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M
.len = undefined,
};
result_bigint.shiftRight(lhs_bigint, shift);
- return mod.intValue_big(ty, result_bigint.toConst());
+ return pt.intValue_big(ty, result_bigint.toConst());
}
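Illustrative sketch, not part of this diff: the result_limbs == 0 branch above yields -1 for a negative operand because the shift is arithmetic; Zig's `>>` on signed integers behaves the same way.

const std = @import("std");

test "shifting a negative value all the way right yields -1" {
    const negative: i8 = -5;
    // Arithmetic shift keeps the sign bit, so shifting out every value bit leaves -1.
    try std.testing.expectEqual(@as(i8, -1), negative >> 7);
}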
pub fn floatNeg(
val: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try floatNegScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try floatNegScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatNegScalar(val, float_type, mod);
+ return floatNegScalar(val, float_type, pt);
}
-pub fn floatNegScalar(
- val: Value,
- float_type: Type,
- mod: *Module,
-) !Value {
- const target = mod.getTarget();
+pub fn floatNegScalar(val: Value, float_type: Type, pt: Zcu.PerThread) !Value {
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = -val.toFloat(f16, mod) },
- 32 => .{ .f32 = -val.toFloat(f32, mod) },
- 64 => .{ .f64 = -val.toFloat(f64, mod) },
- 80 => .{ .f80 = -val.toFloat(f80, mod) },
- 128 => .{ .f128 = -val.toFloat(f128, mod) },
+ 16 => .{ .f16 = -val.toFloat(f16, pt) },
+ 32 => .{ .f32 = -val.toFloat(f32, pt) },
+ 64 => .{ .f64 = -val.toFloat(f64, pt) },
+ 80 => .{ .f80 = -val.toFloat(f80, pt) },
+ 128 => .{ .f128 = -val.toFloat(f128, pt) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn floatAdd(
@@ -2756,43 +2795,45 @@ pub fn floatAdd(
rhs: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatAddScalar(lhs, rhs, float_type, mod);
+ return floatAddScalar(lhs, rhs, float_type, pt);
}
pub fn floatAddScalar(
lhs: Value,
rhs: Value,
float_type: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) },
- 32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) },
- 64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) },
- 80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) },
- 128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) },
+ 16 => .{ .f16 = lhs.toFloat(f16, pt) + rhs.toFloat(f16, pt) },
+ 32 => .{ .f32 = lhs.toFloat(f32, pt) + rhs.toFloat(f32, pt) },
+ 64 => .{ .f64 = lhs.toFloat(f64, pt) + rhs.toFloat(f64, pt) },
+ 80 => .{ .f80 = lhs.toFloat(f80, pt) + rhs.toFloat(f80, pt) },
+ 128 => .{ .f128 = lhs.toFloat(f128, pt) + rhs.toFloat(f128, pt) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn floatSub(
@@ -2800,43 +2841,45 @@ pub fn floatSub(
rhs: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatSubScalar(lhs, rhs, float_type, mod);
+ return floatSubScalar(lhs, rhs, float_type, pt);
}
pub fn floatSubScalar(
lhs: Value,
rhs: Value,
float_type: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = lhs.toFloat(f16, mod) - rhs.toFloat(f16, mod) },
- 32 => .{ .f32 = lhs.toFloat(f32, mod) - rhs.toFloat(f32, mod) },
- 64 => .{ .f64 = lhs.toFloat(f64, mod) - rhs.toFloat(f64, mod) },
- 80 => .{ .f80 = lhs.toFloat(f80, mod) - rhs.toFloat(f80, mod) },
- 128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) },
+ 16 => .{ .f16 = lhs.toFloat(f16, pt) - rhs.toFloat(f16, pt) },
+ 32 => .{ .f32 = lhs.toFloat(f32, pt) - rhs.toFloat(f32, pt) },
+ 64 => .{ .f64 = lhs.toFloat(f64, pt) - rhs.toFloat(f64, pt) },
+ 80 => .{ .f80 = lhs.toFloat(f80, pt) - rhs.toFloat(f80, pt) },
+ 128 => .{ .f128 = lhs.toFloat(f128, pt) - rhs.toFloat(f128, pt) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn floatDiv(
@@ -2844,43 +2887,43 @@ pub fn floatDiv(
rhs: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
- const scalar_ty = float_type.scalarType(mod);
+ if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
+ const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatDivScalar(lhs, rhs, float_type, mod);
+ return floatDivScalar(lhs, rhs, float_type, pt);
}
pub fn floatDivScalar(
lhs: Value,
rhs: Value,
float_type: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- const target = mod.getTarget();
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = lhs.toFloat(f16, mod) / rhs.toFloat(f16, mod) },
- 32 => .{ .f32 = lhs.toFloat(f32, mod) / rhs.toFloat(f32, mod) },
- 64 => .{ .f64 = lhs.toFloat(f64, mod) / rhs.toFloat(f64, mod) },
- 80 => .{ .f80 = lhs.toFloat(f80, mod) / rhs.toFloat(f80, mod) },
- 128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) },
+ 16 => .{ .f16 = lhs.toFloat(f16, pt) / rhs.toFloat(f16, pt) },
+ 32 => .{ .f32 = lhs.toFloat(f32, pt) / rhs.toFloat(f32, pt) },
+ 64 => .{ .f64 = lhs.toFloat(f64, pt) / rhs.toFloat(f64, pt) },
+ 80 => .{ .f80 = lhs.toFloat(f80, pt) / rhs.toFloat(f80, pt) },
+ 128 => .{ .f128 = lhs.toFloat(f128, pt) / rhs.toFloat(f128, pt) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn floatDivFloor(
@@ -2888,43 +2931,43 @@ pub fn floatDivFloor(
rhs: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
- const scalar_ty = float_type.scalarType(mod);
+ if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
+ const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatDivFloorScalar(lhs, rhs, float_type, mod);
+ return floatDivFloorScalar(lhs, rhs, float_type, pt);
}
pub fn floatDivFloorScalar(
lhs: Value,
rhs: Value,
float_type: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- const target = mod.getTarget();
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) },
- 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) },
- 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) },
- 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) },
- 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn floatDivTrunc(
@@ -2932,43 +2975,43 @@ pub fn floatDivTrunc(
rhs: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
- const scalar_ty = float_type.scalarType(mod);
+ if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
+ const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatDivTruncScalar(lhs, rhs, float_type, mod);
+ return floatDivTruncScalar(lhs, rhs, float_type, pt);
}
pub fn floatDivTruncScalar(
lhs: Value,
rhs: Value,
float_type: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
- const target = mod.getTarget();
+ const target = pt.zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) },
- 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) },
- 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) },
- 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) },
- 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn floatMul(
@@ -2976,510 +3019,539 @@ pub fn floatMul(
rhs: Value,
float_type: Type,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const lhs_elem = try lhs.elemValue(mod, i);
- const rhs_elem = try rhs.elemValue(mod, i);
- scalar.* = (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).toIntern();
+ const lhs_elem = try lhs.elemValue(pt, i);
+ const rhs_elem = try rhs.elemValue(pt, i);
+ scalar.* = (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floatMulScalar(lhs, rhs, float_type, mod);
+ return floatMulScalar(lhs, rhs, float_type, pt);
}
pub fn floatMulScalar(
lhs: Value,
rhs: Value,
float_type: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = lhs.toFloat(f16, mod) * rhs.toFloat(f16, mod) },
- 32 => .{ .f32 = lhs.toFloat(f32, mod) * rhs.toFloat(f32, mod) },
- 64 => .{ .f64 = lhs.toFloat(f64, mod) * rhs.toFloat(f64, mod) },
- 80 => .{ .f80 = lhs.toFloat(f80, mod) * rhs.toFloat(f80, mod) },
- 128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) },
+ 16 => .{ .f16 = lhs.toFloat(f16, pt) * rhs.toFloat(f16, pt) },
+ 32 => .{ .f32 = lhs.toFloat(f32, pt) * rhs.toFloat(f32, pt) },
+ 64 => .{ .f64 = lhs.toFloat(f64, pt) * rhs.toFloat(f64, pt) },
+ 80 => .{ .f80 = lhs.toFloat(f80, pt) * rhs.toFloat(f80, pt) },
+ 128 => .{ .f128 = lhs.toFloat(f128, pt) * rhs.toFloat(f128, pt) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
- if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
- const scalar_ty = float_type.scalarType(mod);
+pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
+ const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try sqrtScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try sqrtScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return sqrtScalar(val, float_type, mod);
+ return sqrtScalar(val, float_type, pt);
}
-pub fn sqrtScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @sqrt(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @sqrt(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @sqrt(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @sqrt(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @sqrt(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @sqrt(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @sqrt(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @sqrt(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @sqrt(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try sinScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try sinScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return sinScalar(val, float_type, mod);
+ return sinScalar(val, float_type, pt);
}
-pub fn sinScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @sin(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @sin(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @sin(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @sin(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @sin(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @sin(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @sin(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @sin(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @sin(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @sin(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try cosScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try cosScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return cosScalar(val, float_type, mod);
+ return cosScalar(val, float_type, pt);
}
-pub fn cosScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @cos(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @cos(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @cos(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @cos(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @cos(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @cos(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @cos(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @cos(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @cos(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @cos(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try tanScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try tanScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return tanScalar(val, float_type, mod);
+ return tanScalar(val, float_type, pt);
}
-pub fn tanScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @tan(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @tan(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @tan(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @tan(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @tan(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @tan(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @tan(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @tan(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @tan(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @tan(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try expScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try expScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return expScalar(val, float_type, mod);
+ return expScalar(val, float_type, pt);
}
-pub fn expScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @exp(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @exp(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @exp(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @exp(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @exp(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @exp(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @exp(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @exp(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @exp(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @exp(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try exp2Scalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try exp2Scalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return exp2Scalar(val, float_type, mod);
+ return exp2Scalar(val, float_type, pt);
}
-pub fn exp2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @exp2(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @exp2(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @exp2(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @exp2(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @exp2(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @exp2(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @exp2(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @exp2(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @exp2(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try logScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try logScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return logScalar(val, float_type, mod);
+ return logScalar(val, float_type, pt);
}
-pub fn logScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @log(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @log(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @log(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @log(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @log(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @log(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @log(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @log(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @log(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @log(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try log2Scalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try log2Scalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return log2Scalar(val, float_type, mod);
+ return log2Scalar(val, float_type, pt);
}
-pub fn log2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @log2(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @log2(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @log2(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @log2(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @log2(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @log2(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @log2(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @log2(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @log2(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @log2(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try log10Scalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try log10Scalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return log10Scalar(val, float_type, mod);
+ return log10Scalar(val, float_type, pt);
}
-pub fn log10Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @log10(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @log10(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @log10(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @log10(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @log10(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @log10(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @log10(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @log10(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @log10(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @log10(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn abs(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try absScalar(elem_val, scalar_ty, mod, arena)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try absScalar(elem_val, scalar_ty, pt, arena)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return absScalar(val, ty, mod, arena);
+ return absScalar(val, ty, pt, arena);
}
-pub fn absScalar(val: Value, ty: Type, mod: *Module, arena: Allocator) Allocator.Error!Value {
+pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
+ const mod = pt.zcu;
switch (ty.zigTypeTag(mod)) {
.Int => {
var buffer: Value.BigIntSpace = undefined;
- var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena);
+ var operand_bigint = try val.toBigInt(&buffer, pt).toManaged(arena);
operand_bigint.abs();
- return mod.intValue_big(try ty.toUnsigned(mod), operand_bigint.toConst());
+ return pt.intValue_big(try ty.toUnsigned(pt), operand_bigint.toConst());
},
.ComptimeInt => {
var buffer: Value.BigIntSpace = undefined;
- var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena);
+ var operand_bigint = try val.toBigInt(&buffer, pt).toManaged(arena);
operand_bigint.abs();
- return mod.intValue_big(ty, operand_bigint.toConst());
+ return pt.intValue_big(ty, operand_bigint.toConst());
},
.ComptimeFloat, .Float => {
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) {
- 16 => .{ .f16 = @abs(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @abs(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @abs(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @abs(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @abs(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @abs(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @abs(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @abs(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @abs(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @abs(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = storage,
- } })));
+ } }));
},
else => unreachable,
}
}
-pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try floorScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try floorScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return floorScalar(val, float_type, mod);
+ return floorScalar(val, float_type, pt);
}
-pub fn floorScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @floor(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @floor(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @floor(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @floor(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @floor(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @floor(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @floor(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @floor(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @floor(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @floor(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try ceilScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try ceilScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return ceilScalar(val, float_type, mod);
+ return ceilScalar(val, float_type, pt);
}
-pub fn ceilScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @ceil(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @ceil(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @ceil(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @ceil(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @ceil(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @ceil(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @ceil(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @ceil(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @ceil(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try roundScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try roundScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return roundScalar(val, float_type, mod);
+ return roundScalar(val, float_type, pt);
}
-pub fn roundScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @round(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @round(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @round(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @round(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @round(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @round(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @round(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @round(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @round(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @round(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
-pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
+pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const elem_val = try val.elemValue(mod, i);
- scalar.* = (try truncScalar(elem_val, scalar_ty, mod)).toIntern();
+ const elem_val = try val.elemValue(pt, i);
+ scalar.* = (try truncScalar(elem_val, scalar_ty, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return truncScalar(val, float_type, mod);
+ return truncScalar(val, float_type, pt);
}
-pub fn truncScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+pub fn truncScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @trunc(val.toFloat(f16, mod)) },
- 32 => .{ .f32 = @trunc(val.toFloat(f32, mod)) },
- 64 => .{ .f64 = @trunc(val.toFloat(f64, mod)) },
- 80 => .{ .f80 = @trunc(val.toFloat(f80, mod)) },
- 128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @trunc(val.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @trunc(val.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @trunc(val.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @trunc(val.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @trunc(val.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
pub fn mulAdd(
@@ -3488,23 +3560,24 @@ pub fn mulAdd(
mulend2: Value,
addend: Value,
arena: Allocator,
- mod: *Module,
+ pt: Zcu.PerThread,
) !Value {
+ const mod = pt.zcu;
if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
const scalar_ty = float_type.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
- const mulend1_elem = try mulend1.elemValue(mod, i);
- const mulend2_elem = try mulend2.elemValue(mod, i);
- const addend_elem = try addend.elemValue(mod, i);
- scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).toIntern();
+ const mulend1_elem = try mulend1.elemValue(pt, i);
+ const mulend2_elem = try mulend2.elemValue(pt, i);
+ const addend_elem = try addend.elemValue(pt, i);
+ scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, pt)).toIntern();
}
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })));
+ } }));
}
- return mulAddScalar(float_type, mulend1, mulend2, addend, mod);
+ return mulAddScalar(float_type, mulend1, mulend2, addend, pt);
}
pub fn mulAddScalar(
@@ -3512,32 +3585,33 @@ pub fn mulAddScalar(
mulend1: Value,
mulend2: Value,
addend: Value,
- mod: *Module,
+ pt: Zcu.PerThread,
) Allocator.Error!Value {
+ const mod = pt.zcu;
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
- 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, mod), mulend2.toFloat(f16, mod), addend.toFloat(f16, mod)) },
- 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, mod), mulend2.toFloat(f32, mod), addend.toFloat(f32, mod)) },
- 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, mod), mulend2.toFloat(f64, mod), addend.toFloat(f64, mod)) },
- 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, mod), mulend2.toFloat(f80, mod), addend.toFloat(f80, mod)) },
- 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) },
+ 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, pt), mulend2.toFloat(f16, pt), addend.toFloat(f16, pt)) },
+ 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, pt), mulend2.toFloat(f32, pt), addend.toFloat(f32, pt)) },
+ 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, pt), mulend2.toFloat(f64, pt), addend.toFloat(f64, pt)) },
+ 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, pt), mulend2.toFloat(f80, pt), addend.toFloat(f80, pt)) },
+ 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, pt), mulend2.toFloat(f128, pt), addend.toFloat(f128, pt)) },
else => unreachable,
};
- return Value.fromInterned((try mod.intern(.{ .float = .{
+ return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })));
+ } }));
}
/// If the value is represented in-memory as a series of bytes that all
/// have the same value, return that byte value, otherwise null.
-pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?u8 {
- const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null;
+pub fn hasRepeatedByteRepr(val: Value, ty: Type, pt: Zcu.PerThread) !?u8 {
+ const abi_size = std.math.cast(usize, ty.abiSize(pt)) orelse return null;
assert(abi_size >= 1);
- const byte_buffer = try mod.gpa.alloc(u8, abi_size);
- defer mod.gpa.free(byte_buffer);
+ const byte_buffer = try pt.zcu.gpa.alloc(u8, abi_size);
+ defer pt.zcu.gpa.free(byte_buffer);
- writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) {
+ writeToMemory(val, ty, pt, byte_buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => return null,
// TODO: The writeToMemory function was originally created for the purpose
@@ -3567,13 +3641,13 @@ pub fn typeOf(val: Value, zcu: *const Zcu) Type {
/// If `val` is not undef, the bounds are both `val`.
/// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type.
/// If `val` is undef and is a `comptime_int`, returns null.
-pub fn intValueBounds(val: Value, mod: *Module) !?[2]Value {
- if (!val.isUndef(mod)) return .{ val, val };
- const ty = mod.intern_pool.typeOf(val.toIntern());
+pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value {
+ if (!val.isUndef(pt.zcu)) return .{ val, val };
+ const ty = pt.zcu.intern_pool.typeOf(val.toIntern());
if (ty == .comptime_int_type) return null;
return .{
- try Type.fromInterned(ty).minInt(mod, Type.fromInterned(ty)),
- try Type.fromInterned(ty).maxInt(mod, Type.fromInterned(ty)),
+ try Type.fromInterned(ty).minInt(pt, Type.fromInterned(ty)),
+ try Type.fromInterned(ty).maxInt(pt, Type.fromInterned(ty)),
};
}
@@ -3604,14 +3678,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex;
/// `parent_ptr` must be a single-pointer to some optional.
/// Returns a pointer to the payload of the optional.
/// May perform type resolution.
-pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value {
+pub fn ptrOptPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
+ const zcu = pt.zcu;
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const opt_ty = parent_ptr_ty.childType(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .One);
assert(opt_ty.zigTypeTag(zcu) == .Optional);
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_ty.ptrInfo(zcu);
// We can correctly preserve alignment `.none`, since an optional has the same
// natural alignment as its child type.
@@ -3619,15 +3694,15 @@ pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value {
break :info new;
});
- if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
+ if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
if (opt_ty.isPtrLikeOptional(zcu)) {
// Just reinterpret the pointer, since the layout is well-defined
- return zcu.getCoerced(parent_ptr, result_ty);
+ return pt.getCoerced(parent_ptr, result_ty);
}
- const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, opt_ty, zcu);
- return Value.fromInterned(try zcu.intern(.{ .ptr = .{
+ const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, opt_ty, pt);
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .opt_payload = base_ptr.toIntern() },
.byte_offset = 0,
@@ -3637,14 +3712,15 @@ pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value {
/// `parent_ptr` must be a single-pointer to some error union.
/// Returns a pointer to the payload of the error union.
/// May perform type resolution.
-pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value {
+pub fn ptrEuPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
+ const zcu = pt.zcu;
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const eu_ty = parent_ptr_ty.childType(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .One);
assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion);
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_ty.ptrInfo(zcu);
// We can correctly preserve alignment `.none`, since an error union has a
// natural alignment greater than or equal to that of its payload type.
@@ -3652,10 +3728,10 @@ pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value {
break :info new;
});
- if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
+ if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
- const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, eu_ty, zcu);
- return Value.fromInterned(try zcu.intern(.{ .ptr = .{
+ const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, eu_ty, pt);
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .eu_payload = base_ptr.toIntern() },
.byte_offset = 0,
@@ -3666,7 +3742,8 @@ pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value {
/// Returns a pointer to the aggregate field at the specified index.
/// For slices, uses `slice_ptr_index` and `slice_len_index`.
/// May perform type resolution.
-pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
+pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
+ const zcu = pt.zcu;
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const aggregate_ty = parent_ptr_ty.childType(zcu);
@@ -3679,39 +3756,39 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
.Struct => field: {
const field_ty = aggregate_ty.structFieldType(field_idx, zcu);
switch (aggregate_ty.containerLayout(zcu)) {
- .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) },
+ .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), pt, .sema) },
.@"extern" => {
// Well-defined layout, so just offset the pointer appropriately.
- const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
+ const byte_off = aggregate_ty.structFieldOffset(field_idx, pt);
const field_align = a: {
const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: {
- break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
+ break :pa (try aggregate_ty.abiAlignmentAdvanced(pt, .sema)).scalar;
} else parent_ptr_info.flags.alignment;
break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off)));
};
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.flags.alignment = field_align;
break :info new;
});
- return parent_ptr.getOffsetPtr(byte_off, result_ty, zcu);
+ return parent_ptr.getOffsetPtr(byte_off, result_ty, pt);
},
- .@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, zcu)) {
+ .@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt)) {
.bit_ptr => |packed_offset| {
- const result_ty = try zcu.ptrType(info: {
+ const result_ty = try pt.ptrType(info: {
var new = parent_ptr_info;
new.packed_offset = packed_offset;
new.child = field_ty.toIntern();
if (new.flags.alignment == .none) {
- new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
+ new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(pt, .sema)).scalar;
}
break :info new;
});
- return zcu.getCoerced(parent_ptr, result_ty);
+ return pt.getCoerced(parent_ptr, result_ty);
},
.byte_ptr => |ptr_info| {
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.packed_offset = .{
@@ -3721,7 +3798,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
new.flags.alignment = ptr_info.alignment;
break :info new;
});
- return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, zcu);
+ return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, pt);
},
},
}
@@ -3730,46 +3807,46 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
const union_obj = zcu.typeToUnion(aggregate_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
switch (aggregate_ty.containerLayout(zcu)) {
- .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) },
+ .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), pt, .sema) },
.@"extern" => {
// Point to the same address.
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
break :info new;
});
- return zcu.getCoerced(parent_ptr, result_ty);
+ return pt.getCoerced(parent_ptr, result_ty);
},
.@"packed" => {
// If the field has an ABI size matching its bit size, then we can continue to use a
// non-bit pointer if the parent pointer is also a non-bit pointer.
- if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) {
+ if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(pt, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(pt, .sema)) {
// We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely.
const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) {
.little => 0,
- .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar,
+ .big => (try aggregate_ty.abiSizeAdvanced(pt, .sema)).scalar - (try field_ty.abiSizeAdvanced(pt, .sema)).scalar,
};
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.flags.alignment = InternPool.Alignment.fromLog2Units(
- @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?),
+ @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema)).toByteUnits().?),
);
break :info new;
});
- return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu);
+ return parent_ptr.getOffsetPtr(byte_offset, result_ty, pt);
} else {
// The result must be a bit-pointer if it is not already.
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
if (new.packed_offset.host_size == 0) {
- new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8);
+ new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(pt, .sema)) + 7) / 8);
assert(new.packed_offset.bit_offset == 0);
}
break :info new;
});
- return zcu.getCoerced(parent_ptr, result_ty);
+ return pt.getCoerced(parent_ptr, result_ty);
}
},
}
@@ -3777,8 +3854,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
.Pointer => field_ty: {
assert(aggregate_ty.isSlice(zcu));
break :field_ty switch (field_idx) {
- Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(zcu) },
- Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(zcu) },
+ Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(pt) },
+ Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(pt) },
else => unreachable,
};
},
@@ -3786,24 +3863,24 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
};
const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: {
- const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
+ const ty_align = (try field_ty.abiAlignmentAdvanced(pt, .sema)).scalar;
const true_field_align = if (field_align == .none) ty_align else field_align;
const new_align = true_field_align.min(parent_ptr_info.flags.alignment);
if (new_align == ty_align) break :a .none;
break :a new_align;
} else field_align;
- const result_ty = try zcu.ptrTypeSema(info: {
+ const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.flags.alignment = new_align;
break :info new;
});
- if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
+ if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
- const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, aggregate_ty, zcu);
- return Value.fromInterned(try zcu.intern(.{ .ptr = .{
+ const base_ptr = try parent_ptr.canonicalizeBasePtr(.One, aggregate_ty, pt);
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .field = .{
.base = base_ptr.toIntern(),
@@ -3816,7 +3893,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
/// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice.
/// Returns a pointer to the element at the specified index.
/// May perform type resolution.
-pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
+pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, pt: Zcu.PerThread) !Value {
+ const zcu = pt.zcu;
const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) {
.One, .Many, .C => orig_parent_ptr,
.Slice => orig_parent_ptr.slicePtr(zcu),
@@ -3824,14 +3902,14 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const elem_ty = parent_ptr_ty.childType(zcu);
- const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu);
+ const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), pt);
- if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
+ if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
// Since we have a bit-pointer, the pointer address should be unchanged.
assert(elem_ty.zigTypeTag(zcu) == .Vector);
- return zcu.getCoerced(parent_ptr, result_ty);
+ return pt.getCoerced(parent_ptr, result_ty);
}
const PtrStrat = union(enum) {
@@ -3841,31 +3919,31 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
.One => switch (elem_ty.zigTypeTag(zcu)) {
- .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) },
+ .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(pt, .sema), 8) },
.Array => strat: {
const arr_elem_ty = elem_ty.childType(zcu);
- if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) {
+ if (try arr_elem_ty.comptimeOnlyAdvanced(pt, .sema)) {
break :strat .{ .elem_ptr = arr_elem_ty };
}
- break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar };
+ break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(pt, .sema)).scalar };
},
else => unreachable,
},
- .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema))
+ .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(pt, .sema))
.{ .elem_ptr = elem_ty }
else
- .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar },
+ .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar },
.Slice => unreachable,
};
switch (strat) {
.offset => |byte_offset| {
- return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu);
+ return parent_ptr.getOffsetPtr(byte_offset, result_ty, pt);
},
.elem_ptr => |manyptr_elem_ty| if (field_idx == 0) {
- return zcu.getCoerced(parent_ptr, result_ty);
+ return pt.getCoerced(parent_ptr, result_ty);
} else {
const arr_base_ty, const arr_base_len = manyptr_elem_ty.arrayBase(zcu);
const base_idx = arr_base_len * field_idx;
@@ -3875,7 +3953,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
if (Value.fromInterned(arr_elem.base).typeOf(zcu).childType(zcu).toIntern() == arr_base_ty.toIntern()) {
// We already have a pointer to an element of an array of this type.
// Just modify the index.
- return Value.fromInterned(try zcu.intern(.{ .ptr = ptr: {
+ return Value.fromInterned(try pt.intern(.{ .ptr = ptr: {
var new = parent_info;
new.base_addr.arr_elem.index += base_idx;
new.ty = result_ty.toIntern();
@@ -3885,8 +3963,8 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
},
else => {},
}
- const base_ptr = try parent_ptr.canonicalizeBasePtr(.Many, arr_base_ty, zcu);
- return Value.fromInterned(try zcu.intern(.{ .ptr = .{
+ const base_ptr = try parent_ptr.canonicalizeBasePtr(.Many, arr_base_ty, pt);
+ return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = result_ty.toIntern(),
.base_addr = .{ .arr_elem = .{
.base = base_ptr.toIntern(),
@@ -3898,9 +3976,9 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
}
}
-fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size, want_child: Type, zcu: *Zcu) !Value {
- const ptr_ty = base_ptr.typeOf(zcu);
- const ptr_info = ptr_ty.ptrInfo(zcu);
+fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size, want_child: Type, pt: Zcu.PerThread) !Value {
+ const ptr_ty = base_ptr.typeOf(pt.zcu);
+ const ptr_info = ptr_ty.ptrInfo(pt.zcu);
if (ptr_info.flags.size == want_size and
ptr_info.child == want_child.toIntern() and
@@ -3914,7 +3992,7 @@ fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size
return base_ptr;
}
- const new_ty = try zcu.ptrType(.{
+ const new_ty = try pt.ptrType(.{
.child = want_child.toIntern(),
.sentinel = .none,
.flags = .{
@@ -3926,15 +4004,15 @@ fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size
.address_space = ptr_info.flags.address_space,
},
});
- return zcu.getCoerced(base_ptr, new_ty);
+ return pt.getCoerced(base_ptr, new_ty);
}
-pub fn getOffsetPtr(ptr_val: Value, byte_off: u64, new_ty: Type, zcu: *Zcu) !Value {
- if (ptr_val.isUndef(zcu)) return ptr_val;
- var ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
+pub fn getOffsetPtr(ptr_val: Value, byte_off: u64, new_ty: Type, pt: Zcu.PerThread) !Value {
+ if (ptr_val.isUndef(pt.zcu)) return ptr_val;
+ var ptr = pt.zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
ptr.ty = new_ty.toIntern();
ptr.byte_offset += byte_off;
- return Value.fromInterned(try zcu.intern(.{ .ptr = ptr }));
+ return Value.fromInterned(try pt.intern(.{ .ptr = ptr }));
}
pub const PointerDeriveStep = union(enum) {
@@ -3977,21 +4055,21 @@ pub const PointerDeriveStep = union(enum) {
new_ptr_ty: Type,
},
- pub fn ptrType(step: PointerDeriveStep, zcu: *Zcu) !Type {
+ pub fn ptrType(step: PointerDeriveStep, pt: Zcu.PerThread) !Type {
return switch (step) {
.int => |int| int.ptr_ty,
- .decl_ptr => |decl| try zcu.declPtr(decl).declPtrType(zcu),
+ .decl_ptr => |decl| try pt.zcu.declPtr(decl).declPtrType(pt),
.anon_decl_ptr => |ad| Type.fromInterned(ad.orig_ty),
.comptime_alloc_ptr => |info| info.ptr_ty,
- .comptime_field_ptr => |val| try zcu.singleConstPtrType(val.typeOf(zcu)),
+ .comptime_field_ptr => |val| try pt.singleConstPtrType(val.typeOf(pt.zcu)),
.offset_and_cast => |oac| oac.new_ptr_ty,
inline .eu_payload_ptr, .opt_payload_ptr, .field_ptr, .elem_ptr => |x| x.result_ptr_ty,
};
}
};
-pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep {
- return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) {
+pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread) Allocator.Error!PointerDeriveStep {
+ return ptr_val.pointerDerivationAdvanced(arena, pt, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.AnalysisFail => unreachable,
};
@@ -4001,7 +4079,8 @@ pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.
/// only field and element pointers with no casts. This can be used by codegen backends
/// which prefer field/elem accesses when lowering constant pointer values.
/// It is also used by the Value printing logic for pointers.
-pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, opt_sema: ?*Sema) !PointerDeriveStep {
+pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, opt_sema: ?*Sema) !PointerDeriveStep {
+ const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
const base_derive: PointerDeriveStep = switch (ptr.base_addr) {
.int => return .{ .int = .{
@@ -4012,7 +4091,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
.anon_decl => |ad| base: {
// A slight tweak: `orig_ty` here is sometimes not `const`, but it ought to be.
// TODO: fix this in the sites interning anon decls!
- const const_ty = try zcu.ptrType(info: {
+ const const_ty = try pt.ptrType(info: {
var info = Type.fromInterned(ad.orig_ty).ptrInfo(zcu);
info.flags.is_const = true;
break :info info;
@@ -4024,11 +4103,11 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
},
.comptime_alloc => |idx| base: {
const alloc = opt_sema.?.getComptimeAlloc(idx);
- const val = try alloc.val.intern(zcu, opt_sema.?.arena);
+ const val = try alloc.val.intern(pt, opt_sema.?.arena);
const ty = val.typeOf(zcu);
break :base .{ .comptime_alloc_ptr = .{
.val = val,
- .ptr_ty = try zcu.ptrType(.{
+ .ptr_ty = try pt.ptrType(.{
.child = ty.toIntern(),
.flags = .{
.alignment = alloc.alignment,
@@ -4041,20 +4120,20 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
const base_ptr = Value.fromInterned(eu_ptr);
const base_ptr_ty = base_ptr.typeOf(zcu);
const parent_step = try arena.create(PointerDeriveStep);
- parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, zcu, opt_sema);
+ parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, pt, opt_sema);
break :base .{ .eu_payload_ptr = .{
.parent = parent_step,
- .result_ptr_ty = try zcu.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)),
+ .result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)),
} };
},
.opt_payload => |opt_ptr| base: {
const base_ptr = Value.fromInterned(opt_ptr);
const base_ptr_ty = base_ptr.typeOf(zcu);
const parent_step = try arena.create(PointerDeriveStep);
- parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, zcu, opt_sema);
+ parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, pt, opt_sema);
break :base .{ .opt_payload_ptr = .{
.parent = parent_step,
- .result_ptr_ty = try zcu.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)),
+ .result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)),
} };
},
.field => |field| base: {
@@ -4062,22 +4141,22 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
const base_ptr_ty = base_ptr.typeOf(zcu);
const agg_ty = base_ptr_ty.childType(zcu);
const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
- .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) },
- .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) },
+ .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), pt, .sema) },
+ .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), pt, .sema) },
.Pointer => .{ switch (field.index) {
Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
Value.slice_len_index => Type.usize,
else => unreachable,
- }, Type.usize.abiAlignment(zcu) },
+ }, Type.usize.abiAlignment(pt) },
else => unreachable,
};
- const base_align = base_ptr_ty.ptrAlignment(zcu);
+ const base_align = base_ptr_ty.ptrAlignment(pt);
const result_align = field_align.minStrict(base_align);
- const result_ty = try zcu.ptrType(.{
+ const result_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = flags: {
var flags = base_ptr_ty.ptrInfo(zcu).flags;
- if (result_align == field_ty.abiAlignment(zcu)) {
+ if (result_align == field_ty.abiAlignment(pt)) {
flags.alignment = .none;
} else {
flags.alignment = result_align;
@@ -4086,7 +4165,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
},
});
const parent_step = try arena.create(PointerDeriveStep);
- parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, zcu, opt_sema);
+ parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, pt, opt_sema);
break :base .{ .field_ptr = .{
.parent = parent_step,
.field_idx = @intCast(field.index),
@@ -4095,9 +4174,9 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
},
.arr_elem => |arr_elem| base: {
const parent_step = try arena.create(PointerDeriveStep);
- parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, zcu, opt_sema);
- const parent_ptr_info = (try parent_step.ptrType(zcu)).ptrInfo(zcu);
- const result_ptr_ty = try zcu.ptrType(.{
+ parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, pt, opt_sema);
+ const parent_ptr_info = (try parent_step.ptrType(pt)).ptrInfo(zcu);
+ const result_ptr_ty = try pt.ptrType(.{
.child = parent_ptr_info.child,
.flags = flags: {
var flags = parent_ptr_info.flags;
@@ -4113,12 +4192,12 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
},
};
- if (ptr.byte_offset == 0 and ptr.ty == (try base_derive.ptrType(zcu)).toIntern()) {
+ if (ptr.byte_offset == 0 and ptr.ty == (try base_derive.ptrType(pt)).toIntern()) {
return base_derive;
}
const need_child = Type.fromInterned(ptr.ty).childType(zcu);
- if (need_child.comptimeOnly(zcu)) {
+ if (need_child.comptimeOnly(pt)) {
// No refinement can happen - this pointer is presumably invalid.
// Just offset it.
const parent = try arena.create(PointerDeriveStep);
@@ -4129,7 +4208,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
.new_ptr_ty = Type.fromInterned(ptr.ty),
} };
}
- const need_bytes = need_child.abiSize(zcu);
+ const need_bytes = need_child.abiSize(pt);
var cur_derive = base_derive;
var cur_offset = ptr.byte_offset;
@@ -4137,7 +4216,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
// Refine through fields and array elements as much as possible.
if (need_bytes > 0) while (true) {
- const cur_ty = (try cur_derive.ptrType(zcu)).childType(zcu);
+ const cur_ty = (try cur_derive.ptrType(pt)).childType(zcu);
if (cur_ty.toIntern() == need_child.toIntern() and cur_offset == 0) {
break;
}
@@ -4168,7 +4247,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
.Array => {
const elem_ty = cur_ty.childType(zcu);
- const elem_size = elem_ty.abiSize(zcu);
+ const elem_size = elem_ty.abiSize(pt);
const start_idx = cur_offset / elem_size;
const end_idx = (cur_offset + need_bytes + elem_size - 1) / elem_size;
if (end_idx == start_idx + 1) {
@@ -4177,7 +4256,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
cur_derive = .{ .elem_ptr = .{
.parent = parent,
.elem_idx = start_idx,
- .result_ptr_ty = try zcu.adjustPtrTypeChild(try parent.ptrType(zcu), elem_ty),
+ .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), elem_ty),
} };
cur_offset -= start_idx * elem_size;
} else {
@@ -4188,7 +4267,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
cur_derive = .{ .elem_ptr = .{
.parent = parent,
.elem_idx = start_idx,
- .result_ptr_ty = try zcu.adjustPtrTypeChild(try parent.ptrType(zcu), elem_ty),
+ .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), elem_ty),
} };
cur_offset -= start_idx * elem_size;
}
@@ -4199,19 +4278,19 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
.auto, .@"packed" => break,
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const field_ty = cur_ty.structFieldType(field_idx, zcu);
- const start_off = cur_ty.structFieldOffset(field_idx, zcu);
- const end_off = start_off + field_ty.abiSize(zcu);
+ const start_off = cur_ty.structFieldOffset(field_idx, pt);
+ const end_off = start_off + field_ty.abiSize(pt);
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
- const old_ptr_ty = try cur_derive.ptrType(zcu);
- const parent_align = old_ptr_ty.ptrAlignment(zcu);
+ const old_ptr_ty = try cur_derive.ptrType(pt);
+ const parent_align = old_ptr_ty.ptrAlignment(pt);
const field_align = InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(start_off)));
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
- const new_ptr_ty = try zcu.ptrType(.{
+ const new_ptr_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = flags: {
var flags = old_ptr_ty.ptrInfo(zcu).flags;
- if (field_align == field_ty.abiAlignment(zcu)) {
+ if (field_align == field_ty.abiAlignment(pt)) {
flags.alignment = .none;
} else {
flags.alignment = field_align;
@@ -4232,7 +4311,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
}
};
- if (cur_offset == 0 and (try cur_derive.ptrType(zcu)).toIntern() == ptr.ty) {
+ if (cur_offset == 0 and (try cur_derive.ptrType(pt)).toIntern() == ptr.ty) {
return cur_derive;
}
@@ -4245,20 +4324,20 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
} };
}
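
`pointerDerivationAdvanced` converts a raw byte-addressed pointer into a chain of `PointerDeriveStep`s (a base such as `decl_ptr` or `anon_decl_ptr`, refined through field, element, payload, and offset-and-cast steps), so codegen backends can emit field/element accesses instead of raw byte offsets. A sketch of walking such a chain back to its base, assuming the step payloads carry the `parent` pointers constructed above; not part of this diff:

fn baseOfDerivation(step: Value.PointerDeriveStep) Value.PointerDeriveStep {
    return switch (step) {
        // Roots of the chain: these variants have no parent step.
        .int, .decl_ptr, .anon_decl_ptr, .comptime_alloc_ptr, .comptime_field_ptr => step,
        .offset_and_cast => |oac| baseOfDerivation(oac.parent.*),
        inline .eu_payload_ptr, .opt_payload_ptr, .field_ptr, .elem_ptr => |x| baseOfDerivation(x.parent.*),
    };
}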
-pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value {
- switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+pub fn resolveLazy(val: Value, arena: Allocator, pt: Zcu.PerThread) Zcu.SemaError!Value {
+ switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => return val,
- .lazy_align, .lazy_size => return zcu.intValue(
+ .lazy_align, .lazy_size => return pt.intValue(
Type.fromInterned(int.ty),
- (try val.getUnsignedIntAdvanced(zcu, .sema)).?,
+ (try val.getUnsignedIntAdvanced(pt, .sema)).?,
),
},
.slice => |slice| {
- const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu);
- const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu);
+ const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, pt);
+ const len = try Value.fromInterned(slice.len).resolveLazy(arena, pt);
if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val;
- return Value.fromInterned(try zcu.intern(.{ .slice = .{
+ return Value.fromInterned(try pt.intern(.{ .slice = .{
.ty = slice.ty,
.ptr = ptr.toIntern(),
.len = len.toIntern(),
@@ -4268,22 +4347,22 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value
switch (ptr.base_addr) {
.decl, .comptime_alloc, .anon_decl, .int => return val,
.comptime_field => |field_val| {
- const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern();
+ const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, pt)).toIntern();
return if (resolved_field_val == field_val)
val
else
- Value.fromInterned((try zcu.intern(.{ .ptr = .{
+ Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = ptr.ty,
.base_addr = .{ .comptime_field = resolved_field_val },
.byte_offset = ptr.byte_offset,
- } })));
+ } }));
},
.eu_payload, .opt_payload => |base| {
- const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern();
+ const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, pt)).toIntern();
return if (resolved_base == base)
val
else
- Value.fromInterned((try zcu.intern(.{ .ptr = .{
+ Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = ptr.ty,
.base_addr = switch (ptr.base_addr) {
.eu_payload => .{ .eu_payload = resolved_base },
@@ -4291,14 +4370,14 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value
else => unreachable,
},
.byte_offset = ptr.byte_offset,
- } })));
+ } }));
},
.arr_elem, .field => |base_index| {
- const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern();
+ const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, pt)).toIntern();
return if (resolved_base == base_index.base)
val
else
- Value.fromInterned((try zcu.intern(.{ .ptr = .{
+ Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = ptr.ty,
.base_addr = switch (ptr.base_addr) {
.arr_elem => .{ .arr_elem = .{
@@ -4312,7 +4391,7 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value
else => unreachable,
},
.byte_offset = ptr.byte_offset,
- } })));
+ } }));
},
}
},
@@ -4321,40 +4400,40 @@ pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value
.elems => |elems| {
var resolved_elems: []InternPool.Index = &.{};
for (elems, 0..) |elem, i| {
- const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern();
+ const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern();
if (resolved_elems.len == 0 and resolved_elem != elem) {
resolved_elems = try arena.alloc(InternPool.Index, elems.len);
@memcpy(resolved_elems[0..i], elems[0..i]);
}
if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
}
- return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ return if (resolved_elems.len == 0) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .elems = resolved_elems },
- } })));
+ } }));
},
.repeated_elem => |elem| {
- const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern();
- return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+ const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern();
+ return if (resolved_elem == elem) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .repeated_elem = resolved_elem },
- } })));
+ } }));
},
},
.un => |un| {
const resolved_tag = if (un.tag == .none)
.none
else
- (try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern();
- const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern();
+ (try Value.fromInterned(un.tag).resolveLazy(arena, pt)).toIntern();
+ const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, pt)).toIntern();
return if (resolved_tag == un.tag and resolved_val == un.val)
val
else
- Value.fromInterned((try zcu.intern(.{ .un = .{
+ Value.fromInterned(try pt.intern(.{ .un = .{
.ty = un.ty,
.tag = resolved_tag,
.val = resolved_val,
- } })));
+ } }));
},
else => return val,
}
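
`resolveLazy` folds `lazy_align`/`lazy_size` integers into concrete values and rebuilds slices, pointers, aggregates, and unions only when a child actually changed; otherwise the original value is returned untouched. A minimal usage fragment, assuming `val`, `arena`, and `pt` from the surrounding context (illustrative only):

// After one resolution the value has concrete storage, so resolving again
// is a no-op that yields the same interned index.
const resolved = try val.resolveLazy(arena, pt);
assert((try resolved.resolveLazy(arena, pt)).toIntern() == resolved.toIntern());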
src/Zcu.zig
@@ -6,7 +6,6 @@ const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const Allocator = std.mem.Allocator;
-const ArrayListUnmanaged = std.ArrayListUnmanaged;
const assert = std.debug.assert;
const log = std.log.scoped(.module);
const BigIntConst = std.math.big.int.Const;
@@ -75,10 +74,10 @@ local_zir_cache: Compilation.Directory,
/// This is where all `Export` values are stored. Not all values here are necessarily valid exports;
/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted.
-all_exports: ArrayListUnmanaged(Export) = .{},
+all_exports: std.ArrayListUnmanaged(Export) = .{},
/// This is a list of free indices in `all_exports`. These indices may be reused by exports from
/// future semantic analysis.
-free_exports: ArrayListUnmanaged(u32) = .{},
+free_exports: std.ArrayListUnmanaged(u32) = .{},
/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of
/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit`
/// whose analysis triggered the export.
@@ -179,7 +178,7 @@ stage1_flags: packed struct {
reserved: u2 = 0,
} = .{},
-compile_log_text: ArrayListUnmanaged(u8) = .{},
+compile_log_text: std.ArrayListUnmanaged(u8) = .{},
emit_h: ?*GlobalEmitH,
@@ -203,6 +202,8 @@ panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len,
panic_func_index: InternPool.Index = .none,
null_stack_trace: InternPool.Index = .none,
+pub const PerThread = @import("Zcu/PerThread.zig");
+
pub const PanicId = enum {
unreach,
unwrap_null,
@@ -519,24 +520,24 @@ pub const Decl = struct {
return decl.getExternDecl(zcu) != .none;
}
- pub fn getAlignment(decl: Decl, zcu: *Zcu) Alignment {
+ pub fn getAlignment(decl: Decl, pt: Zcu.PerThread) Alignment {
assert(decl.has_tv);
if (decl.alignment != .none) return decl.alignment;
- return decl.typeOf(zcu).abiAlignment(zcu);
+ return decl.typeOf(pt.zcu).abiAlignment(pt);
}
- pub fn declPtrType(decl: Decl, zcu: *Zcu) !Type {
+ pub fn declPtrType(decl: Decl, pt: Zcu.PerThread) !Type {
assert(decl.has_tv);
- const decl_ty = decl.typeOf(zcu);
- return zcu.ptrType(.{
+ const decl_ty = decl.typeOf(pt.zcu);
+ return pt.ptrType(.{
.child = decl_ty.toIntern(),
.flags = .{
- .alignment = if (decl.alignment == decl_ty.abiAlignment(zcu))
+ .alignment = if (decl.alignment == decl_ty.abiAlignment(pt))
.none
else
decl.alignment,
.address_space = decl.@"addrspace",
- .is_const = decl.getOwnedVariable(zcu) == null,
+ .is_const = decl.getOwnedVariable(pt.zcu) == null,
},
});
}
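
`Decl.getAlignment` and `Decl.declPtrType` pick up the same `Zcu.PerThread` parameter as the rest of the commit. A minimal usage fragment, assuming a `decl` and `pt` in scope; illustrative only:

// An explicit `align(...)` on the Decl wins; otherwise its type's ABI alignment is used.
const alignment = decl.getAlignment(pt);
// Pointer-to-decl type; const unless the Decl owns a variable.
const decl_ptr_ty = try decl.declPtrType(pt);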
@@ -589,7 +590,7 @@ pub const Decl = struct {
/// This state is attached to every Decl when Module emit_h is non-null.
pub const EmitH = struct {
- fwd_decl: ArrayListUnmanaged(u8) = .{},
+ fwd_decl: std.ArrayListUnmanaged(u8) = .{},
};
pub const DeclAdapter = struct {
@@ -622,8 +623,8 @@ pub const Namespace = struct {
/// Value is whether the usingnamespace decl is marked `pub`.
usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{},
- const Index = InternPool.NamespaceIndex;
- const OptionalIndex = InternPool.OptionalNamespaceIndex;
+ pub const Index = InternPool.NamespaceIndex;
+ pub const OptionalIndex = InternPool.OptionalNamespaceIndex;
const DeclContext = struct {
zcu: *Zcu,
@@ -3079,7 +3080,7 @@ pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
}
}
-fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
+pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
@@ -3279,7 +3280,7 @@ pub fn mapOldZirToNew(
old_inst: Zir.Inst.Index,
new_inst: Zir.Inst.Index,
};
- var match_stack: ArrayListUnmanaged(MatchedZirDecl) = .{};
+ var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{};
defer match_stack.deinit(gpa);
// Main struct inst is always matched
@@ -3394,357 +3395,6 @@ pub fn mapOldZirToNew(
}
}
-/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
-pub fn ensureFileAnalyzed(zcu: *Zcu, file_index: File.Index) SemaError!void {
- if (zcu.fileRootDecl(file_index).unwrap()) |existing_root| {
- return zcu.ensureDeclAnalyzed(existing_root);
- } else {
- return zcu.semaFile(file_index);
- }
-}
-
-/// This ensures that the Decl will have an up-to-date Type and Value populated.
-/// However, the Type itself may not be fully resolved.
-/// For example, an inferred error set is not resolved until after
-/// `analyzeFnBody` is called.
-pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const ip = &mod.intern_pool;
- const decl = mod.declPtr(decl_index);
-
- log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{
- @intFromEnum(decl_index),
- decl.name.fmt(ip),
- });
-
- // Determine whether or not this Decl is outdated, i.e. requires re-analysis
- // even if `complete`. If a Decl is PO, we pessimistically assume that it
- // *does* require re-analysis, to ensure that the Decl is definitely
- // up-to-date when this function returns.
-
- // If analysis occurs in a poor order, this could result in over-analysis.
- // We do our best to avoid this by the other dependency logic in this file
- // which tries to limit re-analysis to Decls whose previously listed
- // dependencies are all up-to-date.
-
- const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index });
- const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or
- mod.potentially_outdated.swapRemove(decl_as_depender);
-
- if (decl_was_outdated) {
- _ = mod.outdated_ready.swapRemove(decl_as_depender);
- }
-
- const was_outdated = mod.outdated_file_root.swapRemove(decl_index) or decl_was_outdated;
-
- switch (decl.analysis) {
- .in_progress => unreachable,
-
- .file_failure => return error.AnalysisFail,
-
- .sema_failure,
- .dependency_failure,
- .codegen_failure,
- => if (!was_outdated) return error.AnalysisFail,
-
- .complete => if (!was_outdated) return,
-
- .unreferenced => {},
- }
-
- if (was_outdated) {
- // The exports this Decl performs will be re-discovered, so we remove them here
- // prior to re-analysis.
- if (build_options.only_c) unreachable;
- mod.deleteUnitExports(decl_as_depender);
- mod.deleteUnitReferences(decl_as_depender);
- }
-
- const sema_result: SemaDeclResult = blk: {
- if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) {
- // Anonymous decl. We don't semantically analyze these.
- break :blk .{
- .invalidate_decl_val = false,
- .invalidate_decl_ref = false,
- };
- }
-
- if (mod.declIsRoot(decl_index)) {
- const changed = try mod.semaFileUpdate(decl.getFileScopeIndex(mod), decl_was_outdated);
- break :blk .{
- .invalidate_decl_val = changed,
- .invalidate_decl_ref = changed,
- };
- }
-
- const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
- defer decl_prog_node.end();
-
- break :blk mod.semaDecl(decl_index) catch |err| switch (err) {
- error.AnalysisFail => {
- if (decl.analysis == .in_progress) {
- // If this decl caused the compile error, the analysis field would
- // be changed to indicate it was this Decl's fault. Because this
- // did not happen, we infer here that it was a dependency failure.
- decl.analysis = .dependency_failure;
- }
- return error.AnalysisFail;
- },
- error.GenericPoison => unreachable,
- else => |e| {
- decl.analysis = .sema_failure;
- try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1);
- try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
- mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create(
- mod.gpa,
- decl.navSrcLoc(mod),
- "unable to analyze: {s}",
- .{@errorName(e)},
- ));
- return error.AnalysisFail;
- },
- };
- };
-
- // TODO: we do not yet have separate dependencies for decl values vs types.
- if (decl_was_outdated) {
- if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) {
- log.debug("Decl tv invalidated ('{d}')", .{@intFromEnum(decl_index)});
- // This dependency was marked as PO, meaning dependees were waiting
- // on its analysis result, and it has turned out to be outdated.
- // Update dependees accordingly.
- try mod.markDependeeOutdated(.{ .decl_val = decl_index });
- } else {
- log.debug("Decl tv up-to-date ('{d}')", .{@intFromEnum(decl_index)});
- // This dependency was previously PO, but turned out to be up-to-date.
- // We do not need to queue successive analysis.
- try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index });
- }
- }
-}
-
-pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.Index) SemaError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
-
- // We only care about the uncoerced function.
- // We need to do this for the "orphaned function" check below to be valid.
- const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index);
-
- const func = zcu.funcInfo(maybe_coerced_func_index);
- const decl_index = func.owner_decl;
- const decl = zcu.declPtr(decl_index);
-
- log.debug("ensureFuncBodyAnalyzed '{d}' (instance of '{}')", .{
- @intFromEnum(func_index),
- decl.name.fmt(ip),
- });
-
- // First, our owner decl must be up-to-date. This will always be the case
- // during the first update, but may not on successive updates if we happen
- // to get analyzed before our parent decl.
- try zcu.ensureDeclAnalyzed(decl_index);
-
- // On an update, it's possible this function changed such that our owner
- // decl now refers to a different function, making this one orphaned. If
- // that's the case, we should remove this function from the binary.
- if (decl.val.ip_index != func_index) {
- try zcu.markDependeeOutdated(.{ .func_ies = func_index });
- ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index }));
- ip.remove(func_index);
- @panic("TODO: remove orphaned function from binary");
- }
-
- // We'll want to remember what the IES used to be before the update for
- // dependency invalidation purposes.
- const old_resolved_ies = if (func.analysis(ip).inferred_error_set)
- func.resolvedErrorSet(ip).*
- else
- .none;
-
- switch (decl.analysis) {
- .unreferenced => unreachable,
- .in_progress => unreachable,
-
- .codegen_failure => unreachable, // functions do not perform constant value generation
-
- .file_failure,
- .sema_failure,
- .dependency_failure,
- => return error.AnalysisFail,
-
- .complete => {},
- }
-
- const func_as_depender = AnalUnit.wrap(.{ .func = func_index });
- const was_outdated = zcu.outdated.swapRemove(func_as_depender) or
- zcu.potentially_outdated.swapRemove(func_as_depender);
-
- if (was_outdated) {
- if (build_options.only_c) unreachable;
- _ = zcu.outdated_ready.swapRemove(func_as_depender);
- zcu.deleteUnitExports(func_as_depender);
- zcu.deleteUnitReferences(func_as_depender);
- }
-
- switch (func.analysis(ip).state) {
- .success => if (!was_outdated) return,
- .sema_failure,
- .dependency_failure,
- .codegen_failure,
- => if (!was_outdated) return error.AnalysisFail,
- .none, .queued => {},
- .in_progress => unreachable,
- .inline_only => unreachable, // don't queue work for this
- }
-
- log.debug("analyze and generate fn body '{d}'; reason='{s}'", .{
- @intFromEnum(func_index),
- if (was_outdated) "outdated" else "never analyzed",
- });
-
- var tmp_arena = std.heap.ArenaAllocator.init(gpa);
- defer tmp_arena.deinit();
- const sema_arena = tmp_arena.allocator();
-
- var air = zcu.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
- error.AnalysisFail => {
- if (func.analysis(ip).state == .in_progress) {
- // If this decl caused the compile error, the analysis field would
- // be changed to indicate it was this Decl's fault. Because this
- // did not happen, we infer here that it was a dependency failure.
- func.analysis(ip).state = .dependency_failure;
- }
- return error.AnalysisFail;
- },
- error.OutOfMemory => return error.OutOfMemory,
- };
- errdefer air.deinit(gpa);
-
- const invalidate_ies_deps = i: {
- if (!was_outdated) break :i false;
- if (!func.analysis(ip).inferred_error_set) break :i true;
- const new_resolved_ies = func.resolvedErrorSet(ip).*;
- break :i new_resolved_ies != old_resolved_ies;
- };
- if (invalidate_ies_deps) {
- log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
- try zcu.markDependeeOutdated(.{ .func_ies = func_index });
- } else if (was_outdated) {
- log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
- try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index });
- }
-
- const comp = zcu.comp;
-
- const dump_air = build_options.enable_debug_extensions and comp.verbose_air;
- const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null);
-
- if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
- air.deinit(gpa);
- return;
- }
-
- try comp.work_queue.writeItem(.{ .codegen_func = .{
- .func = func_index,
- .air = air,
- } });
-}
-
-/// Takes ownership of `air`, even on error.
-/// If any types referenced by `air` are unresolved, marks the codegen as failed.
-pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void {
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
- const comp = zcu.comp;
-
- defer {
- var air_mut = air;
- air_mut.deinit(gpa);
- }
-
- const func = zcu.funcInfo(func_index);
- const decl_index = func.owner_decl;
- const decl = zcu.declPtr(decl_index);
-
- var liveness = try Liveness.analyze(gpa, air, ip);
- defer liveness.deinit(gpa);
-
- if (build_options.enable_debug_extensions and comp.verbose_air) {
- const fqn = try decl.fullyQualifiedName(zcu);
- std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
- @import("print_air.zig").dump(zcu, air, liveness);
- std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)});
- }
-
- if (std.debug.runtime_safety) {
- var verify: Liveness.Verify = .{
- .gpa = gpa,
- .air = air,
- .liveness = liveness,
- .intern_pool = ip,
- };
- defer verify.deinit();
-
- verify.verify() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => {
- try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
- zcu.failed_analysis.putAssumeCapacityNoClobber(
- AnalUnit.wrap(.{ .func = func_index }),
- try Module.ErrorMsg.create(
- gpa,
- decl.navSrcLoc(zcu),
- "invalid liveness: {s}",
- .{@errorName(err)},
- ),
- );
- func.analysis(ip).state = .codegen_failure;
- return;
- },
- };
- }
-
- const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0);
- defer codegen_prog_node.end();
-
- if (!air.typesFullyResolved(zcu)) {
- // A type we depend on failed to resolve. This is a transitive failure.
- // Correcting this failure will involve changing a type this function
- // depends on, hence triggering re-analysis of this function, so this
- // interacts correctly with incremental compilation.
- func.analysis(ip).state = .codegen_failure;
- } else if (comp.bin_file) |lf| {
- lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => {
- func.analysis(ip).state = .codegen_failure;
- },
- else => {
- try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
- zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create(
- gpa,
- decl.navSrcLoc(zcu),
- "unable to codegen: {s}",
- .{@errorName(err)},
- ));
- func.analysis(ip).state = .codegen_failure;
- try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
- },
- };
- } else if (zcu.llvm_object) |llvm_object| {
- if (build_options.only_c) unreachable;
- llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- };
- }
-}
-
/// Ensure this function's body is or will be analyzed and emitted. This should
/// be called whenever a potential runtime call of a function is seen.
///
@@ -3804,608 +3454,105 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
func.analysis(ip).state = .queued;
}
-pub fn semaPkg(zcu: *Zcu, pkg: *Package.Module) !void {
- const import_file_result = try zcu.importPkg(pkg);
- const root_decl_index = zcu.fileRootDecl(import_file_result.file_index);
- if (root_decl_index == .none) {
- return zcu.semaFile(import_file_result.file_index);
- }
-}
-
-fn getFileRootStruct(
- zcu: *Zcu,
- decl_index: Decl.Index,
- namespace_index: Namespace.Index,
- file_index: File.Index,
-) Allocator.Error!InternPool.Index {
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
- const file = zcu.fileByIndex(file_index);
- const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
- assert(extended.opcode == .struct_decl);
- const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
- assert(!small.has_captures_len);
- assert(!small.has_backing_int);
- assert(small.layout == .auto);
- var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
- const fields_len = if (small.has_fields_len) blk: {
- const fields_len = file.zir.extra[extra_index];
- extra_index += 1;
- break :blk fields_len;
- } else 0;
- const decls_len = if (small.has_decls_len) blk: {
- const decls_len = file.zir.extra[extra_index];
- extra_index += 1;
- break :blk decls_len;
- } else 0;
- const decls = file.zir.bodySlice(extra_index, decls_len);
- extra_index += decls_len;
-
- const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst);
- const wip_ty = switch (try ip.getStructType(gpa, .{
- .layout = .auto,
- .fields_len = fields_len,
- .known_non_opv = small.known_non_opv,
- .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
- .is_tuple = small.is_tuple,
- .any_comptime_fields = small.any_comptime_fields,
- .any_default_inits = small.any_default_inits,
- .inits_resolved = false,
- .any_aligned_fields = small.any_aligned_fields,
- .has_namespace = true,
- .key = .{ .declared = .{
- .zir_index = tracked_inst,
- .captures = &.{},
- } },
- })) {
- .existing => unreachable, // we wouldn't be analysing the file root if this type existed
- .wip => |wip| wip,
- };
- errdefer wip_ty.cancel(ip);
-
- if (zcu.comp.debug_incremental) {
- try ip.addDependency(
- gpa,
- AnalUnit.wrap(.{ .decl = decl_index }),
- .{ .src_hash = tracked_inst },
- );
- }
+pub const SemaDeclResult = packed struct {
+ /// Whether the value of a `decl_val` of this Decl changed.
+ invalidate_decl_val: bool,
+ /// Whether the type of a `decl_ref` of this Decl changed.
+ invalidate_decl_ref: bool,
+};
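
`SemaDeclResult` is now public; its two flags tell the caller which dependency edges to invalidate after re-analyzing an outdated Decl. A minimal sketch of that consumer, mirroring the `ensureDeclAnalyzed` logic removed above (illustrative, not part of this diff):

if (decl_was_outdated) {
    if (result.invalidate_decl_val or result.invalidate_decl_ref) {
        // Dependees were waiting on this Decl and it did change: mark them outdated.
        try zcu.markDependeeOutdated(.{ .decl_val = decl_index });
    } else {
        // The Decl turned out to be up-to-date; clear the potentially-outdated state.
        try zcu.markPoDependeeUpToDate(.{ .decl_val = decl_index });
    }
}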
+pub fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
const decl = zcu.declPtr(decl_index);
- decl.val = Value.fromInterned(wip_ty.index);
- decl.has_tv = true;
- decl.owns_tv = true;
- decl.analysis = .complete;
-
- try zcu.scanNamespace(namespace_index, decls, decl);
- try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
- return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
-}
-
-/// Re-analyze the root Decl of a file on an incremental update.
-/// If `type_outdated`, the struct type itself is considered outdated and is
-/// reconstructed at a new InternPool index. Otherwise, the namespace is just
-/// re-analyzed. Returns whether the decl's tyval was invalidated.
-fn semaFileUpdate(zcu: *Zcu, file_index: File.Index, type_outdated: bool) SemaError!bool {
- const file = zcu.fileByIndex(file_index);
- const decl = zcu.declPtr(zcu.fileRootDecl(file_index).unwrap().?);
-
- log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{
- file.mod.fully_qualified_name,
- file.sub_file_path,
- type_outdated,
- });
-
- if (file.status != .success_zir) {
- if (decl.analysis == .file_failure) {
- return false;
- } else {
- decl.analysis = .file_failure;
- return true;
- }
- }
-
- if (decl.analysis == .file_failure) {
- // No struct type currently exists. Create one!
- const root_decl = zcu.fileRootDecl(file_index);
- _ = try zcu.getFileRootStruct(root_decl.unwrap().?, decl.src_namespace, file_index);
- return true;
- }
assert(decl.has_tv);
assert(decl.owns_tv);
- if (type_outdated) {
- // Invalidate the existing type, reusing the decl and namespace.
- const file_root_decl = zcu.fileRootDecl(file_index).unwrap().?;
- zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{
- .decl = file_root_decl,
- }));
- zcu.intern_pool.remove(decl.val.toIntern());
- decl.val = undefined;
- _ = try zcu.getFileRootStruct(file_root_decl, decl.src_namespace, file_index);
- return true;
- }
-
- // Only the struct's namespace is outdated.
- // Preserve the type - just scan the namespace again.
-
- const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
- const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
-
- var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
- extra_index += @intFromBool(small.has_fields_len);
- const decls_len = if (small.has_decls_len) blk: {
- const decls_len = file.zir.extra[extra_index];
- extra_index += 1;
- break :blk decls_len;
- } else 0;
- const decls = file.zir.bodySlice(extra_index, decls_len);
+ log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)});
- if (!type_outdated) {
- try zcu.scanNamespace(decl.src_namespace, decls, decl);
+ switch (decl.typeOf(zcu).zigTypeTag(zcu)) {
+ .Fn => @panic("TODO: update fn instance"),
+ .Type => {},
+ else => unreachable,
}
- return false;
+ // We are the owner Decl of a type, and we were marked as outdated. That means the *structure*
+ // of this type changed; not just its namespace. Therefore, we need a new InternPool index.
+ //
+ // However, as soon as we make that, the context that created us will require re-analysis anyway
+ // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction
+ // will be analyzed again. Since Sema already needs to be able to reconstruct types like this,
+ // why should we bother implementing it here too when the Sema logic will be hit right after?
+ //
+ // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely
+ // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type
+ // with a new Decl.
+ //
+ // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime.
+ zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
+ zcu.intern_pool.remove(decl.val.toIntern());
+ decl.analysis = .dependency_failure;
+ return .{
+ .invalidate_decl_val = true,
+ .invalidate_decl_ref = true,
+ };
}
-/// Regardless of the file status, will create a `Decl` if none exists so that we can track
-/// dependencies and re-analyze when the file becomes outdated.
-fn semaFile(zcu: *Zcu, file_index: File.Index) SemaError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const file = zcu.fileByIndex(file_index);
- assert(zcu.fileRootDecl(file_index) == .none);
+pub const ImportFileResult = struct {
+ file: *File,
+ file_index: File.Index,
+ is_new: bool,
+ is_pkg: bool,
+};
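
`importPkg` keeps its signature; the removed `semaPkg` above shows the typical call shape. A minimal sketch, assuming a `*Zcu` and a `*Package.Module` in hand; not part of this diff:

// Import a package's root file and check whether its root Decl exists yet.
const res = try zcu.importPkg(pkg);
if (zcu.fileRootDecl(res.file_index) == .none) {
    // No root Decl yet: the file root still needs semantic analysis
    // (the removed `semaPkg` handled this by calling `semaFile`).
}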
+pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult {
const gpa = zcu.gpa;
- log.debug("semaFile zcu={s} sub_file_path={s}", .{
- file.mod.fully_qualified_name, file.sub_file_path,
- });
- // Because these three things each reference each other, `undefined`
- // placeholders are used before being set after the struct type gains an
- // InternPool index.
- const new_namespace_index = try zcu.createNamespace(.{
- .parent = .none,
- .decl_index = undefined,
- .file_scope = file_index,
+ // The resolved path is used as the key in the import table, to detect if
+ // an import refers to the same file as another, despite different relative paths
+ // or differently mapped package names.
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ mod.root.root_dir.path orelse ".",
+ mod.root.sub_path,
+ mod.root_src_path,
});
- errdefer zcu.destroyNamespace(new_namespace_index);
+ var keep_resolved_path = false;
+ defer if (!keep_resolved_path) gpa.free(resolved_path);
- const new_decl_index = try zcu.allocateNewDecl(new_namespace_index);
- const new_decl = zcu.declPtr(new_decl_index);
- errdefer @panic("TODO error handling");
+ const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
+ errdefer _ = zcu.import_table.pop();
+ if (gop.found_existing) {
+ try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod });
+ return .{
+ .file = gop.value_ptr.*,
+ .file_index = @enumFromInt(gop.index),
+ .is_new = false,
+ .is_pkg = true,
+ };
+ }
- zcu.setFileRootDecl(file_index, new_decl_index.toOptional());
- zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index;
+ const ip = &zcu.intern_pool;
- new_decl.name = try file.fullyQualifiedName(zcu);
- new_decl.name_fully_qualified = true;
- new_decl.is_pub = true;
- new_decl.is_exported = false;
- new_decl.alignment = .none;
- new_decl.@"linksection" = .none;
- new_decl.analysis = .in_progress;
+ try ip.files.ensureUnusedCapacity(gpa, 1);
- if (file.status != .success_zir) {
- new_decl.analysis = .file_failure;
- return;
+ if (mod.builtin_file) |builtin_file| {
+ keep_resolved_path = true; // It's now owned by import_table.
+ gop.value_ptr.* = builtin_file;
+ try builtin_file.addReference(zcu.*, .{ .root = mod });
+ const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path);
+ ip.files.putAssumeCapacityNoClobber(path_digest, .none);
+ return .{
+ .file = builtin_file,
+ .file_index = @enumFromInt(ip.files.entries.len - 1),
+ .is_new = false,
+ .is_pkg = true,
+ };
}
- assert(file.zir_loaded);
- const struct_ty = try zcu.getFileRootStruct(new_decl_index, new_namespace_index, file_index);
- errdefer zcu.intern_pool.remove(struct_ty);
+ const sub_file_path = try gpa.dupe(u8, mod.root_src_path);
+ errdefer gpa.free(sub_file_path);
- switch (zcu.comp.cache_use) {
- .whole => |whole| if (whole.cache_manifest) |man| {
- const source = file.getSource(gpa) catch |err| {
- try reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)});
- return error.AnalysisFail;
- };
-
- const resolved_path = std.fs.path.resolve(gpa, &.{
- file.mod.root.root_dir.path orelse ".",
- file.mod.root.sub_path,
- file.sub_file_path,
- }) catch |err| {
- try reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)});
- return error.AnalysisFail;
- };
- errdefer gpa.free(resolved_path);
-
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
- try man.addFilePostContents(resolved_path, source.bytes, source.stat);
- },
- .incremental => {},
- }
-}
-
-const SemaDeclResult = packed struct {
- /// Whether the value of a `decl_val` of this Decl changed.
- invalidate_decl_val: bool,
- /// Whether the type of a `decl_ref` of this Decl changed.
- invalidate_decl_ref: bool,
-};
-
-fn semaDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
- const tracy = trace(@src());
- defer tracy.end();
-
- const decl = zcu.declPtr(decl_index);
- const ip = &zcu.intern_pool;
-
- if (decl.getFileScope(zcu).status != .success_zir) {
- return error.AnalysisFail;
- }
-
- assert(!zcu.declIsRoot(decl_index));
-
- if (decl.zir_decl_index == .none and decl.owns_tv) {
- // We are re-analyzing an anonymous owner Decl (for a function or a namespace type).
- return zcu.semaAnonOwnerDecl(decl_index);
- }
-
- log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)});
- log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)});
- defer blk: {
- log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)});
- }
-
- const old_has_tv = decl.has_tv;
- // The following values are ignored if `!old_has_tv`
- const old_ty = if (old_has_tv) decl.typeOf(zcu) else undefined;
- const old_val = decl.val;
- const old_align = decl.alignment;
- const old_linksection = decl.@"linksection";
- const old_addrspace = decl.@"addrspace";
- const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func|
- prev_func.analysis(ip).state == .inline_only
- else
- false;
-
- const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip);
-
- const gpa = zcu.gpa;
- const zir = decl.getFileScope(zcu).zir;
-
- const builtin_type_target_index: InternPool.Index = ip_index: {
- const std_mod = zcu.std_mod;
- if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none;
- // We're in the std module.
- const std_file_imported = try zcu.importPkg(std_mod);
- const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index);
- const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?);
- const std_namespace = std_decl.getInnerNamespace(zcu).?;
- const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
- const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none);
- const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none;
- if (decl.src_namespace != builtin_namespace) break :ip_index .none;
- // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index.
- for ([_][]const u8{
- "AtomicOrder",
- "AtomicRmwOp",
- "CallingConvention",
- "AddressSpace",
- "FloatMode",
- "ReduceOp",
- "CallModifier",
- "PrefetchOptions",
- "ExportOptions",
- "ExternOptions",
- "Type",
- }, [_]InternPool.Index{
- .atomic_order_type,
- .atomic_rmw_op_type,
- .calling_convention_type,
- .address_space_type,
- .float_mode_type,
- .reduce_op_type,
- .call_modifier_type,
- .prefetch_options_type,
- .export_options_type,
- .extern_options_type,
- .type_info_type,
- }) |type_name, type_ip| {
- if (decl.name.eqlSlice(type_name, ip)) break :ip_index type_ip;
- }
- break :ip_index .none;
- };
-
- zcu.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .decl = decl_index }));
-
- decl.analysis = .in_progress;
-
- var analysis_arena = std.heap.ArenaAllocator.init(gpa);
- defer analysis_arena.deinit();
-
- var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa);
- defer comptime_err_ret_trace.deinit();
-
- var sema: Sema = .{
- .mod = zcu,
- .gpa = gpa,
- .arena = analysis_arena.allocator(),
- .code = zir,
- .owner_decl = decl,
- .owner_decl_index = decl_index,
- .func_index = .none,
- .func_is_naked = false,
- .fn_ret_ty = Type.void,
- .fn_ret_ty_ies = null,
- .owner_func_index = .none,
- .comptime_err_ret_trace = &comptime_err_ret_trace,
- .builtin_type_target_index = builtin_type_target_index,
- };
- defer sema.deinit();
-
- // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source.
- try sema.declareDependency(.{ .src_hash = try ip.trackZir(
- gpa,
- decl.getFileScopeIndex(zcu),
- decl_inst,
- ) });
-
- var block_scope: Sema.Block = .{
- .parent = null,
- .sema = &sema,
- .namespace = decl.src_namespace,
- .instructions = .{},
- .inlining = null,
- .is_comptime = true,
- .src_base_inst = decl.zir_decl_index.unwrap().?,
- .type_name_ctx = decl.name,
- };
- defer block_scope.instructions.deinit(gpa);
-
- const decl_bodies = decl.zirBodies(zcu);
-
- const result_ref = try sema.resolveInlineBody(&block_scope, decl_bodies.value_body, decl_inst);
- // We'll do some other bits with the Sema. Clear the type target index just
- // in case they analyze any type.
- sema.builtin_type_target_index = .none;
- const align_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_align = 0 });
- const section_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_section = 0 });
- const address_space_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_addrspace = 0 });
- const ty_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_ty = 0 });
- const init_src: LazySrcLoc = block_scope.src(.{ .node_offset_var_decl_init = 0 });
- const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref);
- const decl_ty = decl_val.typeOf(zcu);
-
- // Note this resolves the type of the Decl, not the value; if this Decl
- // is a struct, for example, this resolves `type` (which needs no resolution),
- // not the struct itself.
- try decl_ty.resolveLayout(zcu);
-
- if (decl.kind == .@"usingnamespace") {
- if (!decl_ty.eql(Type.type, zcu)) {
- return sema.fail(&block_scope, ty_src, "expected type, found {}", .{
- decl_ty.fmt(zcu),
- });
- }
- const ty = decl_val.toType();
- if (ty.getNamespace(zcu) == null) {
- return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(zcu)});
- }
-
- decl.val = ty.toValue();
- decl.alignment = .none;
- decl.@"linksection" = .none;
- decl.has_tv = true;
- decl.owns_tv = false;
- decl.analysis = .complete;
-
- // TODO: usingnamespace cannot currently participate in incremental compilation
- return .{
- .invalidate_decl_val = true,
- .invalidate_decl_ref = true,
- };
- }
-
- var queue_linker_work = true;
- var is_func = false;
- var is_inline = false;
- switch (decl_val.toIntern()) {
- .generic_poison => unreachable,
- .unreachable_value => unreachable,
- else => switch (ip.indexToKey(decl_val.toIntern())) {
- .variable => |variable| {
- decl.owns_tv = variable.decl == decl_index;
- queue_linker_work = decl.owns_tv;
- },
-
- .extern_func => |extern_func| {
- decl.owns_tv = extern_func.decl == decl_index;
- queue_linker_work = decl.owns_tv;
- is_func = decl.owns_tv;
- },
-
- .func => |func| {
- decl.owns_tv = func.owner_decl == decl_index;
- queue_linker_work = false;
- is_inline = decl.owns_tv and decl_ty.fnCallingConvention(zcu) == .Inline;
- is_func = decl.owns_tv;
- },
-
- else => {},
- },
- }
-
- decl.val = decl_val;
- // Function linksection, align, and addrspace were already set by Sema
- if (!is_func) {
- decl.alignment = blk: {
- const align_body = decl_bodies.align_body orelse break :blk .none;
- const align_ref = try sema.resolveInlineBody(&block_scope, align_body, decl_inst);
- break :blk try sema.analyzeAsAlign(&block_scope, align_src, align_ref);
- };
- decl.@"linksection" = blk: {
- const linksection_body = decl_bodies.linksection_body orelse break :blk .none;
- const linksection_ref = try sema.resolveInlineBody(&block_scope, linksection_body, decl_inst);
- const bytes = try sema.toConstString(&block_scope, section_src, linksection_ref, .{
- .needed_comptime_reason = "linksection must be comptime-known",
- });
- if (mem.indexOfScalar(u8, bytes, 0) != null) {
- return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{});
- } else if (bytes.len == 0) {
- return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
- }
- break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls);
- };
- decl.@"addrspace" = blk: {
- const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
- .variable => .variable,
- .extern_func, .func => .function,
- else => .constant,
- };
-
- const target = sema.mod.getTarget();
-
- const addrspace_body = decl_bodies.addrspace_body orelse break :blk switch (addrspace_ctx) {
- .function => target_util.defaultAddressSpace(target, .function),
- .variable => target_util.defaultAddressSpace(target, .global_mutable),
- .constant => target_util.defaultAddressSpace(target, .global_constant),
- else => unreachable,
- };
- const addrspace_ref = try sema.resolveInlineBody(&block_scope, addrspace_body, decl_inst);
- break :blk try sema.analyzeAsAddressSpace(&block_scope, address_space_src, addrspace_ref, addrspace_ctx);
- };
- }
- decl.has_tv = true;
- decl.analysis = .complete;
-
- const result: SemaDeclResult = if (old_has_tv) .{
- .invalidate_decl_val = !decl_ty.eql(old_ty, zcu) or
- !decl.val.eql(old_val, decl_ty, zcu) or
- is_inline != old_is_inline,
- .invalidate_decl_ref = !decl_ty.eql(old_ty, zcu) or
- decl.alignment != old_align or
- decl.@"linksection" != old_linksection or
- decl.@"addrspace" != old_addrspace or
- is_inline != old_is_inline,
- } else .{
- .invalidate_decl_val = true,
- .invalidate_decl_ref = true,
- };
-
- const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl_ty));
- if (has_runtime_bits) {
- // Needed for codegen_decl which will call updateDecl and then the
- // codegen backend wants full access to the Decl Type.
- try decl_ty.resolveFully(zcu);
-
- try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
-
- if (result.invalidate_decl_ref and zcu.emit_h != null) {
- try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
- }
- }
-
- if (decl.is_exported) {
- const export_src: LazySrcLoc = block_scope.src(.{ .token_offset = @intFromBool(decl.is_pub) });
- if (is_inline) return sema.fail(&block_scope, export_src, "export of inline function", .{});
- // The scope needs to have the decl in it.
- try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
- }
-
- try sema.flushExports();
-
- return result;
-}
-
-fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
- const decl = zcu.declPtr(decl_index);
-
- assert(decl.has_tv);
- assert(decl.owns_tv);
-
- log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)});
-
- switch (decl.typeOf(zcu).zigTypeTag(zcu)) {
- .Fn => @panic("TODO: update fn instance"),
- .Type => {},
- else => unreachable,
- }
-
- // We are the owner Decl of a type, and we were marked as outdated. That means the *structure*
- // of this type changed; not just its namespace. Therefore, we need a new InternPool index.
- //
- // However, as soon as we make that, the context that created us will require re-analysis anyway
- // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction
- // will be analyzed again. Since Sema already needs to be able to reconstruct types like this,
- // why should we bother implementing it here too when the Sema logic will be hit right after?
- //
- // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely
- // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type
- // with a new Decl.
- //
- // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime.
- zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
- zcu.intern_pool.remove(decl.val.toIntern());
- decl.analysis = .dependency_failure;
- return .{
- .invalidate_decl_val = true,
- .invalidate_decl_ref = true,
- };
-}
-
-pub const ImportFileResult = struct {
- file: *File,
- file_index: File.Index,
- is_new: bool,
- is_pkg: bool,
-};
-
-pub fn importPkg(zcu: *Zcu, mod: *Package.Module) !ImportFileResult {
- const gpa = zcu.gpa;
-
- // The resolved path is used as the key in the import table, to detect if
- // an import refers to the same as another, despite different relative paths
- // or differently mapped package names.
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- mod.root.root_dir.path orelse ".",
- mod.root.sub_path,
- mod.root_src_path,
- });
- var keep_resolved_path = false;
- defer if (!keep_resolved_path) gpa.free(resolved_path);
-
- const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
- errdefer _ = zcu.import_table.pop();
- if (gop.found_existing) {
- try gop.value_ptr.*.addReference(zcu.*, .{ .root = mod });
- return .{
- .file = gop.value_ptr.*,
- .file_index = @enumFromInt(gop.index),
- .is_new = false,
- .is_pkg = true,
- };
- }
-
- const ip = &zcu.intern_pool;
-
- try ip.files.ensureUnusedCapacity(gpa, 1);
-
- if (mod.builtin_file) |builtin_file| {
- keep_resolved_path = true; // It's now owned by import_table.
- gop.value_ptr.* = builtin_file;
- try builtin_file.addReference(zcu.*, .{ .root = mod });
- const path_digest = computePathDigest(zcu, mod, builtin_file.sub_file_path);
- ip.files.putAssumeCapacityNoClobber(path_digest, .none);
- return .{
- .file = builtin_file,
- .file_index = @enumFromInt(ip.files.entries.len - 1),
- .is_new = false,
- .is_pkg = true,
- };
- }
-
- const sub_file_path = try gpa.dupe(u8, mod.root_src_path);
- errdefer gpa.free(sub_file_path);
-
- const new_file = try gpa.create(File);
- errdefer gpa.destroy(new_file);
+ const new_file = try gpa.create(File);
+ errdefer gpa.destroy(new_file);
keep_resolved_path = true; // It's now owned by import_table.
gop.value_ptr.* = new_file;
@@ -4533,78 +3680,6 @@ pub fn importFile(
};
}
-pub fn embedFile(
- mod: *Module,
- cur_file: *File,
- import_string: []const u8,
- src_loc: LazySrcLoc,
-) !InternPool.Index {
- const gpa = mod.gpa;
-
- if (cur_file.mod.deps.get(import_string)) |pkg| {
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- pkg.root.root_dir.path orelse ".",
- pkg.root.sub_path,
- pkg.root_src_path,
- });
- var keep_resolved_path = false;
- defer if (!keep_resolved_path) gpa.free(resolved_path);
-
- const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
- errdefer {
- assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
- keep_resolved_path = false;
- }
- if (gop.found_existing) return gop.value_ptr.*.val;
- keep_resolved_path = true;
-
- const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
- errdefer gpa.free(sub_file_path);
-
- return newEmbedFile(mod, pkg, sub_file_path, resolved_path, gop.value_ptr, src_loc);
- }
-
- // The resolved path is used as the key in the table, to detect if a file
- // refers to the same as another, despite different relative paths.
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- cur_file.mod.root.root_dir.path orelse ".",
- cur_file.mod.root.sub_path,
- cur_file.sub_file_path,
- "..",
- import_string,
- });
-
- var keep_resolved_path = false;
- defer if (!keep_resolved_path) gpa.free(resolved_path);
-
- const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
- errdefer {
- assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
- keep_resolved_path = false;
- }
- if (gop.found_existing) return gop.value_ptr.*.val;
- keep_resolved_path = true;
-
- const resolved_root_path = try std.fs.path.resolve(gpa, &.{
- cur_file.mod.root.root_dir.path orelse ".",
- cur_file.mod.root.sub_path,
- });
- defer gpa.free(resolved_root_path);
-
- const sub_file_path = p: {
- const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path);
- errdefer gpa.free(relative);
-
- if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) {
- break :p relative;
- }
- return error.ImportOutsideModulePath;
- };
- defer gpa.free(sub_file_path);
-
- return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc);
-}
-
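Both of the removed helpers key their tables by the fully resolved path rather than by the import string, so two different spellings of the same file share one entry. A minimal standalone sketch of that resolution step (hypothetical paths, illustration only, not part of this commit):

const std = @import("std");

test "embeds are keyed by their resolved path" {
    const gpa = std.testing.allocator;
    // Hypothetical module root "/proj/src" and importing file "main.zig":
    // an import string of "data.bin" next to main.zig and a root-relative
    // "./data.bin" resolve to the same key.
    const a = try std.fs.path.resolve(gpa, &.{ "/proj", "src", "main.zig", "..", "data.bin" });
    defer gpa.free(a);
    const b = try std.fs.path.resolve(gpa, &.{ "/proj", "src", "./data.bin" });
    defer gpa.free(b);
    try std.testing.expectEqualStrings(a, b);
}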
fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest {
const want_local_cache = mod == zcu.main_mod;
var path_hash: Cache.HashHelper = .{};
@@ -4620,87 +3695,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8)
return bin;
}
-/// https://github.com/ziglang/zig/issues/14307
-fn newEmbedFile(
- mod: *Module,
- pkg: *Package.Module,
- sub_file_path: []const u8,
- resolved_path: []const u8,
- result: **EmbedFile,
- src_loc: LazySrcLoc,
-) !InternPool.Index {
- const gpa = mod.gpa;
- const ip = &mod.intern_pool;
-
- const new_file = try gpa.create(EmbedFile);
- errdefer gpa.destroy(new_file);
-
- var file = try pkg.root.openFile(sub_file_path, .{});
- defer file.close();
-
- const actual_stat = try file.stat();
- const stat: Cache.File.Stat = .{
- .size = actual_stat.size,
- .inode = actual_stat.inode,
- .mtime = actual_stat.mtime,
- };
- const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow;
-
- const bytes = try ip.string_bytes.addManyAsSlice(gpa, try std.math.add(usize, size, 1));
- const actual_read = try file.readAll(bytes[0..size]);
- if (actual_read != size) return error.UnexpectedEndOfFile;
- bytes[size] = 0;
-
- const comp = mod.comp;
- switch (comp.cache_use) {
- .whole => |whole| if (whole.cache_manifest) |man| {
- const copied_resolved_path = try gpa.dupe(u8, resolved_path);
- errdefer gpa.free(copied_resolved_path);
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
- try man.addFilePostContents(copied_resolved_path, bytes[0..size], stat);
- },
- .incremental => {},
- }
-
- const array_ty = try ip.get(gpa, .{ .array_type = .{
- .len = size,
- .sentinel = .zero_u8,
- .child = .u8_type,
- } });
- const array_val = try ip.get(gpa, .{ .aggregate = .{
- .ty = array_ty,
- .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) },
- } });
-
- const ptr_ty = (try mod.ptrType(.{
- .child = array_ty,
- .flags = .{
- .alignment = .none,
- .is_const = true,
- .address_space = .generic,
- },
- })).toIntern();
- const ptr_val = try ip.get(gpa, .{ .ptr = .{
- .ty = ptr_ty,
- .base_addr = .{ .anon_decl = .{
- .val = array_val,
- .orig_ty = ptr_ty,
- } },
- .byte_offset = 0,
- } });
-
- result.* = new_file;
- new_file.* = .{
- .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls),
- .owner = pkg,
- .stat = stat,
- .val = ptr_val,
- .src_loc = src_loc,
- };
- return ptr_val;
-}
-
pub fn scanNamespace(
zcu: *Zcu,
namespace_index: Namespace.Index,
@@ -4970,13 +3964,6 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
mod.destroyDecl(decl_index);
}
-/// Finalize the creation of an anon decl.
-pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
- if (mod.declPtr(decl_index).typeOf(mod).isFnOrHasRuntimeBits(mod)) {
- try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
- }
-}
-
/// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of
/// this `AnalUnit` will cause them to be re-created (or not).
pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
@@ -5019,7 +4006,7 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
/// Delete all references in `reference_table` which are caused by this `AnalUnit`.
/// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated.
-fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
+pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
const gpa = zcu.gpa;
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return;
@@ -5058,258 +4045,13 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
gop.value_ptr.* = @intCast(ref_idx);
}
-pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air {
- const tracy = trace(@src());
- defer tracy.end();
-
- const gpa = mod.gpa;
- const ip = &mod.intern_pool;
- const func = mod.funcInfo(func_index);
- const decl_index = func.owner_decl;
- const decl = mod.declPtr(decl_index);
-
- log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
- defer blk: {
- log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
- }
-
- const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
- defer decl_prog_node.end();
-
- mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index }));
-
- var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa);
- defer comptime_err_ret_trace.deinit();
+pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index {
+ return mod.intern_pool.createNamespace(mod.gpa, initialization);
+}
- // In the case of a generic function instance, this is the type of the
- // instance, which has comptime parameters elided. In other words, it is
- // the runtime-known parameters only, not to be confused with the
- // generic_owner function type, which potentially has more parameters,
- // including comptime parameters.
- const fn_ty = decl.typeOf(mod);
- const fn_ty_info = mod.typeToFunc(fn_ty).?;
-
- var sema: Sema = .{
- .mod = mod,
- .gpa = gpa,
- .arena = arena,
- .code = decl.getFileScope(mod).zir,
- .owner_decl = decl,
- .owner_decl_index = decl_index,
- .func_index = func_index,
- .func_is_naked = fn_ty_info.cc == .Naked,
- .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
- .fn_ret_ty_ies = null,
- .owner_func_index = func_index,
- .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
- .comptime_err_ret_trace = &comptime_err_ret_trace,
- };
- defer sema.deinit();
-
- // Every runtime function has a dependency on the source of the Decl it originates from.
- // It also depends on the value of its owner Decl.
- try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? });
- try sema.declareDependency(.{ .decl_val = decl_index });
-
- if (func.analysis(ip).inferred_error_set) {
- const ies = try arena.create(Sema.InferredErrorSet);
- ies.* = .{ .func = func_index };
- sema.fn_ret_ty_ies = ies;
- }
-
- // reset in case calls to errorable functions are removed.
- func.analysis(ip).calls_or_awaits_errorable_fn = false;
-
- // First few indexes of extra are reserved and set at the end.
- const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
- try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
- sema.air_extra.items.len += reserved_count;
-
- var inner_block: Sema.Block = .{
- .parent = null,
- .sema = &sema,
- .namespace = decl.src_namespace,
- .instructions = .{},
- .inlining = null,
- .is_comptime = false,
- .src_base_inst = inst: {
- const owner_info = if (func.generic_owner == .none)
- func
- else
- mod.funcInfo(func.generic_owner);
- const orig_decl = mod.declPtr(owner_info.owner_decl);
- break :inst orig_decl.zir_decl_index.unwrap().?;
- },
- .type_name_ctx = decl.name,
- };
- defer inner_block.instructions.deinit(gpa);
-
- const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip));
-
- // Here we are performing "runtime semantic analysis" for a function body, which means
- // we must map the parameter ZIR instructions to `arg` AIR instructions.
- // AIR requires the `arg` parameters to be the first N instructions.
- // This could be a generic function instantiation, however, in which case we need to
- // map the comptime parameters to constant values and only emit arg AIR instructions
- // for the runtime ones.
- const runtime_params_len = fn_ty_info.param_types.len;
- try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
- try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len);
- try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
-
- // In the case of a generic function instance, pre-populate all the comptime args.
- if (func.comptime_args.len != 0) {
- for (
- fn_info.param_body[0..func.comptime_args.len],
- func.comptime_args.get(ip),
- ) |inst, comptime_arg| {
- if (comptime_arg == .none) continue;
- sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg));
- }
- }
-
- const src_params_len = if (func.comptime_args.len != 0)
- func.comptime_args.len
- else
- runtime_params_len;
-
- var runtime_param_index: usize = 0;
- for (fn_info.param_body[0..src_params_len], 0..) |inst, src_param_index| {
- const gop = sema.inst_map.getOrPutAssumeCapacity(inst);
- if (gop.found_existing) continue; // provided above by comptime arg
-
- const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index];
- runtime_param_index += 1;
-
- const opt_opv = sema.typeHasOnePossibleValue(Type.fromInterned(param_ty)) catch |err| switch (err) {
- error.GenericPoison => unreachable,
- error.ComptimeReturn => unreachable,
- error.ComptimeBreak => unreachable,
- else => |e| return e,
- };
- if (opt_opv) |opv| {
- gop.value_ptr.* = Air.internedToRef(opv.toIntern());
- continue;
- }
- const arg_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
- gop.value_ptr.* = arg_index.toRef();
- inner_block.instructions.appendAssumeCapacity(arg_index);
- sema.air_instructions.appendAssumeCapacity(.{
- .tag = .arg,
- .data = .{ .arg = .{
- .ty = Air.internedToRef(param_ty),
- .src_index = @intCast(src_param_index),
- } },
- });
- }
-
- func.analysis(ip).state = .in_progress;
-
- const last_arg_index = inner_block.instructions.items.len;
-
- // Save the error trace as our first action in the function.
- // If this is unnecessary after all, Liveness will clean it up for us.
- const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&inner_block);
- sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
- inner_block.error_return_trace_index = error_return_trace_index;
-
- sema.analyzeFnBody(&inner_block, fn_info.body) catch |err| switch (err) {
- // TODO make these unreachable instead of @panic
- error.GenericPoison => @panic("zig compiler bug: GenericPoison"),
- error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"),
- else => |e| return e,
- };
-
- for (sema.unresolved_inferred_allocs.keys()) |ptr_inst| {
- // The lack of a resolve_inferred_alloc means that this instruction
- // is unused, so it just has to be a no-op.
- sema.air_instructions.set(@intFromEnum(ptr_inst), .{
- .tag = .alloc,
- .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
- });
- }
-
- // If we don't get an error return trace from a caller, create our own.
- if (func.analysis(ip).calls_or_awaits_errorable_fn and
- mod.comp.config.any_error_tracing and
- !sema.fn_ret_ty.isError(mod))
- {
- sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) {
- // TODO make these unreachable instead of @panic
- error.GenericPoison => @panic("zig compiler bug: GenericPoison"),
- error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"),
- error.ComptimeBreak => @panic("zig compiler bug: ComptimeBreak"),
- else => |e| return e,
- };
- }
-
- // Copy the block into place and mark that as the main block.
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
- inner_block.instructions.items.len);
- const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
- .body_len = @intCast(inner_block.instructions.items.len),
- });
- sema.air_extra.appendSliceAssumeCapacity(@ptrCast(inner_block.instructions.items));
- sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
-
- // Resolving inferred error sets is done *before* setting the function
- // state to success, so that "unable to resolve inferred error set" errors
- // can be emitted here.
- if (sema.fn_ret_ty_ies) |ies| {
- sema.resolveInferredErrorSetPtr(&inner_block, .{
- .base_node_inst = inner_block.src_base_inst,
- .offset = LazySrcLoc.Offset.nodeOffset(0),
- }, ies) catch |err| switch (err) {
- error.GenericPoison => unreachable,
- error.ComptimeReturn => unreachable,
- error.ComptimeBreak => unreachable,
- error.AnalysisFail => {
- // In this case our function depends on a type that had a compile error.
- // We should not try to lower this function.
- decl.analysis = .dependency_failure;
- return error.AnalysisFail;
- },
- else => |e| return e,
- };
- assert(ies.resolved != .none);
- ip.funcIesResolved(func_index).* = ies.resolved;
- }
-
- func.analysis(ip).state = .success;
-
- // Finally we must resolve the return type and parameter types so that backends
- // have full access to type information.
- // Crucially, this happens *after* we set the function state to success above,
- // so that dependencies on the function body will now be satisfied rather than
- // result in circular dependency errors.
- sema.resolveFnTypes(fn_ty) catch |err| switch (err) {
- error.GenericPoison => unreachable,
- error.ComptimeReturn => unreachable,
- error.ComptimeBreak => unreachable,
- error.AnalysisFail => {
- // In this case our function depends on a type that had a compile error.
- // We should not try to lower this function.
- decl.analysis = .dependency_failure;
- return error.AnalysisFail;
- },
- else => |e| return e,
- };
-
- try sema.flushExports();
-
- return .{
- .instructions = sema.air_instructions.toOwnedSlice(),
- .extra = try sema.air_extra.toOwnedSlice(gpa),
- };
-}
-
-pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index {
- return mod.intern_pool.createNamespace(mod.gpa, initialization);
-}
-
-pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
- return mod.intern_pool.destroyNamespace(mod.gpa, index);
-}
+pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
+ return mod.intern_pool.destroyNamespace(mod.gpa, index);
+}
pub fn allocateNewDecl(zcu: *Zcu, namespace: Namespace.Index) !Decl.Index {
const gpa = zcu.gpa;
@@ -5420,117 +4162,7 @@ fn lockAndClearFileCompileError(mod: *Module, file: *File) void {
}
}
-/// Called from `Compilation.update`, after everything is done, just before
-/// reporting compile errors. In this function we emit exported symbol collision
-/// errors and communicate exported symbols to the linker backend.
-pub fn processExports(zcu: *Zcu) !void {
- const gpa = zcu.gpa;
-
- // First, construct a mapping of every exported value and Decl to the indices of all its different exports.
- var decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(u32)) = .{};
- var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, ArrayListUnmanaged(u32)) = .{};
- defer {
- for (decl_exports.values()) |*exports| {
- exports.deinit(gpa);
- }
- decl_exports.deinit(gpa);
- for (value_exports.values()) |*exports| {
- exports.deinit(gpa);
- }
- value_exports.deinit(gpa);
- }
-
- // We note as a heuristic:
- // * It is rare to export a value.
- // * It is rare for one Decl to be exported multiple times.
- // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization.
- try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count());
-
- for (zcu.single_exports.values()) |export_idx| {
- const exp = zcu.all_exports.items[export_idx];
- const value_ptr, const found_existing = switch (exp.exported) {
- .decl_index => |i| gop: {
- const gop = try decl_exports.getOrPut(gpa, i);
- break :gop .{ gop.value_ptr, gop.found_existing };
- },
- .value => |i| gop: {
- const gop = try value_exports.getOrPut(gpa, i);
- break :gop .{ gop.value_ptr, gop.found_existing };
- },
- };
- if (!found_existing) value_ptr.* = .{};
- try value_ptr.append(gpa, export_idx);
- }
-
- for (zcu.multi_exports.values()) |info| {
- for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| {
- const value_ptr, const found_existing = switch (exp.exported) {
- .decl_index => |i| gop: {
- const gop = try decl_exports.getOrPut(gpa, i);
- break :gop .{ gop.value_ptr, gop.found_existing };
- },
- .value => |i| gop: {
- const gop = try value_exports.getOrPut(gpa, i);
- break :gop .{ gop.value_ptr, gop.found_existing };
- },
- };
- if (!found_existing) value_ptr.* = .{};
- try value_ptr.append(gpa, @intCast(export_idx));
- }
- }
-
- // Map symbol names to `Export` for name collision detection.
- var symbol_exports: SymbolExports = .{};
- defer symbol_exports.deinit(gpa);
-
- for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| {
- const exported: Exported = .{ .decl_index = exported_decl };
- try processExportsInner(zcu, &symbol_exports, exported, exports_list.items);
- }
-
- for (value_exports.keys(), value_exports.values()) |exported_value, exports_list| {
- const exported: Exported = .{ .value = exported_value };
- try processExportsInner(zcu, &symbol_exports, exported, exports_list.items);
- }
-}
-
-const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32);
-
-fn processExportsInner(
- zcu: *Zcu,
- symbol_exports: *SymbolExports,
- exported: Exported,
- export_indices: []const u32,
-) error{OutOfMemory}!void {
- const gpa = zcu.gpa;
-
- for (export_indices) |export_idx| {
- const new_export = &zcu.all_exports.items[export_idx];
- const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name);
- if (gop.found_existing) {
- new_export.status = .failed_retryable;
- try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
- const msg = try ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{
- new_export.opts.name.fmt(&zcu.intern_pool),
- });
- errdefer msg.destroy(gpa);
- const other_export = zcu.all_exports.items[gop.value_ptr.*];
- try zcu.errNote(other_export.src, msg, "other symbol here", .{});
- zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
- new_export.status = .failed;
- } else {
- gop.value_ptr.* = export_idx;
- }
- }
- if (zcu.comp.bin_file) |lf| {
- try handleUpdateExports(zcu, export_indices, lf.updateExports(zcu, exported, export_indices));
- } else if (zcu.llvm_object) |llvm_object| {
- if (build_options.only_c) unreachable;
- try handleUpdateExports(zcu, export_indices, llvm_object.updateExports(zcu, exported, export_indices));
- }
-}
-
-fn handleUpdateExports(
+pub fn handleUpdateExports(
zcu: *Zcu,
export_indices: []const u32,
result: link.File.UpdateExportsError!void,
@@ -5551,180 +4183,7 @@ fn handleUpdateExports(
};
}
-pub fn populateTestFunctions(
- zcu: *Zcu,
- main_progress_node: std.Progress.Node,
-) !void {
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
- const builtin_mod = zcu.root_mod.getBuiltinDependency();
- const builtin_file_index = (zcu.importPkg(builtin_mod) catch unreachable).file_index;
- const root_decl_index = zcu.fileRootDecl(builtin_file_index);
- const root_decl = zcu.declPtr(root_decl_index.unwrap().?);
- const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace);
- const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls);
- const decl_index = builtin_namespace.decls.getKeyAdapted(
- test_functions_str,
- DeclAdapter{ .zcu = zcu },
- ).?;
- {
- // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions`
- // was not referenced by start code.
- zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
- defer {
- zcu.sema_prog_node.end();
- zcu.sema_prog_node = undefined;
- }
- try zcu.ensureDeclAnalyzed(decl_index);
- }
-
- const decl = zcu.declPtr(decl_index);
- const test_fn_ty = decl.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu);
-
- const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: {
- // Add zcu.test_functions to an array decl, then make the test_functions
- // decl reference it as a slice.
- const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count());
- defer gpa.free(test_fn_vals);
-
- for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| {
- const test_decl = zcu.declPtr(test_decl_index);
- const test_decl_name = try test_decl.fullyQualifiedName(zcu);
- const test_decl_name_len = test_decl_name.length(ip);
- const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: {
- const test_name_ty = try zcu.arrayType(.{
- .len = test_decl_name_len,
- .child = .u8_type,
- });
- const test_name_val = try zcu.intern(.{ .aggregate = .{
- .ty = test_name_ty.toIntern(),
- .storage = .{ .bytes = test_decl_name.toString() },
- } });
- break :n .{
- .orig_ty = (try zcu.singleConstPtrType(test_name_ty)).toIntern(),
- .val = test_name_val,
- };
- };
-
- const test_fn_fields = .{
- // name
- try zcu.intern(.{ .slice = .{
- .ty = .slice_const_u8_type,
- .ptr = try zcu.intern(.{ .ptr = .{
- .ty = .manyptr_const_u8_type,
- .base_addr = .{ .anon_decl = test_name_anon_decl },
- .byte_offset = 0,
- } }),
- .len = try zcu.intern(.{ .int = .{
- .ty = .usize_type,
- .storage = .{ .u64 = test_decl_name_len },
- } }),
- } }),
- // func
- try zcu.intern(.{ .ptr = .{
- .ty = try zcu.intern(.{ .ptr_type = .{
- .child = test_decl.typeOf(zcu).toIntern(),
- .flags = .{
- .is_const = true,
- },
- } }),
- .base_addr = .{ .decl = test_decl_index },
- .byte_offset = 0,
- } }),
- };
- test_fn_val.* = try zcu.intern(.{ .aggregate = .{
- .ty = test_fn_ty.toIntern(),
- .storage = .{ .elems = &test_fn_fields },
- } });
- }
-
- const array_ty = try zcu.arrayType(.{
- .len = test_fn_vals.len,
- .child = test_fn_ty.toIntern(),
- .sentinel = .none,
- });
- const array_val = try zcu.intern(.{ .aggregate = .{
- .ty = array_ty.toIntern(),
- .storage = .{ .elems = test_fn_vals },
- } });
- break :array .{
- .orig_ty = (try zcu.singleConstPtrType(array_ty)).toIntern(),
- .val = array_val,
- };
- };
-
- {
- const new_ty = try zcu.ptrType(.{
- .child = test_fn_ty.toIntern(),
- .flags = .{
- .is_const = true,
- .size = .Slice,
- },
- });
- const new_val = decl.val;
- const new_init = try zcu.intern(.{ .slice = .{
- .ty = new_ty.toIntern(),
- .ptr = try zcu.intern(.{ .ptr = .{
- .ty = new_ty.slicePtrFieldType(zcu).toIntern(),
- .base_addr = .{ .anon_decl = array_anon_decl },
- .byte_offset = 0,
- } }),
- .len = (try zcu.intValue(Type.usize, zcu.test_functions.count())).toIntern(),
- } });
- ip.mutateVarInit(decl.val.toIntern(), new_init);
-
- // Since we are replacing the Decl's value, we must perform cleanup on the
- // previous value.
- decl.val = new_val;
- decl.has_tv = true;
- }
- {
- zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
- defer {
- zcu.codegen_prog_node.end();
- zcu.codegen_prog_node = undefined;
- }
-
- try zcu.linkerUpdateDecl(decl_index);
- }
-}
-
-pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void {
- const comp = zcu.comp;
-
- const decl = zcu.declPtr(decl_index);
-
- const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0);
- defer codegen_prog_node.end();
-
- if (comp.bin_file) |lf| {
- lf.updateDecl(zcu, decl_index) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => {
- decl.analysis = .codegen_failure;
- },
- else => {
- const gpa = zcu.gpa;
- try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
- zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create(
- gpa,
- decl.navSrcLoc(zcu),
- "unable to codegen: {s}",
- .{@errorName(err)},
- ));
- decl.analysis = .codegen_failure;
- try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
- },
- };
- } else if (zcu.llvm_object) |llvm_object| {
- if (build_options.only_c) unreachable;
- llvm_object.updateDecl(zcu, decl_index) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- };
- }
-}
-
-fn reportRetryableFileError(
+pub fn reportRetryableFileError(
zcu: *Zcu,
file_index: File.Index,
comptime format: []const u8,
@@ -5795,344 +4254,6 @@ pub fn backendSupportsFeature(zcu: Module, feature: Feature) bool {
return target_util.backendSupportsFeature(cpu_arch, ofmt, use_llvm, feature);
}
-/// Shortcut for calling `intern_pool.get`.
-pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index {
- return mod.intern_pool.get(mod.gpa, key);
-}
-
-/// Shortcut for calling `intern_pool.getCoerced`.
-pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
- return Value.fromInterned((try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())));
-}
-
-pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
- return Type.fromInterned((try intern(mod, .{ .int_type = .{
- .signedness = signedness,
- .bits = bits,
- } })));
-}
-
-pub fn errorIntType(mod: *Module) std.mem.Allocator.Error!Type {
- return mod.intType(.unsigned, mod.errorSetBits());
-}
-
-pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
- const i = try intern(mod, .{ .array_type = info });
- return Type.fromInterned(i);
-}
-
-pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type {
- const i = try intern(mod, .{ .vector_type = info });
- return Type.fromInterned(i);
-}
-
-pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type {
- const i = try intern(mod, .{ .opt_type = child_type });
- return Type.fromInterned(i);
-}
-
-pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
- var canon_info = info;
-
- if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
-
- // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
- // type, we change it to `.none` here. If this causes an assertion failure because
- // the pointee type is not sufficiently resolved, the pointee type must be resolved
- // before calling this function.
- if (info.flags.alignment != .none and
- info.flags.alignment == Type.fromInterned(info.child).abiAlignment(mod))
- {
- canon_info.flags.alignment = .none;
- }
-
- switch (info.flags.vector_index) {
- // Canonicalize host_size. If it matches the bit size of the pointee type,
- // we change it to 0 here. If this causes an assertion failure, the pointee type
- // must be resolved before calling this function.
- .none => if (info.packed_offset.host_size != 0) {
- const elem_bit_size = Type.fromInterned(info.child).bitSize(mod);
- assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
- if (info.packed_offset.host_size * 8 == elem_bit_size) {
- canon_info.packed_offset.host_size = 0;
- }
- },
- .runtime => {},
- _ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size),
- }
-
- return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info })));
-}
-
-/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer
-/// child type's alignment is resolved so that an invalid alignment is not used.
-/// In general, prefer this function during semantic analysis.
-pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type {
- if (info.flags.alignment != .none) {
- _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema);
- }
- return zcu.ptrType(info);
-}
-
-pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
- return ptrType(mod, .{ .child = child_type.toIntern() });
-}
-
-pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
- return ptrType(mod, .{
- .child = child_type.toIntern(),
- .flags = .{
- .is_const = true,
- },
- });
-}
-
-pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
- return ptrType(mod, .{
- .child = child_type.toIntern(),
- .flags = .{
- .size = .Many,
- .is_const = true,
- },
- });
-}
-
-pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
- var info = ptr_ty.ptrInfo(mod);
- info.child = new_child.toIntern();
- return mod.ptrType(info);
-}
-
-pub fn funcType(mod: *Module, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
- return Type.fromInterned((try mod.intern_pool.getFuncType(mod.gpa, key)));
-}
-
-/// Use this for `anyframe->T` only.
-/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
-pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type {
- return Type.fromInterned((try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })));
-}
-
-pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type {
- return Type.fromInterned((try intern(mod, .{ .error_union_type = .{
- .error_set_type = error_set_ty.toIntern(),
- .payload_type = payload_ty.toIntern(),
- } })));
-}
-
-pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
- const names: *const [1]InternPool.NullTerminatedString = &name;
- const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
- return Type.fromInterned(new_ty);
-}
-
-/// Sorts `names` in place.
-pub fn errorSetFromUnsortedNames(
- mod: *Module,
- names: []InternPool.NullTerminatedString,
-) Allocator.Error!Type {
- std.mem.sort(
- InternPool.NullTerminatedString,
- names,
- {},
- InternPool.NullTerminatedString.indexLessThan,
- );
- const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
- return Type.fromInterned(new_ty);
-}
-
-/// Supports only pointers, not pointer-like optionals.
-pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
- assert(ty.zigTypeTag(mod) == .Pointer and !ty.isSlice(mod));
- assert(x != 0 or ty.isAllowzeroPtr(mod));
- const i = try intern(mod, .{ .ptr = .{
- .ty = ty.toIntern(),
- .base_addr = .int,
- .byte_offset = x,
- } });
- return Value.fromInterned(i);
-}
-
-/// Creates an enum tag value based on the integer tag value.
-pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value {
- if (std.debug.runtime_safety) {
- const tag = ty.zigTypeTag(mod);
- assert(tag == .Enum);
- }
- const i = try intern(mod, .{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = tag_int,
- } });
- return Value.fromInterned(i);
-}
-
-/// Creates an enum tag value based on the field index according to source code
-/// declaration order.
-pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
- const ip = &mod.intern_pool;
- const gpa = mod.gpa;
- const enum_type = ip.loadEnumType(ty.toIntern());
-
- if (enum_type.values.len == 0) {
- // Auto-numbered fields.
- return Value.fromInterned((try ip.get(gpa, .{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = try ip.get(gpa, .{ .int = .{
- .ty = enum_type.tag_ty,
- .storage = .{ .u64 = field_index },
- } }),
- } })));
- }
-
- return Value.fromInterned((try ip.get(gpa, .{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = enum_type.values.get(ip)[field_index],
- } })));
-}
-
-pub fn undefValue(mod: *Module, ty: Type) Allocator.Error!Value {
- return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
-}
-
-pub fn undefRef(mod: *Module, ty: Type) Allocator.Error!Air.Inst.Ref {
- return Air.internedToRef((try mod.undefValue(ty)).toIntern());
-}
-
-pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
- if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted);
- if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted);
- var limbs_buffer: [4]usize = undefined;
- var big_int = BigIntMutable.init(&limbs_buffer, x);
- return intValue_big(mod, ty, big_int.toConst());
-}
-
-pub fn intRef(mod: *Module, ty: Type, x: anytype) Allocator.Error!Air.Inst.Ref {
- return Air.internedToRef((try mod.intValue(ty, x)).toIntern());
-}
-
-pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value {
- const i = try intern(mod, .{ .int = .{
- .ty = ty.toIntern(),
- .storage = .{ .big_int = x },
- } });
- return Value.fromInterned(i);
-}
-
-pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
- const i = try intern(mod, .{ .int = .{
- .ty = ty.toIntern(),
- .storage = .{ .u64 = x },
- } });
- return Value.fromInterned(i);
-}
-
-pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
- const i = try intern(mod, .{ .int = .{
- .ty = ty.toIntern(),
- .storage = .{ .i64 = x },
- } });
- return Value.fromInterned(i);
-}
-
-pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
- const i = try intern(mod, .{ .un = .{
- .ty = union_ty.toIntern(),
- .tag = tag.toIntern(),
- .val = val.toIntern(),
- } });
- return Value.fromInterned(i);
-}
-
-/// Casts the float value down to the representation of `ty`, potentially losing
-/// precision if the value cannot be represented exactly in that type.
-pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
- const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) {
- 16 => .{ .f16 = @as(f16, @floatCast(x)) },
- 32 => .{ .f32 = @as(f32, @floatCast(x)) },
- 64 => .{ .f64 = @as(f64, @floatCast(x)) },
- 80 => .{ .f80 = @as(f80, @floatCast(x)) },
- 128 => .{ .f128 = @as(f128, @floatCast(x)) },
- else => unreachable,
- };
- const i = try intern(mod, .{ .float = .{
- .ty = ty.toIntern(),
- .storage = storage,
- } });
- return Value.fromInterned(i);
-}
-
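The precision caveat on `floatValue` can be demonstrated without any compiler state; a small sketch using plain `@floatCast` (illustration only, not the removed helper itself):

const std = @import("std");

test "down-casting a float can lose precision" {
    const x: f64 = 0.1;
    const y: f16 = @floatCast(x); // nearest f16 to 0.1 is not exactly 0.1
    try std.testing.expect(@as(f64, y) != x);
}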
-pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
- const ip = &mod.intern_pool;
- assert(ip.isOptionalType(opt_ty.toIntern()));
- const result = try ip.get(mod.gpa, .{ .opt = .{
- .ty = opt_ty.toIntern(),
- .val = .none,
- } });
- return Value.fromInterned(result);
-}
-
-pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
- return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
-}
-
-/// Returns the smallest possible integer type containing both `min` and
-/// `max`. Asserts that neither value is undef.
-/// TODO: if #3806 is implemented, this becomes trivial
-pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
- assert(!min.isUndef(mod));
- assert(!max.isUndef(mod));
-
- if (std.debug.runtime_safety) {
- assert(Value.order(min, max, mod).compare(.lte));
- }
-
- const sign = min.orderAgainstZero(mod) == .lt;
-
- const min_val_bits = intBitsForValue(mod, min, sign);
- const max_val_bits = intBitsForValue(mod, max, sign);
-
- return mod.intType(
- if (sign) .signed else .unsigned,
- @max(min_val_bits, max_val_bits),
- );
-}
-
-/// Given a value representing an integer, returns the number of bits necessary to represent
-/// this value in an integer. If `sign` is true, returns the number of bits necessary in a
-/// twos-complement integer; otherwise in an unsigned integer.
-/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
-pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
- assert(!val.isUndef(mod));
-
- const key = mod.intern_pool.indexToKey(val.toIntern());
- switch (key.int.storage) {
- .i64 => |x| {
- if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @intFromBool(sign);
- assert(sign);
- // Protect against overflow in the following negation.
- if (x == std.math.minInt(i64)) return 64;
- return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1;
- },
- .u64 => |x| {
- return Type.smallestUnsignedBits(x) + @intFromBool(sign);
- },
- .big_int => |big| {
- if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign)));
-
- // Zero is still a possibility, in which case unsigned is fine
- if (big.eqlZero()) return 0;
-
- return @as(u16, @intCast(big.bitCountTwosComp()));
- },
- .lazy_align => |lazy_ty| {
- return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits() orelse 0) + @intFromBool(sign);
- },
- .lazy_size => |lazy_ty| {
- return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(mod)) + @intFromBool(sign);
- },
- }
-}
-
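To make the bit-counting rule above concrete, here is a standalone sketch of the same arithmetic on plain integers (an illustration only; the removed function operates on interned values and also handles lazy alignment/size):

const std = @import("std");

/// Bits needed to represent `x`: two's-complement width when `sign` is true,
/// unsigned width otherwise, mirroring the rule documented above.
fn bitsForValue(x: i128, sign: bool) u16 {
    if (x >= 0) {
        const ux: u128 = @intCast(x);
        const unsigned_bits: u16 = 128 - @clz(ux);
        return unsigned_bits + @intFromBool(sign);
    }
    // Negative values need a sign bit on top of the magnitude of `x + 1`.
    const mag: u128 = @intCast(-(x + 1));
    return @as(u16, 128 - @clz(mag)) + 1;
}

test "smallest integer widths" {
    try std.testing.expectEqual(@as(u16, 7), bitsForValue(100, false)); // fits in u7
    try std.testing.expectEqual(@as(u16, 8), bitsForValue(100, true)); // fits in i8
    try std.testing.expectEqual(@as(u16, 3), bitsForValue(-3, true)); // fits in i3
    try std.testing.expectEqual(@as(u16, 8), bitsForValue(-128, true)); // fits in i8
}

Under the same rule, the removed `intFittingRange` with `min = -3` and `max = 100` picks a signed type of `@max(3, 8)` = 8 bits, i.e. `i8`.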
pub const AtomicPtrAlignmentError = error{
FloatTooBig,
IntTooBig,
@@ -6371,101 +4492,6 @@ pub const UnionLayout = struct {
padding: u32,
};
-pub fn getUnionLayout(mod: *Module, loaded_union: InternPool.LoadedUnionType) UnionLayout {
- const ip = &mod.intern_pool;
- assert(loaded_union.haveLayout(ip));
- var most_aligned_field: u32 = undefined;
- var most_aligned_field_size: u64 = undefined;
- var biggest_field: u32 = undefined;
- var payload_size: u64 = 0;
- var payload_align: Alignment = .@"1";
- for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- const explicit_align = loaded_union.fieldAlign(ip, field_index);
- const field_align = if (explicit_align != .none)
- explicit_align
- else
- Type.fromInterned(field_ty).abiAlignment(mod);
- const field_size = Type.fromInterned(field_ty).abiSize(mod);
- if (field_size > payload_size) {
- payload_size = field_size;
- biggest_field = @intCast(field_index);
- }
- if (field_align.compare(.gte, payload_align)) {
- payload_align = field_align;
- most_aligned_field = @intCast(field_index);
- most_aligned_field_size = field_size;
- }
- }
- const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
- if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(mod)) {
- return .{
- .abi_size = payload_align.forward(payload_size),
- .abi_align = payload_align,
- .most_aligned_field = most_aligned_field,
- .most_aligned_field_size = most_aligned_field_size,
- .biggest_field = biggest_field,
- .payload_size = payload_size,
- .payload_align = payload_align,
- .tag_align = .none,
- .tag_size = 0,
- .padding = 0,
- };
- }
-
- const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(mod);
- const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod).max(.@"1");
- return .{
- .abi_size = loaded_union.size(ip).*,
- .abi_align = tag_align.max(payload_align),
- .most_aligned_field = most_aligned_field,
- .most_aligned_field_size = most_aligned_field_size,
- .biggest_field = biggest_field,
- .payload_size = payload_size,
- .payload_align = payload_align,
- .tag_align = tag_align,
- .tag_size = tag_size,
- .padding = loaded_union.padding(ip).*,
- };
-}
-
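The tag-less branch of the layout rule above reduces to simple arithmetic: the union occupies its largest field, rounded up to its strictest field alignment. A standalone sketch with hypothetical field sizes (not the removed function, which reads this data from the InternPool):

const std = @import("std");

/// Untagged-union case of the rule above: max field size, rounded up to the
/// strictest field alignment.
fn untaggedUnionAbiSize(field_sizes: []const u64, field_aligns: []const u64) u64 {
    var payload_size: u64 = 0;
    var payload_align: u64 = 1;
    for (field_sizes, field_aligns) |size, alignment| {
        payload_size = @max(payload_size, size);
        payload_align = @max(payload_align, alignment);
    }
    return std.mem.alignForward(u64, payload_size, payload_align);
}

test "untagged union layout" {
    // e.g. fields of (size, align) = (1, 1), (8, 8), (3, 1) -> abi size 8, align 8
    try std.testing.expectEqual(@as(u64, 8), untaggedUnionAbiSize(&.{ 1, 8, 3 }, &.{ 1, 8, 1 }));
}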
-pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
- return mod.getUnionLayout(loaded_union).abi_size;
-}
-
-/// Returns 0 if the union is represented with 0 bits at runtime.
-pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType) Alignment {
- const ip = &mod.intern_pool;
- const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
- var max_align: Alignment = .none;
- if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(mod);
- for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
-
- const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
- max_align = max_align.max(field_align);
- }
- return max_align;
-}
-
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
- return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
-}
-
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment {
- const ip = &zcu.intern_pool;
- assert(loaded_union.flagsPtr(ip).layout != .@"packed");
- const field_align = loaded_union.fieldAlign(ip, field_index);
- if (field_align != .none) return field_align;
- const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
- if (field_ty.isNoReturn(zcu)) return .none;
- return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
-}
-
/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &mod.intern_pool;
@@ -6474,63 +4500,6 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType
return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-pub fn structFieldAlignment(
- zcu: *Zcu,
- explicit_alignment: InternPool.Alignment,
- field_ty: Type,
- layout: std.builtin.Type.ContainerLayout,
-) Alignment {
- return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
-}
-
-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn structFieldAlignmentAdvanced(
- zcu: *Zcu,
- explicit_alignment: InternPool.Alignment,
- field_ty: Type,
- layout: std.builtin.Type.ContainerLayout,
- strat: Type.ResolveStrat,
-) SemaError!Alignment {
- assert(layout != .@"packed");
- if (explicit_alignment != .none) return explicit_alignment;
- const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
- switch (layout) {
- .@"packed" => unreachable,
- .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align,
- .@"extern" => {},
- }
- // extern
- if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) {
- return ty_abi_align.maxStrict(.@"16");
- }
- return ty_abi_align;
-}
-
-/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
-/// in the packed struct InternPool data rather than computing them on the fly;
-/// however, that approach was found to perform worse when measured on real-world
-/// projects.
-pub fn structPackedFieldBitOffset(
- mod: *Module,
- struct_type: InternPool.LoadedStructType,
- field_index: u32,
-) u16 {
- const ip = &mod.intern_pool;
- assert(struct_type.layout == .@"packed");
- assert(struct_type.haveLayout(ip));
- var bit_sum: u64 = 0;
- for (0..struct_type.field_types.len) |i| {
- if (i == field_index) {
- return @intCast(bit_sum);
- }
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- bit_sum += field_ty.bitSize(mod);
- }
- unreachable; // index out of bounds
-}
-
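The on-the-fly computation described above is just a running sum of the bit sizes of the preceding fields. A standalone sketch over hypothetical field widths (illustration only, not the removed function):

const std = @import("std");

/// Bit offset of `field_index` in a packed layout: the sum of the bit sizes
/// of all fields before it.
fn packedFieldBitOffset(bit_sizes: []const u16, field_index: usize) u16 {
    var bit_sum: u16 = 0;
    for (bit_sizes[0..field_index]) |size| bit_sum += size;
    return bit_sum;
}

test "packed field bit offsets" {
    // e.g. packed struct { a: u3, b: u5, c: u9 }
    const sizes = [_]u16{ 3, 5, 9 };
    try std.testing.expectEqual(@as(u16, 0), packedFieldBitOffset(&sizes, 0));
    try std.testing.expectEqual(@as(u16, 3), packedFieldBitOffset(&sizes, 1));
    try std.testing.expectEqual(@as(u16, 8), packedFieldBitOffset(&sizes, 2));
}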
pub const ResolvedReference = struct {
referencer: AnalUnit,
src: LazySrcLoc,
@@ -6564,33 +4533,6 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved
return result;
}
-pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref {
- const decl_index = try zcu.getBuiltinDecl(name);
- zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt");
- return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern());
-}
-
-pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex {
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
- const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig");
- const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?;
- const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?;
- const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
- const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
- zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
- const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
- const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
- return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
-}
-
-pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type {
- const ty_inst = try zcu.getBuiltin(name);
- const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt"));
- ty.resolveFully(zcu) catch @panic("std.builtin is corrupt");
- return ty;
-}
-
pub fn fileByIndex(zcu: *const Zcu, i: File.Index) *File {
return zcu.import_table.values()[@intFromEnum(i)];
}
CMakeLists.txt
@@ -525,6 +525,7 @@ set(ZIG_STAGE2_SOURCES
src/Type.zig
src/Value.zig
src/Zcu.zig
+ src/Zcu/PerThread.zig
src/arch/aarch64/CodeGen.zig
src/arch/aarch64/Emit.zig
src/arch/aarch64/Mir.zig