Commit b4bb64ce78

David Rubin <daviru007@icloud.com>
2024-08-11 12:14:12
sema: rework type resolution to use Zcu when possible
1 parent 849c31a
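
The pattern applied throughout: functions that only query type information
now take `*Zcu` directly instead of a full `Zcu.PerThread`; callers that
hold a `pt` pass `pt.zcu`. Operations that may intern new values or need
per-thread state (e.g. `pt.singleMutPtrType`, `ty.fmt(pt)`) keep the
`PerThread` handle. A minimal, self-contained sketch of the signature
change (the types below are illustrative stand-ins, not the compiler's
real API):

    const std = @import("std");

    // Illustrative stand-ins for the compiler's types.
    const Zcu = struct { ptr_bits: u8 };
    const PerThread = struct { zcu: *Zcu };

    // Before: a pure query forced callers to hold a PerThread handle.
    fn abiSizeOld(pt: PerThread) u8 {
        return pt.zcu.ptr_bits / 8;
    }

    // After: the query takes *Zcu directly; callers holding only a `pt`
    // pass `pt.zcu`, as this commit does throughout.
    fn abiSizeNew(zcu: *Zcu) u8 {
        return zcu.ptr_bits / 8;
    }

    pub fn main() void {
        var zcu: Zcu = .{ .ptr_bits = 32 };
        zcu.ptr_bits = 64; // mutation stays with owners of the full handle
        const pt: PerThread = .{ .zcu = &zcu };
        std.debug.assert(abiSizeOld(pt) == abiSizeNew(pt.zcu));
    }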
src/arch/aarch64/abi.zig
@@ -15,44 +15,44 @@ pub const Class = union(enum) {
 };
 
 /// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+pub fn classifyType(ty: Type, zcu: *Zcu) Class {
+    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
 
     var maybe_float_bits: ?u16 = null;
-    switch (ty.zigTypeTag(pt.zcu)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Struct => {
-            if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
-            const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
+            if (ty.containerLayout(zcu) == .@"packed") return .byval;
+            const float_count = countFloats(ty, zcu, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Union => {
-            if (ty.containerLayout(pt.zcu) == .@"packed") return .byval;
-            const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
+            if (ty.containerLayout(zcu) == .@"packed") return .byval;
+            const float_count = countFloats(ty, zcu, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
         .Vector => {
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             // TODO is this controlled by a cpu feature?
             if (bit_size > 128) return .memory;
             return .byval;
         },
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
+            std.debug.assert(ty.isPtrLikeOptional(zcu));
             return .byval;
         },
         .Pointer => {
-            std.debug.assert(!ty.isSlice(pt.zcu));
+            std.debug.assert(!ty.isSlice(zcu));
             return .byval;
         },
         .ErrorUnion,
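
The aggregate thresholds in classifyType are unchanged by this commit:
over 128 bits goes to memory, 65-128 bits to a pair of integer registers,
and up to 64 bits to a single register. A runnable sketch of just those
thresholds (Class and classifyBits here are simplified stand-ins for the
code above, ignoring the packed-layout and float-array cases):

    const std = @import("std");

    // Simplified mirror of the size thresholds in classifyType.
    const Class = enum { memory, double_integer, integer };

    fn classifyBits(bit_size: u64) Class {
        if (bit_size > 128) return .memory;
        if (bit_size > 64) return .double_integer;
        return .integer;
    }

    test "aarch64 aggregate classification thresholds" {
        try std.testing.expectEqual(Class.integer, classifyBits(64));
        try std.testing.expectEqual(Class.double_integer, classifyBits(96));
        try std.testing.expectEqual(Class.memory, classifyBits(192));
    }
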
src/arch/aarch64/CodeGen.zig
@@ -467,8 +467,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 
 fn gen(self: *Self) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const cc = self.fn_type.fnCallingConvention(mod);
+    const zcu = pt.zcu;
+    const cc = self.fn_type.fnCallingConvention(zcu);
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
         _ = try self.addInst(.{
@@ -517,8 +517,8 @@ fn gen(self: *Self) !void {
 
                     const ty = self.typeOfIndex(inst);
 
-                    const abi_size = @as(u32, @intCast(ty.abiSize(pt)));
-                    const abi_align = ty.abiAlignment(pt);
+                    const abi_size = @as(u32, @intCast(ty.abiSize(zcu)));
+                    const abi_align = ty.abiAlignment(zcu);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
 
@@ -648,8 +648,8 @@ fn gen(self: *Self) !void {
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
@@ -1016,31 +1016,31 @@ fn allocMem(
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = self.typeOfIndex(inst).childType(mod);
+    const zcu = pt.zcu;
+    const elem_ty = self.typeOfIndex(inst).childType(zcu);
 
-    if (!elem_ty.hasRuntimeBits(pt)) {
+    if (!elem_ty.hasRuntimeBits(zcu)) {
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
         // allocations will always have an offset > 0.
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+    const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(pt);
+    const abi_align = elem_ty.abiAlignment(zcu);
 
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
     const pt = self.pt;
-    const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+    const abi_size = math.cast(u32, elem_ty.abiSize(pt.zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
     };
-    const abi_align = elem_ty.abiAlignment(pt);
+    const abi_align = elem_ty.abiAlignment(pt.zcu);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1128,13 +1128,13 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
-            const ret_ty = self.fn_type.fnReturnType(mod);
+            const ret_ty = self.fn_type.fnReturnType(zcu);
             const ptr_ty = try pt.singleMutPtrType(ret_ty);
 
             // addr_reg will contain the address of where to store the
@@ -1166,14 +1166,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand = ty_op.operand;
     const operand_mcv = try self.resolveInst(operand);
     const operand_ty = self.typeOf(operand);
-    const operand_info = operand_ty.intInfo(mod);
+    const operand_info = operand_ty.intInfo(zcu);
 
     const dest_ty = self.typeOfIndex(inst);
-    const dest_info = dest_ty.intInfo(mod);
+    const dest_info = dest_ty.intInfo(zcu);
 
     const result: MCValue = result: {
         const operand_lock: ?RegisterLock = switch (operand_mcv) {
@@ -1248,9 +1248,9 @@ fn trunc(
     dest_ty: Type,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const info_a = operand_ty.intInfo(mod);
-    const info_b = dest_ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info_a = operand_ty.intInfo(zcu);
+    const info_b = dest_ty.intInfo(zcu);
 
     if (info_b.bits <= 64) {
         const operand_reg = switch (operand) {
@@ -1312,7 +1312,7 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
@@ -1321,7 +1321,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag(mod)) {
+                switch (operand_ty.zigTypeTag(zcu)) {
                     .Bool => {
                         // TODO convert this to mvn + and
                         const op_reg = switch (operand) {
@@ -1355,7 +1355,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(mod);
+                        const int_info = operand_ty.intInfo(zcu);
                         if (int_info.bits <= 64) {
                             const op_reg = switch (operand) {
                                 .register => |r| r,
@@ -1408,13 +1408,13 @@ fn minMax(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
         .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -1899,13 +1899,13 @@ fn addSub(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO binary operations on floats", .{}),
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -1961,12 +1961,12 @@ fn mul(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 // TODO add optimisations for multiplication
                 // with immediates, for example a * 2 can be
@@ -1994,8 +1994,8 @@ fn divFloat(
     _ = maybe_inst;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO div_float", .{}),
         .Vector => return self.fail("TODO div_float on vectors", .{}),
         else => unreachable,
@@ -2011,13 +2011,13 @@ fn divTrunc(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2046,13 +2046,13 @@ fn divFloor(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2080,13 +2080,13 @@ fn divExact(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2117,13 +2117,13 @@ fn rem(
     _ = maybe_inst;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO rem/mod on floats", .{}),
         .Vector => return self.fail("TODO rem/mod on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -2168,7 +2168,7 @@ fn rem(
 
                 return MCValue{ .register = remainder_reg };
             } else {
                 return self.fail("TODO rem/mod for integers with bits > 64", .{});
             }
         },
         else => unreachable,
@@ -2189,11 +2189,11 @@ fn modulo(
     _ = maybe_inst;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO mod on floats", .{}),
         .Vector => return self.fail("TODO mod on vectors", .{}),
         .Int => return self.fail("TODO mod on ints", .{}),
         else => unreachable,
     }
 }
@@ -2208,11 +2208,11 @@ fn wrappingArithmetic(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(mod);
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 // Generate an add/sub/mul
                 const result: MCValue = switch (tag) {
@@ -2244,12 +2244,12 @@ fn bitwise(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 // TODO implement bitwise operations with immediates
                 const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2280,11 +2280,11 @@ fn shiftExact(
     _ = rhs_ty;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(mod);
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
 
@@ -2331,11 +2331,11 @@ fn shiftNormal(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(mod);
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 64) {
                 // Generate a shl_exact/shr_exact
                 const result: MCValue = switch (tag) {
@@ -2372,8 +2372,8 @@ fn booleanOp(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Bool => {
             assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
             assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2400,17 +2400,17 @@ fn ptrArithmetic(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Pointer => {
-            assert(rhs_ty.eql(Type.usize, mod));
+            assert(rhs_ty.eql(Type.usize, zcu));
 
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
-                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
-                else => ptr_ty.childType(mod),
+            const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
+                .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
+                else => ptr_ty.childType(zcu),
             };
-            const elem_size = elem_ty.abiSize(pt);
+            const elem_size = elem_ty.abiSize(zcu);
 
             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
@@ -2524,7 +2524,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2532,15 +2532,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt)));
-        const tuple_align = tuple_ty.abiAlignment(pt);
-        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt)));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(zcu)));
+        const tuple_align = tuple_ty.abiAlignment(zcu);
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
 
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(mod);
+                assert(lhs_ty.eql(rhs_ty, zcu));
+                const int_info = lhs_ty.intInfo(zcu);
                 switch (int_info.bits) {
                     1...31, 33...63 => {
                         const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2652,8 +2652,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
-    const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = self.pt.zcu;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2661,15 +2660,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt)));
-        const tuple_align = tuple_ty.abiAlignment(pt);
-        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt)));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(zcu)));
+        const tuple_align = tuple_ty.abiAlignment(zcu);
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
 
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
-                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(mod);
+                assert(lhs_ty.eql(rhs_ty, zcu));
+                const int_info = lhs_ty.intInfo(zcu);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -2878,7 +2877,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2886,14 +2885,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(pt)));
-        const tuple_align = tuple_ty.abiAlignment(pt);
-        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, pt)));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(zcu)));
+        const tuple_align = tuple_ty.abiAlignment(zcu);
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
 
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(mod);
+                const int_info = lhs_ty.intInfo(zcu);
                 if (int_info.bits <= 64) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -3027,10 +3026,10 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 
 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const payload_ty = optional_ty.optionalChild(mod);
-    if (!payload_ty.hasRuntimeBits(pt)) return MCValue.none;
-    if (optional_ty.isPtrLikeOptional(mod)) {
+    const zcu = pt.zcu;
+    const payload_ty = optional_ty.optionalChild(zcu);
+    if (!payload_ty.hasRuntimeBits(zcu)) return MCValue.none;
+    if (optional_ty.isPtrLikeOptional(zcu)) {
         // TODO should we reuse the operand here?
         const raw_reg = try self.register_manager.allocReg(inst, gp);
         const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3072,17 +3071,17 @@ fn errUnionErr(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_ty = error_union_ty.errorUnionSet(mod);
-    const payload_ty = error_union_ty.errorUnionPayload(mod);
-    if (err_ty.errorSetIsEmpty(mod)) {
+    const zcu = pt.zcu;
+    const err_ty = error_union_ty.errorUnionSet(zcu);
+    const payload_ty = error_union_ty.errorUnionPayload(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) {
         return MCValue{ .immediate = 0 };
     }
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, pt));
+    const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3104,7 +3103,7 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @as(u32, @intCast(err_ty.abiSize(pt))) * 8;
+            const err_bit_size = @as(u32, @intCast(err_ty.abiSize(zcu))) * 8;
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -3153,17 +3152,17 @@ fn errUnionPayload(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_ty = error_union_ty.errorUnionSet(mod);
-    const payload_ty = error_union_ty.errorUnionPayload(mod);
-    if (err_ty.errorSetIsEmpty(mod)) {
+    const zcu = pt.zcu;
+    const err_ty = error_union_ty.errorUnionSet(zcu);
+    const payload_ty = error_union_ty.errorUnionPayload(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) {
         return try error_union_bind.resolveToMcv(self);
     }
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return MCValue.none;
     }
 
-    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
+    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3185,10 +3184,10 @@ fn errUnionPayload(
             );
 
             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(pt))) * 8;
+            const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(zcu))) * 8;
 
             _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(zcu)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{
                     .rr_lsb_width = .{
                         // Set both registers to the X variant to get the full width
@@ -3266,7 +3265,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     if (self.liveness.isUnused(inst)) {
@@ -3275,7 +3274,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 
     const result: MCValue = result: {
         const payload_ty = self.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits(pt)) {
+        if (!payload_ty.hasRuntimeBits(zcu)) {
             break :result MCValue{ .immediate = 1 };
         }
 
@@ -3287,7 +3286,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-        if (optional_ty.isPtrLikeOptional(mod)) {
+        if (optional_ty.isPtrLikeOptional(zcu)) {
             // TODO should we check if we can reuse the operand?
             const raw_reg = try self.register_manager.allocReg(inst, gp);
             const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3295,9 +3294,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             break :result MCValue{ .register = reg };
         }
 
-        const optional_abi_size: u32 = @intCast(optional_ty.abiSize(pt));
-        const optional_abi_align = optional_ty.abiAlignment(pt);
-        const offset: u32 = @intCast(payload_ty.abiSize(pt));
+        const optional_abi_size: u32 = @intCast(optional_ty.abiSize(zcu));
+        const optional_abi_align = optional_ty.abiAlignment(zcu);
+        const offset: u32 = @intCast(payload_ty.abiSize(zcu));
 
         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
         try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3312,20 +3311,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = ty_op.ty.toType();
-        const error_ty = error_union_ty.errorUnionSet(mod);
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
+        const error_ty = error_union_ty.errorUnionSet(zcu);
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result operand;
 
-        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(pt)));
-        const abi_align = error_union_ty.abiAlignment(pt);
+        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(zcu)));
+        const abi_align = error_union_ty.abiAlignment(zcu);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, pt);
-        const err_off = errUnionErrorOffset(payload_ty, pt);
+        const payload_off = errUnionPayloadOffset(payload_ty, zcu);
+        const err_off = errUnionErrorOffset(payload_ty, zcu);
         try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
         try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
 
@@ -3339,18 +3338,18 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const error_union_ty = ty_op.ty.toType();
-        const error_ty = error_union_ty.errorUnionSet(mod);
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
+        const error_ty = error_union_ty.errorUnionSet(zcu);
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result operand;
 
-        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(pt)));
-        const abi_align = error_union_ty.abiAlignment(pt);
+        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(zcu)));
+        const abi_align = error_union_ty.abiAlignment(zcu);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, pt);
-        const err_off = errUnionErrorOffset(payload_ty, pt);
+        const payload_off = errUnionPayloadOffset(payload_ty, zcu);
+        const err_off = errUnionErrorOffset(payload_ty, zcu);
         try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
         try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
 
@@ -3443,11 +3442,11 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const slice_ty = self.typeOf(bin_op.lhs);
-    const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = slice_ty.slicePtrFieldType(mod);
+    const result: MCValue = if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) .dead else result: {
+        const ptr_ty = slice_ty.slicePtrFieldType(zcu);
 
         const slice_mcv = try self.resolveInst(bin_op.lhs);
         const base_mcv = slicePtr(slice_mcv);
@@ -3468,9 +3467,9 @@ fn ptrElemVal(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = ptr_ty.childType(mod);
-    const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
+    const zcu = pt.zcu;
+    const elem_ty = ptr_ty.childType(zcu);
+    const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
 
     // TODO optimize for elem_sizes of 1, 2, 4, 8
     switch (elem_size) {
@@ -3511,10 +3510,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ptr_ty = self.typeOf(bin_op.lhs);
-    const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
+    const result: MCValue = if (!ptr_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) .dead else result: {
         const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
 
@@ -3635,9 +3634,9 @@ fn reuseOperand(
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = ptr_ty.childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const zcu = pt.zcu;
+    const elem_ty = ptr_ty.childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     switch (ptr) {
         .none => unreachable,
@@ -3884,16 +3883,16 @@ fn genInlineMemsetCode(
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const elem_ty = self.typeOfIndex(inst);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_size = elem_ty.abiSize(zcu);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits(pt))
+        if (!elem_ty.hasRuntimeBits(zcu))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;
 
@@ -3916,12 +3915,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size = ty.abiSize(pt);
+    const zcu = pt.zcu;
+    const abi_size = ty.abiSize(zcu);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
-        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
+        1 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
+        2 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
         4 => .ldr_immediate,
         8 => .ldr_immediate,
         3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
@@ -3940,7 +3939,7 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 
 fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
     const pt = self.pt;
-    const abi_size = ty.abiSize(pt);
+    const abi_size = ty.abiSize(pt.zcu);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
         1 => .strb_immediate,
@@ -3963,7 +3962,7 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
     const pt = self.pt;
     log.debug("store: storing {} to {}", .{ value, ptr });
-    const abi_size = value_ty.abiSize(pt);
+    const abi_size = value_ty.abiSize(pt.zcu);
 
     switch (ptr) {
         .none => unreachable,
@@ -4116,11 +4115,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
-        const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
+        const struct_ty = ptr_ty.childType(zcu);
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4142,11 +4141,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const index = extra.field_index;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_ty = struct_ty.structFieldType(index, mod);
-        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
+        const struct_field_ty = struct_ty.structFieldType(index, zcu);
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -4193,13 +4192,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = ty_pl.ty.toType().childType(mod);
-        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, pt)));
+        const struct_ty = ty_pl.ty.toType().childType(zcu);
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, zcu)));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4274,12 +4273,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
     const ty = self.typeOf(callee);
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
-    const fn_ty = switch (ty.zigTypeTag(mod)) {
+    const fn_ty = switch (ty.zigTypeTag(zcu)) {
         .Fn => ty,
-        .Pointer => ty.childType(mod),
+        .Pointer => ty.childType(zcu),
         else => unreachable,
     };
 
@@ -4298,9 +4297,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
-        const ret_ty = fn_ty.fnReturnType(mod);
-        const ret_abi_size: u32 = @intCast(ret_ty.abiSize(pt));
-        const ret_abi_align = ret_ty.abiAlignment(pt);
+        const ret_ty = fn_ty.fnReturnType(zcu);
+        const ret_abi_size: u32 = @intCast(ret_ty.abiSize(zcu));
+        const ret_abi_align = ret_ty.abiAlignment(zcu);
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4387,7 +4386,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
         },
         else => return self.fail("TODO implement calling bitcasted functions", .{}),
     } else {
-        assert(ty.zigTypeTag(mod) == .Pointer);
+        assert(ty.zigTypeTag(zcu) == .Pointer);
         const mcv = try self.resolveInst(callee);
         try self.genSetReg(ty, .x30, mcv);
 
@@ -4426,15 +4425,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try self.resolveInst(un_op);
-    const ret_ty = self.fn_type.fnReturnType(mod);
+    const ret_ty = self.fn_type.fnReturnType(zcu);
 
     switch (self.ret_mcv) {
         .none => {},
         .immediate => {
-            assert(ret_ty.isError(mod));
+            assert(ret_ty.isError(zcu));
         },
         .register => |reg| {
             // Return result by value
@@ -4459,11 +4458,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const ptr = try self.resolveInst(un_op);
     const ptr_ty = self.typeOf(un_op);
-    const ret_ty = self.fn_type.fnReturnType(mod);
+    const ret_ty = self.fn_type.fnReturnType(zcu);
 
     switch (self.ret_mcv) {
         .none => {},
@@ -4483,8 +4482,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = un_op.toIndex().?;
             if (self.air.instructions.items(.tag)[@intFromEnum(op_inst)] != .ret_ptr) {
-                const abi_size = @as(u32, @intCast(ret_ty.abiSize(pt)));
-                const abi_align = ret_ty.abiAlignment(pt);
+                const abi_size = @as(u32, @intCast(ret_ty.abiSize(zcu)));
+                const abi_align = ret_ty.abiAlignment(zcu);
 
                 const offset = try self.allocMem(abi_size, abi_align, null);
 
@@ -4520,20 +4519,20 @@ fn cmp(
     op: math.CompareOperator,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
         .Optional => blk: {
-            const payload_ty = lhs_ty.optionalChild(mod);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const payload_ty = lhs_ty.optionalChild(zcu);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 break :blk Type.u1;
-            } else if (lhs_ty.isPtrLikeOptional(mod)) {
+            } else if (lhs_ty.isPtrLikeOptional(zcu)) {
                 break :blk Type.usize;
             } else {
                 return self.fail("TODO ARM cmp non-pointer optionals", .{});
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(mod),
+        .Enum => lhs_ty.intTagType(zcu),
         .Int => lhs_ty,
         .Bool => Type.u1,
         .Pointer => Type.usize,
@@ -4541,7 +4540,7 @@ fn cmp(
         else => unreachable,
     };
 
-    const int_info = int_ty.intInfo(mod);
+    const int_info = int_ty.intInfo(zcu);
     if (int_info.bits <= 64) {
         try self.spillCompareFlagsIfOccupied();
 
@@ -4628,10 +4627,10 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
-    const func = mod.funcInfo(extra.data.func);
+    const func = zcu.funcInfo(extra.data.func);
     // TODO emit debug info for function change
     _ = func;
     try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
@@ -4834,13 +4833,13 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
-        const payload_ty = operand_ty.optionalChild(mod);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+    const zcu = pt.zcu;
+    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(zcu)) blk: {
+        const payload_ty = operand_ty.optionalChild(zcu);
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };
 
-        const offset = @as(u32, @intCast(payload_ty.abiSize(pt)));
+        const offset = @as(u32, @intCast(payload_ty.abiSize(zcu)));
         const operand_mcv = try operand_bind.resolveToMcv(self);
         const new_mcv: MCValue = switch (operand_mcv) {
             .register => |source_reg| new: {
@@ -4853,7 +4852,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
                     try self.genSetReg(payload_ty, dest_reg, operand_mcv);
                 } else {
                     _ = try self.addInst(.{
-                        .tag = if (payload_ty.isSignedInt(mod))
+                        .tag = if (payload_ty.isSignedInt(zcu))
                             Mir.Inst.Tag.asr_immediate
                         else
                             Mir.Inst.Tag.lsr_immediate,
@@ -4891,10 +4890,10 @@ fn isErr(
     error_union_ty: Type,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const error_type = error_union_ty.errorUnionSet(mod);
+    const zcu = pt.zcu;
+    const error_type = error_union_ty.errorUnionSet(zcu);
 
-    if (error_type.errorSetIsEmpty(mod)) {
+    if (error_type.errorSetIsEmpty(zcu)) {
         return MCValue{ .immediate = 0 }; // always false
     }
 
@@ -4934,12 +4933,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4962,12 +4961,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4990,12 +4989,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5018,12 +5017,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5240,9 +5239,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const pt = self.pt;
+    const zcu = pt.zcu;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.typeOf(operand).hasRuntimeBits(pt)) {
+    if (self.typeOf(operand).hasRuntimeBits(zcu)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5417,8 +5417,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size = @as(u32, @intCast(ty.abiSize(pt)));
+    const zcu = pt.zcu;
+    const abi_size = @as(u32, @intCast(ty.abiSize(zcu)));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5473,11 +5473,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0, mod);
+            const wrapped_ty = ty.structFieldType(0, zcu);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
-            const overflow_bit_ty = ty.structFieldType(1, mod);
-            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt)));
+            const overflow_bit_ty = ty.structFieldType(1, zcu);
+            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
             const raw_cond_reg = try self.register_manager.allocReg(null, gp);
             const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
 
@@ -5589,7 +5589,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5701,13 +5701,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             try self.genLdrRegister(reg, reg.toX(), ty);
         },
         .stack_offset => |off| {
-            const abi_size = ty.abiSize(pt);
+            const abi_size = ty.abiSize(zcu);
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
-                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
+                        1 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
+                        2 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
                         4, 8 => .ldr_stack,
                         else => unreachable, // unexpected abi size
                     };
@@ -5725,13 +5725,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
         .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(pt);
+            const abi_size = ty.abiSize(zcu);
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                        1 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                        2 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                         4, 8 => .ldr_stack_argument,
                         else => unreachable, // unexpected abi size
                     };
@@ -5753,7 +5753,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const abi_size = @as(u32, @intCast(ty.abiSize(pt)));
+    const zcu = pt.zcu;
+    const abi_size = @as(u32, @intCast(ty.abiSize(zcu)));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5761,7 +5762,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
             if (!self.wantSafety())
                 return; // The already existing value will do just fine.
             // TODO Upgrade this to a memset call when we have that available.
-            switch (ty.abiSize(pt)) {
+            switch (ty.abiSize(zcu)) {
                 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -5953,13 +5954,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType(mod);
-        const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
+        const array_ty = ptr_ty.childType(zcu);
+        const array_len = @as(u32, @intCast(array_ty.arrayLen(zcu)));
         const ptr_bytes = 8;
         const stack_offset = try self.allocMem(ptr_bytes * 2, .@"8", inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
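`airArrayToSlice` materializes the `(ptr, len)` pair a slice consists of: the pointer is stored first and the comptime-known array length 8 bytes after it. The source-level coercion being lowered, as a sketch (illustrative, not part of this commit):

    const std = @import("std");

    test "coercing *[N]T to []T carries the array length" {
        var buf: [4]u8 = .{ 1, 2, 3, 4 };
        const s: []u8 = &buf; // array pointer -> slice; len becomes 4
        try std.testing.expectEqual(@as(usize, 4), s.len);
        try std.testing.expectEqual(@as(u8, 1), s[0]);
    }
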
@@ -6074,9 +6075,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen(mod);
+    const len = vector_ty.vectorLen(zcu);
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = res: {
@@ -6125,8 +6126,8 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
         const error_union_ty = self.typeOf(pl_op.operand);
-        const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(pt)));
-        const error_union_align = error_union_ty.abiAlignment(pt);
+        const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(pt.zcu)));
+        const error_union_align = error_union_ty.abiAlignment(pt.zcu);
 
         // The error union will die in the body. However, we need the
         // error union after the body in order to extract the payload
@@ -6156,11 +6157,11 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     // If the type has no codegen bits, no need to store it.
     const inst_ty = self.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !inst_ty.isError(mod))
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !inst_ty.isError(zcu))
         return MCValue{ .none = {} };
 
     const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, pt)).?);
@@ -6220,9 +6221,9 @@ const CallMCValues = struct {
 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_info = mod.typeToFunc(fn_ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_info = zcu.typeToFunc(fn_ty).?;
     const cc = fn_info.cc;
     var result: CallMCValues = .{
         .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
@@ -6233,7 +6234,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);
 
-    const ret_ty = fn_ty.fnReturnType(mod);
+    const ret_ty = fn_ty.fnReturnType(zcu);
 
     switch (cc) {
         .Naked => {
@@ -6248,14 +6249,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var ncrn: usize = 0; // Next Core Register Number
             var nsaa: u32 = 0; // Next stacked argument address
 
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
+                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(zcu));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError(mod));
+                    assert(ret_ty.isError(zcu));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) };
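The C-ABI return classification above is purely size-driven: a zero-size, error-only return degenerates to `immediate 0`, anything up to 8 bytes travels in the first integer return register, and larger values fall through to an indirect return in the lines that follow. A condensed sketch of that dispatch, with hypothetical names (not from this commit):

    const ReturnStrategy = enum { immediate_zero, register, indirect };

    fn returnStrategy(ret_ty_size: u64) ReturnStrategy {
        if (ret_ty_size == 0) return .immediate_zero; // error-only return, error code 0
        if (ret_ty_size <= 8) return .register; // fits in c_abi_int_return_regs[0]
        return .indirect; // returned through memory
    }
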
@@ -6265,7 +6266,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
-                const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt)));
+                const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu)));
                 if (param_size == 0) {
                     result_arg.* = .{ .none = {} };
                     continue;
@@ -6273,7 +6274,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
                 // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                 // values to spread across odd-numbered registers.
-                if (Type.fromInterned(ty).abiAlignment(pt) == .@"16" and !self.target.isDarwin()) {
+                if (Type.fromInterned(ty).abiAlignment(zcu) == .@"16" and !self.target.isDarwin()) {
                     // Round up NCRN to the next even number
                     ncrn += ncrn % 2;
                 }
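`ncrn += ncrn % 2` rounds the Next Core Register Number up to the next even register so that a 16-byte-aligned argument starts on an even/odd register pair; Darwin permits the odd start, hence the `isDarwin` exemption. The arithmetic in isolation (illustrative):

    var ncrn: usize = 3; // next free core register would be x3
    ncrn += ncrn % 2; // odd -> bumped to x4; an even ncrn is left unchanged
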
@@ -6291,7 +6292,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     ncrn = 8;
                     // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                     // that the entire stack space consumed by the arguments is 8-byte aligned.
-                    if (Type.fromInterned(ty).abiAlignment(pt) == .@"8") {
+                    if (Type.fromInterned(ty).abiAlignment(zcu) == .@"8") {
                         if (nsaa % 8 != 0) {
                             nsaa += 8 - (nsaa % 8);
                         }
@@ -6306,14 +6307,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             result.stack_align = 16;
         },
         .Unspecified => {
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(pt)));
+                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(zcu)));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError(mod));
+                    assert(ret_ty.isError(zcu));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) };
@@ -6330,9 +6331,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var stack_offset: u32 = 0;
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
-                if (Type.fromInterned(ty).abiSize(pt) > 0) {
-                    const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt));
-                    const param_alignment = Type.fromInterned(ty).abiAlignment(pt);
+                if (Type.fromInterned(ty).abiSize(zcu) > 0) {
+                    const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(zcu));
+                    const param_alignment = Type.fromInterned(ty).abiAlignment(zcu);
 
                     stack_offset = @intCast(param_alignment.forward(stack_offset));
                     result_arg.* = .{ .stack_argument_offset = stack_offset };
@@ -6383,7 +6384,7 @@ fn parseRegName(name: []const u8) ?Register {
 }
 
 fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
-    const abi_size = ty.abiSize(self.pt);
+    const abi_size = ty.abiSize(self.pt.zcu);
 
     switch (reg.class()) {
         .general_purpose => {
src/arch/arm/abi.zig
@@ -24,29 +24,29 @@ pub const Class = union(enum) {
 
 pub const Context = enum { ret, arg };
 
-pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
-    assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
+    assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
 
     var maybe_float_bits: ?u16 = null;
     const max_byval_size = 512;
-    const ip = &pt.zcu.intern_pool;
-    switch (ty.zigTypeTag(pt.zcu)) {
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Struct => {
-            const bit_size = ty.bitSize(pt);
-            if (ty.containerLayout(pt.zcu) == .@"packed") {
+            const bit_size = ty.bitSize(zcu);
+            if (ty.containerLayout(zcu) == .@"packed") {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
             if (bit_size > max_byval_size) return .memory;
-            const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
+            const float_count = countFloats(ty, zcu, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;
 
-            const fields = ty.structFieldCount(pt.zcu);
+            const fields = ty.structFieldCount(zcu);
             var i: u32 = 0;
             while (i < fields) : (i += 1) {
-                const field_ty = ty.structFieldType(i, pt.zcu);
-                const field_alignment = ty.structFieldAlign(i, pt);
-                const field_size = field_ty.bitSize(pt);
+                const field_ty = ty.structFieldType(i, zcu);
+                const field_alignment = ty.structFieldAlign(i, zcu);
+                const field_size = field_ty.bitSize(zcu);
                 if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
                     return Class.arrSize(bit_size, 64);
                 }
@@ -54,19 +54,19 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
             return Class.arrSize(bit_size, 32);
         },
         .Union => {
-            const bit_size = ty.bitSize(pt);
-            const union_obj = pt.zcu.typeToUnion(ty).?;
+            const bit_size = ty.bitSize(zcu);
+            const union_obj = zcu.typeToUnion(ty).?;
             if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
             if (bit_size > max_byval_size) return .memory;
-            const float_count = countFloats(ty, pt.zcu, &maybe_float_bits);
+            const float_count = countFloats(ty, zcu, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;
 
             for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
-                if (Type.fromInterned(field_ty).bitSize(pt) > 32 or
-                    pt.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
+                if (Type.fromInterned(field_ty).bitSize(zcu) > 32 or
+                    zcu.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
                 {
                     return Class.arrSize(bit_size, 64);
                 }
@@ -77,28 +77,28 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
         .Int => {
             // TODO this is incorrect for _BitInt(128) but implementing
             // this correctly makes implementing compiler-rt impossible.
-            // const bit_size = ty.bitSize(pt);
+            // const bit_size = ty.bitSize(zcu);
             // if (bit_size > 64) return .memory;
             return .byval;
         },
         .Enum, .ErrorSet => {
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             if (bit_size > 64) return .memory;
             return .byval;
         },
         .Vector => {
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             // TODO is this controlled by a cpu feature?
             if (ctx == .ret and bit_size > 128) return .memory;
             if (bit_size > 512) return .memory;
             return .byval;
         },
         .Optional => {
-            assert(ty.isPtrLikeOptional(pt.zcu));
+            assert(ty.isPtrLikeOptional(zcu));
             return .byval;
         },
         .Pointer => {
-            assert(!ty.isSlice(pt.zcu));
+            assert(!ty.isSlice(zcu));
             return .byval;
         },
         .ErrorUnion,
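Both `classifyType` variants now take the `*Zcu` directly instead of a `Zcu.PerThread`, so a caller that previously threaded `pt` through simply passes `pt.zcu`. A hypothetical call site (not from this commit):

    // before: const class = abi.classifyType(ty, pt, .arg);
    const class = abi.classifyType(ty, pt.zcu, .arg); // the aarch64 variant takes no Context argument
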
src/arch/arm/CodeGen.zig
@@ -474,8 +474,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 
 fn gen(self: *Self) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const cc = self.fn_type.fnCallingConvention(mod);
+    const zcu = pt.zcu;
+    const cc = self.fn_type.fnCallingConvention(zcu);
     if (cc != .Naked) {
         // push {fp, lr}
         const push_reloc = try self.addNop();
@@ -518,8 +518,8 @@ fn gen(self: *Self) !void {
 
                     const ty = self.typeOfIndex(inst);
 
-                    const abi_size: u32 = @intCast(ty.abiSize(pt));
-                    const abi_align = ty.abiAlignment(pt);
+                    const abi_size: u32 = @intCast(ty.abiSize(zcu));
+                    const abi_align = ty.abiAlignment(zcu);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
 
@@ -635,8 +635,8 @@ fn gen(self: *Self) !void {
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
@@ -999,10 +999,10 @@ fn allocMem(
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = self.typeOfIndex(inst).childType(mod);
+    const zcu = pt.zcu;
+    const elem_ty = self.typeOfIndex(inst).childType(zcu);
 
-    if (!elem_ty.hasRuntimeBits(pt)) {
+    if (!elem_ty.hasRuntimeBits(zcu)) {
         // As this stack item will never be dereferenced at runtime,
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
@@ -1010,21 +1010,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
         return 0;
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+    const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(pt);
+    const abi_align = elem_ty.abiAlignment(zcu);
 
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
     const pt = self.pt;
-    const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+    const abi_size = math.cast(u32, elem_ty.abiSize(pt.zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
     };
-    const abi_align = elem_ty.abiAlignment(pt);
+    const abi_align = elem_ty.abiAlignment(pt.zcu);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1108,13 +1108,13 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
-            const ret_ty = self.fn_type.fnReturnType(mod);
+            const ret_ty = self.fn_type.fnReturnType(zcu);
             const ptr_ty = try pt.singleMutPtrType(ret_ty);
 
             // addr_reg will contain the address of where to store the
@@ -1142,7 +1142,7 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
@@ -1151,10 +1151,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const operand_ty = self.typeOf(ty_op.operand);
     const dest_ty = self.typeOfIndex(inst);
 
-    const operand_abi_size = operand_ty.abiSize(pt);
-    const dest_abi_size = dest_ty.abiSize(pt);
-    const info_a = operand_ty.intInfo(mod);
-    const info_b = dest_ty.intInfo(mod);
+    const operand_abi_size = operand_ty.abiSize(zcu);
+    const dest_abi_size = dest_ty.abiSize(zcu);
+    const info_a = operand_ty.intInfo(zcu);
+    const info_b = dest_ty.intInfo(zcu);
 
     const dst_mcv: MCValue = blk: {
         if (info_a.bits == info_b.bits) {
@@ -1209,9 +1209,9 @@ fn trunc(
     dest_ty: Type,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const info_a = operand_ty.intInfo(mod);
-    const info_b = dest_ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info_a = operand_ty.intInfo(zcu);
+    const info_b = dest_ty.intInfo(zcu);
 
     if (info_b.bits <= 32) {
         if (info_a.bits > 32) {
@@ -1274,7 +1274,7 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
         const operand_ty = self.typeOf(ty_op.operand);
@@ -1283,7 +1283,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag(mod)) {
+                switch (operand_ty.zigTypeTag(zcu)) {
                     .Bool => {
                         var op_reg: Register = undefined;
                         var dest_reg: Register = undefined;
@@ -1316,7 +1316,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(mod);
+                        const int_info = operand_ty.intInfo(zcu);
                         if (int_info.bits <= 32) {
                             var op_reg: Register = undefined;
                             var dest_reg: Register = undefined;
@@ -1371,13 +1371,13 @@ fn minMax(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
         .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -1581,7 +1581,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1589,15 +1589,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt));
-        const tuple_align = tuple_ty.abiAlignment(pt);
-        const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt));
+        const tuple_size: u32 = @intCast(tuple_ty.abiSize(zcu));
+        const tuple_align = tuple_ty.abiAlignment(zcu);
+        const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
 
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(mod);
+                assert(lhs_ty.eql(rhs_ty, zcu));
+                const int_info = lhs_ty.intInfo(zcu);
                 if (int_info.bits < 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -1695,7 +1695,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1703,15 +1703,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt));
-        const tuple_align = tuple_ty.abiAlignment(pt);
-        const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt));
+        const tuple_size: u32 = @intCast(tuple_ty.abiSize(zcu));
+        const tuple_align = tuple_ty.abiAlignment(zcu);
+        const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
 
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
-                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(mod);
+                assert(lhs_ty.eql(rhs_ty, zcu));
+                const int_info = lhs_ty.intInfo(zcu);
                 if (int_info.bits <= 16) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -1860,20 +1860,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = result: {
         const lhs_ty = self.typeOf(extra.lhs);
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size: u32 = @intCast(tuple_ty.abiSize(pt));
-        const tuple_align = tuple_ty.abiAlignment(pt);
-        const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, pt));
+        const tuple_size: u32 = @intCast(tuple_ty.abiSize(zcu));
+        const tuple_align = tuple_ty.abiAlignment(zcu);
+        const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
 
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(mod);
+                const int_info = lhs_ty.intInfo(zcu);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -2020,7 +2020,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const optional_ty = self.typeOfIndex(inst);
-        const abi_size: u32 = @intCast(optional_ty.abiSize(pt));
+        const abi_size: u32 = @intCast(optional_ty.abiSize(pt.zcu));
 
         // Optional with a zero-bit payload type is just a boolean true
         if (abi_size == 1) {
@@ -2040,17 +2040,17 @@ fn errUnionErr(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_ty = error_union_ty.errorUnionSet(mod);
-    const payload_ty = error_union_ty.errorUnionPayload(mod);
-    if (err_ty.errorSetIsEmpty(mod)) {
+    const zcu = pt.zcu;
+    const err_ty = error_union_ty.errorUnionSet(zcu);
+    const payload_ty = error_union_ty.errorUnionPayload(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) {
         return MCValue{ .immediate = 0 };
     }
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, pt));
+    const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2072,7 +2072,7 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size: u32 = @intCast(err_ty.abiSize(pt) * 8);
+            const err_bit_size: u32 = @intCast(err_ty.abiSize(zcu) * 8);
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -2118,17 +2118,17 @@ fn errUnionPayload(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_ty = error_union_ty.errorUnionSet(mod);
-    const payload_ty = error_union_ty.errorUnionPayload(mod);
-    if (err_ty.errorSetIsEmpty(mod)) {
+    const zcu = pt.zcu;
+    const err_ty = error_union_ty.errorUnionSet(zcu);
+    const payload_ty = error_union_ty.errorUnionPayload(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) {
         return try error_union_bind.resolveToMcv(self);
     }
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return MCValue.none;
     }
 
-    const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, pt));
+    const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, zcu));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2150,10 +2150,10 @@ fn errUnionPayload(
             );
 
             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size: u32 = @intCast(payload_ty.abiSize(pt) * 8);
+            const payload_bit_size: u32 = @intCast(payload_ty.abiSize(zcu) * 8);
 
             _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(zcu)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{ .rr_lsb_width = .{
                     .rd = dest_reg,
                     .rn = operand_reg,
@@ -2229,20 +2229,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = ty_op.ty.toType();
-        const error_ty = error_union_ty.errorUnionSet(mod);
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
+        const error_ty = error_union_ty.errorUnionSet(zcu);
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result operand;
 
-        const abi_size: u32 = @intCast(error_union_ty.abiSize(pt));
-        const abi_align = error_union_ty.abiAlignment(pt);
+        const abi_size: u32 = @intCast(error_union_ty.abiSize(zcu));
+        const abi_align = error_union_ty.abiAlignment(zcu);
         const stack_offset: u32 = @intCast(try self.allocMem(abi_size, abi_align, inst));
-        const payload_off = errUnionPayloadOffset(payload_ty, pt);
-        const err_off = errUnionErrorOffset(payload_ty, pt);
+        const payload_off = errUnionPayloadOffset(payload_ty, zcu);
+        const err_off = errUnionErrorOffset(payload_ty, zcu);
         try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
         try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
 
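Wrapping `T` into `E!T` stores the payload at `errUnionPayloadOffset` and a zero error code at `errUnionErrorOffset`, zero meaning "no error". The source-level operation, as a sketch (illustrative, not part of this commit):

    const std = @import("std");

    test "T to E!T stores the payload and a zero error code" {
        const eu: anyerror!u32 = 42; // what airWrapErrUnionPayload lowers
        try std.testing.expectEqual(@as(u32, 42), try eu);
    }
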
@@ -2254,20 +2254,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 /// E to E!T
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = ty_op.ty.toType();
-        const error_ty = error_union_ty.errorUnionSet(mod);
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
+        const error_ty = error_union_ty.errorUnionSet(zcu);
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result operand;
 
-        const abi_size: u32 = @intCast(error_union_ty.abiSize(pt));
-        const abi_align = error_union_ty.abiAlignment(pt);
+        const abi_size: u32 = @intCast(error_union_ty.abiSize(zcu));
+        const abi_align = error_union_ty.abiAlignment(zcu);
         const stack_offset: u32 = @intCast(try self.allocMem(abi_size, abi_align, inst));
-        const payload_off = errUnionPayloadOffset(payload_ty, pt);
-        const err_off = errUnionErrorOffset(payload_ty, pt);
+        const payload_off = errUnionPayloadOffset(payload_ty, zcu);
+        const err_off = errUnionErrorOffset(payload_ty, zcu);
         try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
         try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
 
@@ -2372,9 +2372,9 @@ fn ptrElemVal(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = ptr_ty.childType(mod);
-    const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
+    const zcu = pt.zcu;
+    const elem_ty = ptr_ty.childType(zcu);
+    const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
 
     switch (elem_size) {
         1, 4 => {
@@ -2432,11 +2432,11 @@ fn ptrElemVal(
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const slice_ty = self.typeOf(bin_op.lhs);
-    const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = slice_ty.slicePtrFieldType(mod);
+    const result: MCValue = if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) .dead else result: {
+        const ptr_ty = slice_ty.slicePtrFieldType(zcu);
 
         const slice_mcv = try self.resolveInst(bin_op.lhs);
         const base_mcv = slicePtr(slice_mcv);
@@ -2476,8 +2476,8 @@ fn arrayElemVal(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = array_ty.childType(mod);
+    const zcu = pt.zcu;
+    const elem_ty = array_ty.childType(zcu);
 
     const mcv = try array_bind.resolveToMcv(self);
     switch (mcv) {
@@ -2533,10 +2533,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ptr_ty = self.typeOf(bin_op.lhs);
-    const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
+    const result: MCValue = if (!ptr_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) .dead else result: {
         const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
 
@@ -2668,9 +2668,9 @@ fn reuseOperand(
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = ptr_ty.childType(mod);
-    const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
+    const zcu = pt.zcu;
+    const elem_ty = ptr_ty.childType(zcu);
+    const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
 
     switch (ptr) {
         .none => unreachable,
@@ -2746,20 +2746,20 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits(pt))
+        if (!elem_ty.hasRuntimeBits(zcu))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;
 
         const dest_mcv: MCValue = blk: {
-            const ptr_fits_dest = elem_ty.abiSize(pt) <= 4;
+            const ptr_fits_dest = elem_ty.abiSize(zcu) <= 4;
             if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
                 // The MCValue that holds the pointer can be re-used as the value.
                 break :blk ptr;
@@ -2776,7 +2776,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
     const pt = self.pt;
-    const elem_size: u32 = @intCast(value_ty.abiSize(pt));
+    const elem_size: u32 = @intCast(value_ty.abiSize(pt.zcu));
 
     switch (ptr) {
         .none => unreachable,
@@ -2896,11 +2896,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
-        const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, pt));
+        const struct_ty = ptr_ty.childType(zcu);
+        const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -2921,12 +2921,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const operand = extra.struct_operand;
     const index = extra.field_index;
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, pt));
-        const struct_field_ty = struct_ty.structFieldType(index, mod);
+        const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu));
+        const struct_field_ty = struct_ty.structFieldType(index, zcu);
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -2989,10 +2989,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                 );
 
                 const field_bit_offset = struct_field_offset * 8;
-                const field_bit_size: u32 = @intCast(struct_field_ty.abiSize(pt) * 8);
+                const field_bit_size: u32 = @intCast(struct_field_ty.abiSize(zcu) * 8);
 
                 _ = try self.addInst(.{
-                    .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
+                    .tag = if (struct_field_ty.isSignedInt(zcu)) Mir.Inst.Tag.sbfx else .ubfx,
                     .data = .{ .rr_lsb_width = .{
                         .rd = dest_reg,
                         .rn = operand_reg,
@@ -3012,18 +3012,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = ty_pl.ty.toType().childType(mod);
+        const struct_ty = ty_pl.ty.toType().childType(zcu);
 
-        if (struct_ty.zigTypeTag(mod) == .Union) {
+        if (struct_ty.zigTypeTag(zcu) == .Union) {
             return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
         }
 
-        const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(extra.field_index, pt));
+        const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(extra.field_index, zcu));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -3407,13 +3407,13 @@ fn addSub(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3464,13 +3464,13 @@ fn mul(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 // TODO add optimisations for multiplication
                 // with immediates, for example a * 2 can be
@@ -3498,8 +3498,8 @@ fn divFloat(
     _ = maybe_inst;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         else => unreachable,
@@ -3515,13 +3515,13 @@ fn divTrunc(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -3559,13 +3559,13 @@ fn divFloor(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -3608,8 +3608,8 @@ fn divExact(
     _ = maybe_inst;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => return self.fail("TODO ARM div_exact", .{}),
@@ -3626,17 +3626,17 @@ fn rem(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 switch (int_info.signedness) {
                     .signed => {
                         return self.fail("TODO ARM signed integer mod", .{});
                     },
                     .unsigned => {
                         const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3667,10 +3667,10 @@ fn rem(
 
                                 return MCValue{ .register = dest_reg };
                             } else {
                                 return self.fail("TODO ARM integer mod by constants", .{});
                             }
                         } else {
                             return self.fail("TODO ARM integer mod", .{});
                         }
                     },
                 }
@@ -3696,11 +3696,11 @@ fn modulo(
     _ = maybe_inst;
 
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => return self.fail("TODO ARM mod", .{}),
         else => unreachable,
     }
 }
@@ -3715,11 +3715,11 @@ fn wrappingArithmetic(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(mod);
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 // Generate an add/sub/mul
                 const result: MCValue = switch (tag) {
@@ -3754,12 +3754,12 @@ fn bitwise(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(mod);
+            assert(lhs_ty.eql(rhs_ty, zcu));
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3800,17 +3800,17 @@ fn shiftExact(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(mod);
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
 
                 const mir_tag: Mir.Inst.Tag = switch (tag) {
                     .shl_exact => .lsl,
-                    .shr_exact => switch (lhs_ty.intInfo(mod).signedness) {
+                    .shr_exact => switch (lhs_ty.intInfo(zcu).signedness) {
                         .signed => Mir.Inst.Tag.asr,
                         .unsigned => Mir.Inst.Tag.lsr,
                     },
@@ -3840,11 +3840,11 @@ fn shiftNormal(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(mod);
+            const int_info = lhs_ty.intInfo(zcu);
             if (int_info.bits <= 32) {
                 // Generate a shl_exact/shr_exact
                 const result: MCValue = switch (tag) {
@@ -3884,8 +3884,8 @@ fn booleanOp(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Bool => {
             const lhs_immediate = try lhs_bind.resolveToImmediate(self);
             const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3919,17 +3919,17 @@ fn ptrArithmetic(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Pointer => {
-            assert(rhs_ty.eql(Type.usize, mod));
+            assert(rhs_ty.eql(Type.usize, zcu));
 
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
-                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
-                else => ptr_ty.childType(mod),
+            const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
+                .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
+                else => ptr_ty.childType(zcu),
             };
-            const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
+            const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
 
             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
@@ -3957,12 +3957,12 @@ fn ptrArithmetic(
 
 fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size = ty.abiSize(pt);
+    const zcu = pt.zcu;
+    const abi_size = ty.abiSize(zcu);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
-        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh,
+        1 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsb else .ldrb,
+        2 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsh else .ldrh,
         3, 4 => .ldr,
         else => unreachable,
     };
@@ -3979,7 +3979,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
     } };
 
     const data: Mir.Inst.Data = switch (abi_size) {
-        1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset,
+        1 => if (ty.isSignedInt(zcu)) rr_extra_offset else rr_offset,
         2 => rr_extra_offset,
         3, 4 => rr_offset,
         else => unreachable,
@@ -3993,7 +3993,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
 
 fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void {
     const pt = self.pt;
-    const abi_size = ty.abiSize(pt);
+    const abi_size = ty.abiSize(pt.zcu);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
         1 => .strb,
@@ -4253,12 +4253,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
     const ty = self.typeOf(callee);
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
-    const fn_ty = switch (ty.zigTypeTag(mod)) {
+    const fn_ty = switch (ty.zigTypeTag(zcu)) {
         .Fn => ty,
-        .Pointer => ty.childType(mod),
+        .Pointer => ty.childType(zcu),
         else => unreachable,
     };
 
@@ -4283,9 +4283,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     // untouched by the parameter passing code
     const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
         log.debug("airCall: return by reference", .{});
-        const ret_ty = fn_ty.fnReturnType(mod);
-        const ret_abi_size: u32 = @intCast(ret_ty.abiSize(pt));
-        const ret_abi_align = ret_ty.abiAlignment(pt);
+        const ret_ty = fn_ty.fnReturnType(zcu);
+        const ret_abi_size: u32 = @intCast(ret_ty.abiSize(zcu));
+        const ret_abi_align = ret_ty.abiAlignment(zcu);
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ptr_ty = try pt.singleMutPtrType(ret_ty);
@@ -4335,7 +4335,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             return self.fail("TODO implement calling bitcasted functions", .{});
         },
     } else {
-        assert(ty.zigTypeTag(mod) == .Pointer);
+        assert(ty.zigTypeTag(zcu) == .Pointer);
         const mcv = try self.resolveInst(callee);
 
         try self.genSetReg(Type.usize, .lr, mcv);
@@ -4370,7 +4370,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 if (RegisterManager.indexOfRegIntoTracked(reg) == null) {
                     // Save function return value into a tracked register
                     log.debug("airCall: copying {} as it is not tracked", .{reg});
-                    const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value);
+                    const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(zcu), info.return_value);
                     break :result MCValue{ .register = new_reg };
                 }
             },
@@ -4395,15 +4395,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try self.resolveInst(un_op);
-    const ret_ty = self.fn_type.fnReturnType(mod);
+    const ret_ty = self.fn_type.fnReturnType(zcu);
 
     switch (self.ret_mcv) {
         .none => {},
         .immediate => {
-            assert(ret_ty.isError(mod));
+            assert(ret_ty.isError(zcu));
         },
         .register => |reg| {
             // Return result by value
@@ -4428,11 +4428,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const ptr = try self.resolveInst(un_op);
     const ptr_ty = self.typeOf(un_op);
-    const ret_ty = self.fn_type.fnReturnType(mod);
+    const ret_ty = self.fn_type.fnReturnType(zcu);
 
     switch (self.ret_mcv) {
         .none => {},
@@ -4452,8 +4452,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = un_op.toIndex().?;
             if (self.air.instructions.items(.tag)[@intFromEnum(op_inst)] != .ret_ptr) {
-                const abi_size: u32 = @intCast(ret_ty.abiSize(pt));
-                const abi_align = ret_ty.abiAlignment(pt);
+                const abi_size: u32 = @intCast(ret_ty.abiSize(zcu));
+                const abi_align = ret_ty.abiAlignment(zcu);
 
                 const offset = try self.allocMem(abi_size, abi_align, null);
 
@@ -4490,20 +4490,20 @@ fn cmp(
     op: math.CompareOperator,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
         .Optional => blk: {
-            const payload_ty = lhs_ty.optionalChild(mod);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const payload_ty = lhs_ty.optionalChild(zcu);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 break :blk Type.u1;
-            } else if (lhs_ty.isPtrLikeOptional(mod)) {
+            } else if (lhs_ty.isPtrLikeOptional(zcu)) {
                 break :blk Type.usize;
             } else {
                 return self.fail("TODO ARM cmp non-pointer optionals", .{});
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(mod),
+        .Enum => lhs_ty.intTagType(zcu),
         .Int => lhs_ty,
         .Bool => Type.u1,
         .Pointer => Type.usize,
@@ -4511,7 +4511,7 @@ fn cmp(
         else => unreachable,
     };
 
-    const int_info = int_ty.intInfo(mod);
+    const int_info = int_ty.intInfo(zcu);
     if (int_info.bits <= 32) {
         try self.spillCompareFlagsIfOccupied();
 
@@ -4597,10 +4597,10 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
-    const func = mod.funcInfo(extra.data.func);
+    const func = zcu.funcInfo(extra.data.func);
     // TODO emit debug info for function change
     _ = func;
     try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
@@ -4810,9 +4810,9 @@ fn isNull(
     operand_ty: Type,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    if (operand_ty.isPtrLikeOptional(mod)) {
-        assert(operand_ty.abiSize(pt) == 4);
+    const zcu = pt.zcu;
+    if (operand_ty.isPtrLikeOptional(zcu)) {
+        assert(operand_ty.abiSize(zcu) == 4);
 
         const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
         return self.cmp(operand_bind, imm_bind, Type.usize, .eq);
@@ -4845,12 +4845,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4873,12 +4873,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4894,10 +4894,10 @@ fn isErr(
     error_union_ty: Type,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const error_type = error_union_ty.errorUnionSet(mod);
+    const zcu = pt.zcu;
+    const error_type = error_union_ty.errorUnionSet(zcu);
 
-    if (error_type.errorSetIsEmpty(mod)) {
+    if (error_type.errorSetIsEmpty(zcu)) {
         return MCValue{ .immediate = 0 }; // always false
     }
 
@@ -4937,12 +4937,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4965,12 +4965,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5184,10 +5184,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
-    const pt = self.pt;
+    const zcu = self.pt.zcu;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.typeOf(operand).hasRuntimeBits(pt)) {
+    if (self.typeOf(operand).hasRuntimeBits(zcu)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5356,8 +5356,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const zcu = pt.zcu;
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5434,11 +5434,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0, mod);
+            const wrapped_ty = ty.structFieldType(0, zcu);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
 
-            const overflow_bit_ty = ty.structFieldType(1, mod);
-            const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, pt));
+            const overflow_bit_ty = ty.structFieldType(1, zcu);
+            const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, zcu));
             const cond_reg = try self.register_manager.allocReg(null, gp);
 
             // C flag: movcs reg, #1
@@ -5519,7 +5519,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5694,17 +5694,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
         },
         .stack_offset => |off| {
             // TODO: maybe addressing from sp instead of fp
-            const abi_size: u32 = @intCast(ty.abiSize(pt));
+            const abi_size: u32 = @intCast(ty.abiSize(zcu));
 
             const tag: Mir.Inst.Tag = switch (abi_size) {
-                1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
-                2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh,
+                1 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsb else .ldrb,
+                2 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsh else .ldrh,
                 3, 4 => .ldr,
                 else => unreachable,
             };
 
             const extra_offset = switch (abi_size) {
-                1 => ty.isSignedInt(mod),
+                1 => ty.isSignedInt(zcu),
                 2 => true,
                 3, 4 => false,
                 else => unreachable,
@@ -5745,11 +5745,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
         .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(pt);
+            const abi_size = ty.abiSize(zcu);
 
             const tag: Mir.Inst.Tag = switch (abi_size) {
-                1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                1 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                2 => if (ty.isSignedInt(zcu)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                 3, 4 => .ldr_stack_argument,
                 else => unreachable,
             };
@@ -5767,7 +5767,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5923,13 +5923,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType(mod);
-        const array_len: u32 = @intCast(array_ty.arrayLen(mod));
+        const array_ty = ptr_ty.childType(zcu);
+        const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
 
         const stack_offset = try self.allocMem(8, .@"8", inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -6043,9 +6043,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen(mod);
+    const len = vector_ty.vectorLen(zcu);
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
@@ -6095,8 +6095,8 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
         const error_union_ty = self.typeOf(pl_op.operand);
-        const error_union_size: u32 = @intCast(error_union_ty.abiSize(pt));
-        const error_union_align = error_union_ty.abiAlignment(pt);
+        const error_union_size: u32 = @intCast(error_union_ty.abiSize(pt.zcu));
+        const error_union_align = error_union_ty.abiAlignment(pt.zcu);
 
         // The error union will die in the body. However, we need the
         // error union after the body in order to extract the payload
@@ -6126,11 +6126,11 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     // If the type has no codegen bits, no need to store it.
     const inst_ty = self.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !inst_ty.isError(mod))
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !inst_ty.isError(zcu))
         return MCValue{ .none = {} };
 
     const inst_index = inst.toIndex() orelse return self.genTypedValue((try self.air.value(inst, pt)).?);
@@ -6189,9 +6189,9 @@ const CallMCValues = struct {
 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_info = mod.typeToFunc(fn_ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_info = zcu.typeToFunc(fn_ty).?;
     const cc = fn_info.cc;
     var result: CallMCValues = .{
         .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
@@ -6202,7 +6202,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);
 
-    const ret_ty = fn_ty.fnReturnType(mod);
+    const ret_ty = fn_ty.fnReturnType(zcu);
 
     switch (cc) {
         .Naked => {
@@ -6217,12 +6217,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var ncrn: usize = 0; // Next Core Register Number
             var nsaa: u32 = 0; // Next stacked argument address
 
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
+                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(zcu));
                 // TODO handle cases where multiple registers are used
                 if (ret_ty_size <= 4) {
                     result.return_value = .{ .register = c_abi_int_return_regs[0] };
@@ -6237,10 +6237,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
-                if (Type.fromInterned(ty).abiAlignment(pt) == .@"8")
+                if (Type.fromInterned(ty).abiAlignment(zcu) == .@"8")
                     ncrn = std.mem.alignForward(usize, ncrn, 2);
 
-                const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt));
+                const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(zcu));
                 if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
                     if (param_size <= 4) {
                         result_arg.* = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6252,7 +6252,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     return self.fail("TODO MCValues split between registers and stack", .{});
                 } else {
                     ncrn = 4;
-                    if (Type.fromInterned(ty).abiAlignment(pt) == .@"8")
+                    if (Type.fromInterned(ty).abiAlignment(zcu) == .@"8")
                         nsaa = std.mem.alignForward(u32, nsaa, 8);
 
                     result_arg.* = .{ .stack_argument_offset = nsaa };
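
In the C calling convention above, `ncrn` walks the four core argument registers and `nsaa` tracks the stack once they are exhausted; 8-byte-aligned parameters first round `ncrn` up to an even register. A worked example under those rules (illustrative signature; parameters split between registers and stack still hit the TODO path above):

    // fn f(a: u32, b: u32, c: u32, d: u32, e: u32) callconv(.C) void
    //   a -> r0, b -> r1, c -> r2, d -> r3   (ncrn runs 0..4)
    //   e -> stack_argument_offset 0         (ncrn == 4, so nsaa takes over)
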
@@ -6264,14 +6264,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             result.stack_align = 8;
         },
         .Unspecified => {
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
+                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(zcu));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError(mod));
+                    assert(ret_ty.isError(zcu));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 4) {
                     result.return_value = .{ .register = .r0 };
@@ -6287,9 +6287,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var stack_offset: u32 = 0;
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
-                if (Type.fromInterned(ty).abiSize(pt) > 0) {
-                    const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(pt));
-                    const param_alignment = Type.fromInterned(ty).abiAlignment(pt);
+                if (Type.fromInterned(ty).abiSize(zcu) > 0) {
+                    const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(zcu));
+                    const param_alignment = Type.fromInterned(ty).abiAlignment(zcu);
 
                     stack_offset = @intCast(param_alignment.forward(stack_offset));
                     result_arg.* = .{ .stack_argument_offset = stack_offset };
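
The same mechanical rewrite repeats across every backend this commit touches: layout queries that only need type information now take the `*Zcu` directly, while `Zcu.PerThread` survives only where something must be interned or constructed. A condensed before/after sketch of the pattern, drawn from the hunks above rather than any new API:

    // Before: layout queries routed through the per-thread handle.
    //   const abi_size: u32 = @intCast(ty.abiSize(pt));
    //   const abi_align = ty.abiAlignment(pt);
    // After: they take the Zcu itself; pt remains for interning only.
    const zcu = pt.zcu;
    const abi_size: u32 = @intCast(ty.abiSize(zcu));
    const abi_align = ty.abiAlignment(zcu);
    const ptr_ty = try pt.singleMutPtrType(ret_ty); // still needs PerThread
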
src/arch/riscv64/abi.zig
@@ -9,15 +9,15 @@ const assert = std.debug.assert;
 
 pub const Class = enum { memory, byval, integer, double_integer, fields };
 
-pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
-    const target = pt.zcu.getTarget();
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+pub fn classifyType(ty: Type, zcu: *Zcu) Class {
+    const target = zcu.getTarget();
+    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
 
     const max_byval_size = target.ptrBitWidth() * 2;
-    switch (ty.zigTypeTag(pt.zcu)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Struct => {
-            const bit_size = ty.bitSize(pt);
-            if (ty.containerLayout(pt.zcu) == .@"packed") {
+            const bit_size = ty.bitSize(zcu);
+            if (ty.containerLayout(zcu) == .@"packed") {
                 if (bit_size > max_byval_size) return .memory;
                 return .byval;
             }
@@ -25,12 +25,12 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
             if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: {
                 var any_fp = false;
                 var field_count: usize = 0;
-                for (0..ty.structFieldCount(pt.zcu)) |field_index| {
-                    const field_ty = ty.structFieldType(field_index, pt.zcu);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                for (0..ty.structFieldCount(zcu)) |field_index| {
+                    const field_ty = ty.structFieldType(field_index, zcu);
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                     if (field_ty.isRuntimeFloat())
                         any_fp = true
-                    else if (!field_ty.isAbiInt(pt.zcu))
+                    else if (!field_ty.isAbiInt(zcu))
                         break :fields;
                     field_count += 1;
                     if (field_count > 2) break :fields;
@@ -45,8 +45,8 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
             return .integer;
         },
         .Union => {
-            const bit_size = ty.bitSize(pt);
-            if (ty.containerLayout(pt.zcu) == .@"packed") {
+            const bit_size = ty.bitSize(zcu);
+            if (ty.containerLayout(zcu) == .@"packed") {
                 if (bit_size > max_byval_size) return .memory;
                 return .byval;
             }
@@ -58,21 +58,21 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) Class {
         .Bool => return .integer,
         .Float => return .byval,
         .Int, .Enum, .ErrorSet => {
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             if (bit_size > max_byval_size) return .memory;
             return .byval;
         },
         .Vector => {
-            const bit_size = ty.bitSize(pt);
+            const bit_size = ty.bitSize(zcu);
             if (bit_size > max_byval_size) return .memory;
             return .integer;
         },
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional(pt.zcu));
+            std.debug.assert(ty.isPtrLikeOptional(zcu));
             return .byval;
         },
         .Pointer => {
-            std.debug.assert(!ty.isSlice(pt.zcu));
+            std.debug.assert(!ty.isSlice(zcu));
             return .byval;
         },
         .ErrorUnion,
@@ -97,19 +97,18 @@ pub const SystemClass = enum { integer, float, memory, none };
 
 /// There are at most 8 possible return slots. Returned values occupy
 /// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
-    const zcu = pt.zcu;
+pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
     var result = [1]SystemClass{.none} ** 8;
     const memory_class = [_]SystemClass{
         .memory, .none, .none, .none,
         .none,   .none, .none, .none,
     };
-    switch (ty.zigTypeTag(pt.zcu)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Bool, .Void, .NoReturn => {
             result[0] = .integer;
             return result;
         },
-        .Pointer => switch (ty.ptrSize(pt.zcu)) {
+        .Pointer => switch (ty.ptrSize(zcu)) {
             .Slice => {
                 result[0] = .integer;
                 result[1] = .integer;
@@ -121,14 +120,14 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
             },
         },
         .Optional => {
-            if (ty.isPtrLikeOptional(pt.zcu)) {
+            if (ty.isPtrLikeOptional(zcu)) {
                 result[0] = .integer;
                 return result;
             }
             return memory_class;
         },
         .Int, .Enum, .ErrorSet => {
-            const int_bits = ty.intInfo(pt.zcu).bits;
+            const int_bits = ty.intInfo(zcu).bits;
             if (int_bits <= 64) {
                 result[0] = .integer;
                 return result;
@@ -153,8 +152,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
             unreachable; // support split float args
         },
         .ErrorUnion => {
-            const payload_ty = ty.errorUnionPayload(pt.zcu);
-            const payload_bits = payload_ty.bitSize(pt);
+            const payload_ty = ty.errorUnionPayload(zcu);
+            const payload_bits = payload_ty.bitSize(zcu);
 
             // the error union itself
             result[0] = .integer;
@@ -165,8 +164,8 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
             return memory_class;
         },
         .Struct, .Union => {
-            const layout = ty.containerLayout(pt.zcu);
-            const ty_size = ty.abiSize(pt);
+            const layout = ty.containerLayout(zcu);
+            const ty_size = ty.abiSize(zcu);
 
             if (layout == .@"packed") {
                 assert(ty_size <= 16);
@@ -178,7 +177,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
             return memory_class;
         },
         .Array => {
-            const ty_size = ty.abiSize(pt);
+            const ty_size = ty.abiSize(zcu);
             if (ty_size <= 8) {
                 result[0] = .integer;
                 return result;
@@ -192,7 +191,7 @@ pub fn classifySystem(ty: Type, pt: Zcu.PerThread) [8]SystemClass {
         },
         .Vector => {
             // we pass vectors through integer registers if they are small enough to fit.
-            const vec_bits = ty.totalVectorBits(pt);
+            const vec_bits = ty.totalVectorBits(zcu);
             if (vec_bits <= 64) {
                 result[0] = .integer;
                 return result;
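
With `classifySystem` taking `*Zcu` directly, callers slice the fixed eight-slot array up to the first `.none`, as the riscv64 CodeGen changes below already do. A minimal usage sketch (assuming the backend's existing `std.mem` import):

    // Sketch: walk the occupied slots of a classified type.
    const classes = std.mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
    for (classes) |class| switch (class) {
        .integer => {}, // occupies one integer register slot
        .float => {}, // occupies one float register slot
        .memory => {}, // the whole value travels through memory
        .none => unreachable, // sliceTo already stopped at .none
    };
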
src/arch/riscv64/CodeGen.zig
@@ -591,14 +591,14 @@ const FrameAlloc = struct {
             .ref_count = 0,
         };
     }
-    fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc {
+    fn initType(ty: Type, zcu: *Zcu) FrameAlloc {
         return init(.{
-            .size = ty.abiSize(pt),
-            .alignment = ty.abiAlignment(pt),
+            .size = ty.abiSize(zcu),
+            .alignment = ty.abiAlignment(zcu),
         });
     }
-    fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc {
-        const abi_size = ty.abiSize(pt);
+    fn initSpill(ty: Type, zcu: *Zcu) FrameAlloc {
+        const abi_size = ty.abiSize(zcu);
         const spill_size = if (abi_size < 8)
             math.ceilPowerOfTwoAssert(u64, abi_size)
         else
@@ -606,7 +606,7 @@ const FrameAlloc = struct {
         return init(.{
             .size = spill_size,
             .pad = @intCast(spill_size - abi_size),
-            .alignment = ty.abiAlignment(pt).maxStrict(
+            .alignment = ty.abiAlignment(zcu).maxStrict(
                 Alignment.fromNonzeroByteUnits(@min(spill_size, 8)),
             ),
         });
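
`FrameAlloc.initSpill` rounds sub-8-byte spills up to a power of two and records the resulting padding; larger sizes take the elided `else` branch. Worked through the arithmetic above (illustrative sizes):

    // abi_size = 3  ->  spill_size = 4 (next power of two), pad = 1,
    //                    alignment = max(type's ABI alignment, 4)
    // abi_size = 5  ->  spill_size = 8, pad = 3,
    //                    alignment = max(type's ABI alignment, 8)
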
@@ -835,11 +835,11 @@ pub fn generate(
     function.args = call_info.args;
     function.ret_mcv = call_info.return_value;
     function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
-        .size = Type.u64.abiSize(pt),
-        .alignment = Type.u64.abiAlignment(pt).min(call_info.stack_align),
+        .size = Type.u64.abiSize(zcu),
+        .alignment = Type.u64.abiAlignment(zcu).min(call_info.stack_align),
     }));
     function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
-        .size = Type.u64.abiSize(pt),
+        .size = Type.u64.abiSize(zcu),
         .alignment = Alignment.min(
             call_info.stack_align,
             Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -851,7 +851,7 @@ pub fn generate(
     }));
     function.frame_allocs.set(@intFromEnum(FrameIndex.spill_frame), FrameAlloc.init(.{
         .size = 0,
-        .alignment = Type.u64.abiAlignment(pt),
+        .alignment = Type.u64.abiAlignment(zcu),
     }));
 
     function.gen() catch |err| switch (err) {
@@ -1245,7 +1245,7 @@ fn gen(func: *Func) !void {
                 // The address where to store the return value for the caller is in a
                 // register which the callee is free to clobber. Therefore, we purposely
                 // spill it to stack immediately.
-                const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.u64, pt));
+                const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(Type.u64, zcu));
                 try func.genSetMem(
                     .{ .frame = frame_index },
                     0,
@@ -1379,9 +1379,9 @@ fn gen(func: *Func) !void {
 
 fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
         .Enum => {
             const enum_ty = Type.fromInterned(lazy_sym.ty);
             wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
@@ -1390,7 +1390,7 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
             const ret_reg = param_regs[0];
             const enum_mcv: MCValue = .{ .register = param_regs[1] };
 
-            const exitlude_jump_relocs = try func.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(mod));
+            const exitlude_jump_relocs = try func.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(zcu));
             defer func.gpa.free(exitlude_jump_relocs);
 
             const data_reg, const data_lock = try func.allocReg(.int);
@@ -1410,7 +1410,7 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
             defer func.register_manager.unlockReg(cmp_lock);
 
             var data_off: i32 = 0;
-            const tag_names = enum_ty.enumFields(mod);
+            const tag_names = enum_ty.enumFields(zcu);
             for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
                 const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
                 const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
@@ -1944,32 +1944,32 @@ fn memSize(func: *Func, ty: Type) Memory.Size {
     const zcu = pt.zcu;
     return switch (ty.zigTypeTag(zcu)) {
         .Float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
-        else => Memory.Size.fromByteSize(ty.abiSize(pt)),
+        else => Memory.Size.fromByteSize(ty.abiSize(zcu)),
     };
 }
 
 fn splitType(func: *Func, ty: Type) ![2]Type {
-    const pt = func.pt;
-    const classes = mem.sliceTo(&abi.classifySystem(ty, pt), .none);
+    const zcu = func.pt.zcu;
+    const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
     var parts: [2]Type = undefined;
     if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
         part.* = switch (class) {
             .integer => switch (part_i) {
                 0 => Type.u64,
                 1 => part: {
-                    const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?;
-                    const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8));
-                    break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) {
+                    const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?;
+                    const elem_ty = try func.pt.intType(.unsigned, @intCast(elem_size * 8));
+                    break :part switch (@divExact(ty.abiSize(zcu) - 8, elem_size)) {
                         1 => elem_ty,
-                        else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
+                        else => |len| try func.pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
                     };
                 },
                 else => unreachable,
             },
             else => return func.fail("TODO: splitType class {}", .{class}),
         };
-    } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts;
-    return func.fail("TODO implement splitType for {}", .{ty.fmt(pt)});
+    } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
+    return func.fail("TODO implement splitType for {}", .{ty.fmt(func.pt)});
 }
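
`splitType` decomposes a two-slot aggregate into a `u64` head plus a tail sized from the remaining bytes, then double-checks that the parts add back up to the original ABI size. Worked through the arithmetic above (illustrative sizes; which types actually reach this path is decided by `classifySystem`):

    // abi size 16, abi alignment 8:
    //   elem_size = min(8, 8) = 8; (16 - 8) / 8 = 1  ->  parts = { u64, u64 }
    // abi size 12, abi alignment 4:
    //   elem_size = min(4, 8) = 4; (12 - 8) / 4 = 1  ->  parts = { u64, u32 }
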
 
 /// Truncates the value in the register in place.
@@ -1979,7 +1979,7 @@ fn truncateRegister(func: *Func, ty: Type, reg: Register) !void {
     const zcu = pt.zcu;
     const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
         .signedness = .unsigned,
-        .bits = @intCast(ty.bitSize(pt)),
+        .bits = @intCast(ty.bitSize(zcu)),
     };
     assert(reg.class() == .int);
 
@@ -2081,10 +2081,10 @@ fn allocMemPtr(func: *Func, inst: Air.Inst.Index) !FrameIndex {
     const ptr_ty = func.typeOfIndex(inst);
     const val_ty = ptr_ty.childType(zcu);
     return func.allocFrameIndex(FrameAlloc.init(.{
-        .size = math.cast(u32, val_ty.abiSize(pt)) orelse {
+        .size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
             return func.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
         },
-        .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"),
+        .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
     }));
 }
 
@@ -2118,7 +2118,7 @@ fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool
     const pt = func.pt;
     const zcu = pt.zcu;
 
-    const bit_size = elem_ty.bitSize(pt);
+    const bit_size = elem_ty.bitSize(zcu);
     const min_size: u64 = switch (elem_ty.zigTypeTag(zcu)) {
         .Float => if (func.hasFeature(.d)) 64 else 32,
         .Vector => 256, // TODO: calculate it from avl * vsew
@@ -2133,7 +2133,7 @@ fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool
         return func.fail("did you forget to extend vector registers before allocating", .{});
     }
 
-    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, pt));
+    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(elem_ty, zcu));
     return .{ .load_frame = .{ .index = frame_index } };
 }
 
@@ -2368,7 +2368,7 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
                 });
             },
             .Int => {
-                const size = ty.bitSize(pt);
+                const size = ty.bitSize(zcu);
                 if (!math.isPowerOfTwo(size))
                     return func.fail("TODO: airNot non-pow 2 int size", .{});
 
@@ -2399,11 +2399,12 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
 
 fn airSlice(func: *Func, inst: Air.Inst.Index) !void {
     const pt = func.pt;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const slice_ty = func.typeOfIndex(inst);
-    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
+    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
 
     const ptr_ty = func.typeOf(bin_op.lhs);
     try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs });
@@ -2411,7 +2412,7 @@ fn airSlice(func: *Func, inst: Air.Inst.Index) !void {
     const len_ty = func.typeOf(bin_op.rhs);
     try func.genSetMem(
         .{ .frame = frame_index },
-        @intCast(ptr_ty.abiSize(pt)),
+        @intCast(ptr_ty.abiSize(zcu)),
         len_ty,
         .{ .air_ref = bin_op.rhs },
     );
@@ -2428,8 +2429,8 @@ fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 
     const dst_ty = func.typeOfIndex(inst);
     if (dst_ty.isAbiInt(zcu)) {
-        const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
-        const bit_size: u32 = @intCast(dst_ty.bitSize(pt));
+        const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
+        const bit_size: u32 = @intCast(dst_ty.bitSize(zcu));
         if (abi_size * 8 > bit_size) {
             const dst_lock = switch (dst_mcv) {
                 .register => |dst_reg| func.register_manager.lockRegAssumeUnused(dst_reg),
@@ -2443,7 +2444,7 @@ fn airBinOp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
                 const tmp_reg, const tmp_lock = try func.allocReg(.int);
                 defer func.register_manager.unlockReg(tmp_lock);
 
-                const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1));
+                const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1));
                 const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
                 try func.genSetReg(hi_ty, tmp_reg, hi_mcv);
                 try func.truncateRegister(dst_ty, tmp_reg);
@@ -2464,6 +2465,7 @@ fn binOp(
 ) !MCValue {
     _ = maybe_inst;
     const pt = func.pt;
+    const zcu = pt.zcu;
     const lhs_ty = func.typeOf(lhs_air);
     const rhs_ty = func.typeOf(rhs_air);
 
@@ -2480,9 +2482,9 @@ fn binOp(
     }
 
     // we don't yet support certain operand sizes for these operations
-    switch (lhs_ty.zigTypeTag(pt.zcu)) {
+    switch (lhs_ty.zigTypeTag(zcu)) {
         .Vector => {}, // works differently and fails in a different place
-        else => if (lhs_ty.bitSize(pt) > 64) return func.fail("TODO: binOp >= 64 bits", .{}),
+        else => if (lhs_ty.bitSize(zcu) > 64) return func.fail("TODO: binOp >= 64 bits", .{}),
     }
 
     const lhs_mcv = try func.resolveInst(lhs_air);
@@ -2533,7 +2535,7 @@ fn genBinOp(
 ) !void {
     const pt = func.pt;
     const zcu = pt.zcu;
-    const bit_size = lhs_ty.bitSize(pt);
+    const bit_size = lhs_ty.bitSize(zcu);
 
     const is_unsigned = lhs_ty.isUnsignedInt(zcu);
 
@@ -2646,7 +2648,7 @@ fn genBinOp(
                 },
                 .Vector => {
                     const num_elem = lhs_ty.vectorLen(zcu);
-                    const elem_size = lhs_ty.childType(zcu).bitSize(pt);
+                    const elem_size = lhs_ty.childType(zcu).bitSize(zcu);
 
                     const child_ty = lhs_ty.childType(zcu);
 
@@ -2753,7 +2755,7 @@ fn genBinOp(
             defer func.register_manager.unlockReg(tmp_lock);
 
             // RISC-V has no immediate mul, so we copy the size to a temporary register
-            const elem_size = lhs_ty.elemType2(zcu).abiSize(pt);
+            const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
             const elem_size_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = elem_size });
 
             try func.genBinOp(
@@ -2990,7 +2992,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
                     try func.genSetMem(
                         .{ .frame = offset.index },
-                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
                         lhs_ty,
                         add_result,
                     );
@@ -3016,7 +3018,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
                     try func.genSetMem(
                         .{ .frame = offset.index },
-                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
                         Type.u1,
                         .{ .register = overflow_reg },
                     );
@@ -3053,7 +3055,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
                     try func.genSetMem(
                         .{ .frame = offset.index },
-                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
                         lhs_ty,
                         add_result,
                     );
@@ -3079,7 +3081,7 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
                     try func.genSetMem(
                         .{ .frame = offset.index },
-                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+                        offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
                         Type.u1,
                         .{ .register = overflow_reg },
                     );
@@ -3126,7 +3128,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
         try func.genSetMem(
             .{ .frame = offset.index },
-            offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+            offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
             lhs_ty,
             .{ .register = dest_reg },
         );
@@ -3155,7 +3157,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
                 try func.genSetMem(
                     .{ .frame = offset.index },
-                    offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+                    offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
                     Type.u1,
                     .{ .register = overflow_reg },
                 );
@@ -3203,7 +3205,7 @@ fn airSubWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
 
                         try func.genSetMem(
                             .{ .frame = offset.index },
-                            offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+                            offset.off + @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
                             Type.u1,
                             .{ .register = overflow_reg },
                         );
@@ -3236,8 +3238,8 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
         // genSetReg needs to support register_offset src_mcv for this to be true.
         const result_mcv = try func.allocRegOrMem(tuple_ty, inst, false);
 
-        const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt));
-        const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, pt));
+        const result_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
+        const overflow_off: i32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
 
         const dest_reg, const dest_lock = try func.allocReg(.int);
         defer func.register_manager.unlockReg(dest_lock);
@@ -3320,11 +3322,11 @@ fn airShlSat(func: *Func, inst: Air.Inst.Index) !void {
 }
 
 fn airOptionalPayload(func: *Func, inst: Air.Inst.Index) !void {
-    const pt = func.pt;
+    const zcu = func.pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = result: {
         const pl_ty = func.typeOfIndex(inst);
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         const opt_mcv = try func.resolveInst(ty_op.operand);
         if (func.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -3368,11 +3370,11 @@ fn airUnwrapErrErr(func: *Func, inst: Air.Inst.Index) !void {
             break :result .{ .immediate = 0 };
         }
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             break :result operand;
         }
 
-        const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, pt));
+        const err_off: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu));
 
         switch (operand) {
             .register => |reg| {
@@ -3421,9 +3423,9 @@ fn genUnwrapErrUnionPayloadMir(
     const payload_ty = err_union_ty.errorUnionPayload(zcu);
 
     const result: MCValue = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
-        const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt));
+        const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, zcu));
         switch (err_union) {
             .load_frame => |frame_addr| break :result .{ .load_frame = .{
                 .index = frame_addr.index,
@@ -3497,7 +3499,7 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = result: {
         const pl_ty = func.typeOf(ty_op.operand);
-        if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 };
+        if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 };
 
         const opt_ty = func.typeOfIndex(inst);
         const pl_mcv = try func.resolveInst(ty_op.operand);
@@ -3514,7 +3516,7 @@ fn airWrapOptional(func: *Func, inst: Air.Inst.Index) !void {
         try func.genCopy(pl_ty, opt_mcv, pl_mcv);
 
         if (!same_repr) {
-            const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
+            const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
             switch (opt_mcv) {
                 .load_frame => |frame_addr| {
                     try func.genCopy(pl_ty, opt_mcv, pl_mcv);
@@ -3545,11 +3547,11 @@ fn airWrapErrUnionPayload(func: *Func, inst: Air.Inst.Index) !void {
     const operand = try func.resolveInst(ty_op.operand);
 
     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 };
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 };
 
-        const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
-        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
-        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+        const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
         try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
         try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
         break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3569,11 +3571,11 @@ fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void {
     const err_ty = eu_ty.errorUnionSet(zcu);
 
     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try func.resolveInst(ty_op.operand);
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try func.resolveInst(ty_op.operand);
 
-        const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
-        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
-        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+        const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
         try func.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .{ .undef = null });
         const operand = try func.resolveInst(ty_op.operand);
         try func.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3717,7 +3719,7 @@ fn airSliceElemVal(func: *Func, inst: Air.Inst.Index) !void {
 
     const result: MCValue = result: {
         const elem_ty = func.typeOfIndex(inst);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         const slice_ty = func.typeOf(bin_op.lhs);
         const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
@@ -3748,7 +3750,7 @@ fn genSliceElemPtr(func: *Func, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     defer if (slice_mcv_lock) |lock| func.register_manager.unlockReg(lock);
 
     const elem_ty = slice_ty.childType(zcu);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_size = elem_ty.abiSize(zcu);
 
     const index_ty = func.typeOf(rhs);
     const index_mcv = try func.resolveInst(rhs);
@@ -3792,14 +3794,14 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void {
         const index_ty = func.typeOf(bin_op.rhs);
 
         const elem_ty = array_ty.childType(zcu);
-        const elem_abi_size = elem_ty.abiSize(pt);
+        const elem_abi_size = elem_ty.abiSize(zcu);
 
         const addr_reg, const addr_reg_lock = try func.allocReg(.int);
         defer func.register_manager.unlockReg(addr_reg_lock);
 
         switch (array_mcv) {
             .register => {
-                const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
+                const frame_index = try func.allocFrameIndex(FrameAlloc.initType(array_ty, zcu));
                 try func.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv);
                 try func.genSetReg(Type.u64, addr_reg, .{ .lea_frame = .{ .index = frame_index } });
             },
@@ -3870,7 +3872,7 @@ fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void {
 
     const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else result: {
         const elem_ty = base_ptr_ty.elemType2(zcu);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         const base_ptr_mcv = try func.resolveInst(bin_op.lhs);
         const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) {
@@ -3970,11 +3972,12 @@ fn airSetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
 
 fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
     const pt = func.pt;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const tag_ty = func.typeOfIndex(inst);
     const union_ty = func.typeOf(ty_op.operand);
-    const layout = union_ty.unionGetLayout(pt);
+    const layout = union_ty.unionGetLayout(zcu);
 
     if (layout.tag_size == 0) {
         return func.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
@@ -3985,7 +3988,7 @@ fn airGetUnionTag(func: *Func, inst: Air.Inst.Index) !void {
     const frame_mcv = try func.allocRegOrMem(union_ty, null, false);
     try func.genCopy(union_ty, frame_mcv, operand);
 
-    const tag_abi_size = tag_ty.abiSize(pt);
+    const tag_abi_size = tag_ty.abiSize(zcu);
     const result_reg, const result_lock = try func.allocReg(.int);
     defer func.register_manager.unlockReg(result_lock);
 
@@ -4034,7 +4037,7 @@ fn airClz(func: *Func, inst: Air.Inst.Index) !void {
         else
             (try func.allocRegOrMem(func.typeOfIndex(inst), inst, true)).register;
 
-        const bit_size = ty.bitSize(func.pt);
+        const bit_size = ty.bitSize(func.pt.zcu);
         if (!math.isPowerOfTwo(bit_size)) try func.truncateRegister(ty, src_reg);
 
         if (bit_size > 64) {
@@ -4081,6 +4084,7 @@ fn airPopcount(func: *Func, inst: Air.Inst.Index) !void {
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
         const pt = func.pt;
+        const zcu = pt.zcu;
 
         const operand = try func.resolveInst(ty_op.operand);
         const src_ty = func.typeOf(ty_op.operand);
@@ -4090,7 +4094,7 @@ fn airPopcount(func: *Func, inst: Air.Inst.Index) !void {
         const dst_reg, const dst_lock = try func.allocReg(.int);
         defer func.register_manager.unlockReg(dst_lock);
 
-        const bit_size = src_ty.bitSize(pt);
+        const bit_size = src_ty.bitSize(zcu);
         switch (bit_size) {
             32, 64 => {},
             1...31, 33...63 => try func.truncateRegister(src_ty, operand_reg),
@@ -4283,12 +4287,13 @@ fn airBitReverse(func: *Func, inst: Air.Inst.Index) !void {
 
 fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const pt = func.pt;
+    const zcu = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
         const ty = func.typeOf(un_op);
 
         const operand = try func.resolveInst(un_op);
-        const operand_bit_size = ty.bitSize(pt);
+        const operand_bit_size = ty.bitSize(zcu);
 
         if (!math.isPowerOfTwo(operand_bit_size))
             return func.fail("TODO: airUnaryMath non-pow 2", .{});
@@ -4300,7 +4305,7 @@ fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
         const dst_reg, const dst_lock = try func.allocReg(dst_class);
         defer func.register_manager.unlockReg(dst_lock);
 
-        switch (ty.zigTypeTag(pt.zcu)) {
+        switch (ty.zigTypeTag(zcu)) {
             .Float => {
                 assert(dst_class == .float);
 
@@ -4397,7 +4402,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
     const elem_ty = func.typeOfIndex(inst);
 
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits(pt))
+        if (!elem_ty.hasRuntimeBits(zcu))
             break :result .none;
 
         const ptr = try func.resolveInst(ty_op.operand);
@@ -4405,7 +4410,7 @@ fn airLoad(func: *Func, inst: Air.Inst.Index) !void {
         if (func.liveness.isUnused(inst) and !is_volatile)
             break :result .unreach;
 
-        const elem_size = elem_ty.abiSize(pt);
+        const elem_size = elem_ty.abiSize(zcu);
 
         const dst_mcv: MCValue = blk: {
             // The MCValue that holds the pointer can be re-used as the value.
@@ -4544,7 +4549,7 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const container_ty = ptr_container_ty.childType(zcu);
 
     const field_offset: i32 = switch (container_ty.containerLayout(zcu)) {
-        .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, pt)),
+        .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, zcu)),
         .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
             (if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
             ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
@@ -4572,10 +4577,10 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
         const src_mcv = try func.resolveInst(operand);
         const struct_ty = func.typeOf(operand);
         const field_ty = struct_ty.structFieldType(index, zcu);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         const field_off: u32 = switch (struct_ty.containerLayout(zcu)) {
-            .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, pt) * 8),
+            .auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8),
             .@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type|
                 pt.structPackedFieldBitOffset(struct_type, index)
             else
@@ -4615,11 +4620,11 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
                 break :result if (field_off == 0) dst_mcv else try func.copyToNewRegister(inst, dst_mcv);
             },
             .load_frame => {
-                const field_abi_size: u32 = @intCast(field_ty.abiSize(pt));
+                const field_abi_size: u32 = @intCast(field_ty.abiSize(zcu));
                 if (field_off % 8 == 0) {
                     const field_byte_off = @divExact(field_off, 8);
                     const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref();
-                    const field_bit_size = field_ty.bitSize(pt);
+                    const field_bit_size = field_ty.bitSize(zcu);
 
                     if (field_abi_size <= 8) {
                         const int_ty = try pt.intType(
@@ -4635,7 +4640,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
                         break :result try func.copyToNewRegister(inst, dst_mcv);
                     }
 
-                    const container_abi_size: u32 = @intCast(struct_ty.abiSize(pt));
+                    const container_abi_size: u32 = @intCast(struct_ty.abiSize(zcu));
                     const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and
                         func.reuseOperand(inst, operand, 0, src_mcv))
                         off_mcv
@@ -4880,7 +4885,7 @@ fn genCall(
                 try reg_locks.appendSlice(&func.register_manager.lockRegs(2, regs));
             },
             .indirect => |reg_off| {
-                frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, pt));
+                frame_index.* = try func.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu));
                 try func.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg);
                 try func.register_manager.getReg(reg_off.reg, null);
                 try reg_locks.append(func.register_manager.lockReg(reg_off.reg));
@@ -4893,7 +4898,7 @@ fn genCall(
         .none, .unreach => {},
         .indirect => |reg_off| {
             const ret_ty = Type.fromInterned(fn_info.return_type);
-            const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt));
+            const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu));
             try func.genSetReg(Type.u64, reg_off.reg, .{
                 .lea_frame = .{ .index = frame_index, .off = -reg_off.off },
             });
@@ -5013,7 +5018,7 @@ fn airRet(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
         .register_pair,
         => {
             if (ret_ty.isVector(zcu)) {
-                const bit_size = ret_ty.totalVectorBits(pt);
+                const bit_size = ret_ty.totalVectorBits(zcu);
 
                 // set the vtype to hold the entire vector's contents in a single element
                 try func.setVl(.zero, 0, .{
@@ -5113,7 +5118,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
                     .ErrorSet => Type.anyerror,
                     .Optional => blk: {
                         const payload_ty = lhs_ty.optionalChild(zcu);
-                        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                             break :blk Type.u1;
                         } else if (lhs_ty.isPtrLikeOptional(zcu)) {
                             break :blk Type.u64;
@@ -5289,7 +5294,7 @@ fn isNull(func: *Func, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
     const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
         .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
     else
-        .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
+        .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
 
     const return_mcv = try func.allocRegOrMem(func.typeOfIndex(inst), inst, true);
     assert(return_mcv == .register); // should not be larger than 8 bytes
@@ -5472,11 +5477,10 @@ fn airIsErrPtr(func: *Func, inst: Air.Inst.Index) !void {
 /// Result is in the return register.
 fn isErr(func: *Func, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
     _ = maybe_inst;
-    const pt = func.pt;
-    const zcu = pt.zcu;
+    const zcu = func.pt.zcu;
     const err_ty = eu_ty.errorUnionSet(zcu);
     if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false
-    const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), pt));
+    const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
 
     const return_reg, const return_lock = try func.allocReg(.int);
     defer func.register_manager.unlockReg(return_lock);
@@ -5769,12 +5773,12 @@ fn performReloc(func: *Func, inst: Mir.Inst.Index) void {
 }
 
 fn airBr(func: *Func, inst: Air.Inst.Index) !void {
-    const pt = func.pt;
+    const zcu = func.pt.zcu;
     const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
 
     const block_ty = func.typeOfIndex(br.block_inst);
     const block_unused =
-        !block_ty.hasRuntimeBitsIgnoreComptime(pt) or func.liveness.isUnused(br.block_inst);
+        !block_ty.hasRuntimeBitsIgnoreComptime(zcu) or func.liveness.isUnused(br.block_inst);
     const block_tracking = func.inst_tracking.getPtr(br.block_inst).?;
     const block_data = func.blocks.getPtr(br.block_inst).?;
     const first_br = block_data.relocs.items.len == 0;
@@ -6354,6 +6358,8 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
         return std.debug.panic("tried to genCopy immutable: {s}", .{@tagName(dst_mcv)});
     }
 
+    const zcu = func.pt.zcu;
+
     switch (dst_mcv) {
         .register => |reg| return func.genSetReg(ty, reg, src_mcv),
         .register_offset => |dst_reg_off| try func.genSetReg(ty, dst_reg_off.reg, switch (src_mcv) {
@@ -6425,7 +6431,7 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
                     } },
                     else => unreachable,
                 });
-                part_disp += @intCast(dst_ty.abiSize(func.pt));
+                part_disp += @intCast(dst_ty.abiSize(zcu));
             }
         },
         else => return std.debug.panic("TODO: genCopy to {s} from {s}", .{ @tagName(dst_mcv), @tagName(src_mcv) }),
@@ -6622,7 +6628,7 @@ fn genInlineMemset(
 fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!void {
     const pt = func.pt;
     const zcu = pt.zcu;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
 
     const max_size: u32 = switch (reg.class()) {
         .int => 64,
@@ -6729,7 +6735,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
             // size to the total size of the vector, and vmv.x.s will work then
             if (src_reg.class() == .vector) {
                 try func.setVl(.zero, 0, .{
-                    .vsew = switch (ty.totalVectorBits(pt)) {
+                    .vsew = switch (ty.totalVectorBits(zcu)) {
                         8 => .@"8",
                         16 => .@"16",
                         32 => .@"32",
@@ -6848,7 +6854,7 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
                     // and load from it.
                     const len = ty.vectorLen(zcu);
                     const elem_ty = ty.childType(zcu);
-                    const elem_size = elem_ty.abiSize(pt);
+                    const elem_size = elem_ty.abiSize(zcu);
 
                     try func.setVl(.zero, len, .{
                         .vsew = switch (elem_size) {
@@ -6945,7 +6951,7 @@ fn genSetMem(
     const pt = func.pt;
     const zcu = pt.zcu;
 
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
     const dst_ptr_mcv: MCValue = switch (base) {
         .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
         .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
@@ -6995,7 +7001,7 @@ fn genSetMem(
                 const addr_reg = try func.copyToTmpRegister(Type.u64, dst_ptr_mcv);
 
                 const num_elem = ty.vectorLen(zcu);
-                const elem_size = ty.childType(zcu).bitSize(pt);
+                const elem_size = ty.childType(zcu).bitSize(zcu);
 
                 try func.setVl(.zero, num_elem, .{
                     .vsew = switch (elem_size) {
@@ -7083,7 +7089,7 @@ fn genSetMem(
             var part_disp: i32 = disp;
             for (try func.splitType(ty), src_regs) |src_ty, src_reg| {
                 try func.genSetMem(base, part_disp, src_ty, .{ .register = src_reg });
-                part_disp += @intCast(src_ty.abiSize(pt));
+                part_disp += @intCast(src_ty.abiSize(zcu));
             }
         },
         .immediate => {
@@ -7128,10 +7134,10 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
         const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null;
         defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
 
-        const dst_mcv = if (dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and src_mcv != .register_pair and
+        const dst_mcv = if (dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and src_mcv != .register_pair and
             func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
             const dst_mcv = try func.allocRegOrMem(dst_ty, inst, true);
-            try func.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) {
+            try func.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) {
                 .lt => dst_ty,
                 .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty,
                 .gt => src_ty,
@@ -7142,8 +7148,8 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
         if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and
             dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv;
 
-        const abi_size = dst_ty.abiSize(pt);
-        const bit_size = dst_ty.bitSize(pt);
+        const abi_size = dst_ty.abiSize(zcu);
+        const bit_size = dst_ty.bitSize(zcu);
         if (abi_size * 8 <= bit_size) break :result dst_mcv;
 
         return func.fail("TODO: airBitCast {} to {}", .{ src_ty.fmt(pt), dst_ty.fmt(pt) });
@@ -7162,11 +7168,11 @@ fn airArrayToSlice(func: *Func, inst: Air.Inst.Index) !void {
     const array_ty = ptr_ty.childType(zcu);
     const array_len = array_ty.arrayLen(zcu);
 
-    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
+    const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
     try func.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
     try func.genSetMem(
         .{ .frame = frame_index },
-        @intCast(ptr_ty.abiSize(pt)),
+        @intCast(ptr_ty.abiSize(zcu)),
         Type.u64,
         .{ .immediate = array_len },
     );
@@ -7190,21 +7196,21 @@ fn airFloatFromInt(func: *Func, inst: Air.Inst.Index) !void {
         defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
 
         const is_unsigned = dst_ty.isUnsignedInt(zcu);
-        const src_bits = src_ty.bitSize(pt);
-        const dst_bits = dst_ty.bitSize(pt);
+        const src_bits = src_ty.bitSize(zcu);
+        const dst_bits = dst_ty.bitSize(zcu);
 
         switch (src_bits) {
             32, 64 => {},
             else => try func.truncateRegister(src_ty, src_reg),
         }
 
         const int_mod: Mir.FcvtOp = switch (src_bits) {
             8, 16, 32 => if (is_unsigned) .wu else .w,
             64 => if (is_unsigned) .lu else .l,
             else => return func.fail("TODO: airFloatFromInt src size: {d}", .{src_bits}),
         };
 
         const float_mod: enum { s, d } = switch (dst_bits) {
             32 => .s,
             64 => .d,
             else => return func.fail("TODO: airFloatFromInt dst size {d}", .{dst_bits}),
@@ -7250,16 +7256,16 @@ fn airIntFromFloat(func: *Func, inst: Air.Inst.Index) !void {
         const dst_ty = ty_op.ty.toType();
 
         const is_unsigned = dst_ty.isUnsignedInt(zcu);
-        const src_bits = src_ty.bitSize(pt);
-        const dst_bits = dst_ty.bitSize(pt);
+        const src_bits = src_ty.bitSize(zcu);
+        const dst_bits = dst_ty.bitSize(zcu);
 
         const float_mod: enum { s, d } = switch (src_bits) {
             32 => .s,
             64 => .d,
             else => return func.fail("TODO: airIntFromFloat src size {d}", .{src_bits}),
         };
 
         const int_mod: Mir.FcvtOp = switch (dst_bits) {
             32 => if (is_unsigned) .wu else .w,
             8, 16, 64 => if (is_unsigned) .lu else .l,
             else => return func.fail("TODO: airIntFromFloat dst size: {d}", .{dst_bits}),
@@ -7301,12 +7307,13 @@ fn airCmpxchg(func: *Func, inst: Air.Inst.Index, strength: enum { weak, strong }
     _ = strength; // TODO: do something with this
 
     const pt = func.pt;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
     const ptr_ty = func.typeOf(extra.ptr);
     const val_ty = func.typeOf(extra.expected_value);
-    const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+    const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
 
     switch (val_abi_size) {
         1, 2, 4, 8 => {},
@@ -7364,7 +7371,7 @@ fn airCmpxchg(func: *Func, inst: Air.Inst.Index, strength: enum { weak, strong }
     defer func.register_manager.unlockReg(fallthrough_lock);
 
     const jump_back = try func.addInst(.{
-        .tag = if (val_ty.bitSize(pt) <= 32) .lrw else .lrd,
+        .tag = if (val_ty.bitSize(zcu) <= 32) .lrw else .lrd,
         .data = .{ .amo = .{
             .aq = lr_order.aq,
             .rl = lr_order.rl,
@@ -7385,7 +7392,7 @@ fn airCmpxchg(func: *Func, inst: Air.Inst.Index, strength: enum { weak, strong }
     });
 
     _ = try func.addInst(.{
-        .tag = if (val_ty.bitSize(pt) <= 32) .scw else .scd,
+        .tag = if (val_ty.bitSize(zcu) <= 32) .scw else .scd,
         .data = .{ .amo = .{
             .aq = sc_order.aq,
             .rl = sc_order.rl,
@@ -7449,7 +7456,7 @@ fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void {
         const ptr_mcv = try func.resolveInst(pl_op.operand);
 
         const val_ty = func.typeOf(extra.operand);
-        const val_size = val_ty.abiSize(pt);
+        const val_size = val_ty.abiSize(zcu);
         const val_mcv = try func.resolveInst(extra.operand);
 
         if (!math.isPowerOfTwo(val_size))
@@ -7488,7 +7495,7 @@ fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void {
 
         switch (method) {
             .amo => {
-                const is_d = val_ty.abiSize(pt) == 8;
+                const is_d = val_ty.abiSize(zcu) == 8;
                 const is_un = val_ty.isUnsignedInt(zcu);
 
                 const mnem: Mnemonic = switch (op) {
@@ -7587,7 +7594,7 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
     const elem_ty = ptr_ty.childType(zcu);
     const ptr_mcv = try func.resolveInst(atomic_load.ptr);
 
-    const bit_size = elem_ty.bitSize(pt);
+    const bit_size = elem_ty.bitSize(zcu);
     if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
 
     const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
@@ -7634,7 +7641,7 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOr
     const val_ty = func.typeOf(bin_op.rhs);
     const val_mcv = try func.resolveInst(bin_op.rhs);
 
-    const bit_size = val_ty.bitSize(func.pt);
+    const bit_size = val_ty.bitSize(func.pt.zcu);
     if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
 
     switch (order) {
@@ -7679,7 +7686,7 @@ fn airMemset(func: *Func, inst: Air.Inst.Index, safety: bool) !void {
         };
         defer if (src_val_lock) |lock| func.register_manager.unlockReg(lock);
 
-        const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt));
+        const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu));
 
         if (elem_abi_size == 1) {
             const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
@@ -7751,7 +7758,7 @@ fn airMemcpy(func: *Func, inst: Air.Inst.Index) !void {
             const len_reg, const len_lock = try func.allocReg(.int);
             defer func.register_manager.unlockReg(len_lock);
 
-            const elem_size = dst_ty.childType(zcu).abiSize(pt);
+            const elem_size = dst_ty.childType(zcu).abiSize(zcu);
             try func.genBinOp(
                 .mul,
                 .{ .immediate = elem_size },
@@ -7764,7 +7771,7 @@ fn airMemcpy(func: *Func, inst: Air.Inst.Index) !void {
         },
         .One => len: {
             const array_ty = dst_ty.childType(zcu);
-            break :len .{ .immediate = array_ty.arrayLen(zcu) * array_ty.childType(zcu).abiSize(pt) };
+            break :len .{ .immediate = array_ty.arrayLen(zcu) * array_ty.childType(zcu).abiSize(zcu) };
         },
         else => |size| return func.fail("TODO: airMemcpy size {s}", .{@tagName(size)}),
     };
@@ -7862,13 +7869,13 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
     const result: MCValue = result: {
         switch (result_ty.zigTypeTag(zcu)) {
             .Struct => {
-                const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
+                const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
                 if (result_ty.containerLayout(zcu) == .@"packed") {
                     const struct_obj = zcu.typeToStruct(result_ty).?;
                     try func.genInlineMemset(
                         .{ .lea_frame = .{ .index = frame_index } },
                         .{ .immediate = 0 },
-                        .{ .immediate = result_ty.abiSize(pt) },
+                        .{ .immediate = result_ty.abiSize(zcu) },
                     );
 
                     for (elements, 0..) |elem, elem_i_usize| {
@@ -7876,7 +7883,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
                         if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
 
                         const elem_ty = result_ty.structFieldType(elem_i, zcu);
-                        const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt));
+                        const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
                         if (elem_bit_size > 64) {
                             return func.fail(
                                 "TODO airAggregateInit implement packed structs with large fields",
@@ -7884,7 +7891,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
                             );
                         }
 
-                        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
+                        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
                         const elem_abi_bits = elem_abi_size * 8;
                         const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
                         const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
@@ -7910,7 +7917,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
                     if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
 
                     const elem_ty = result_ty.structFieldType(elem_i, zcu);
-                    const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt));
+                    const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
                     const elem_mcv = try func.resolveInst(elem);
                     try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv);
                 }
@@ -7918,8 +7925,8 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
             },
             .Array => {
                 const elem_ty = result_ty.childType(zcu);
-                const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
-                const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
+                const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
+                const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
 
                 for (elements, 0..) |elem, elem_i| {
                     const elem_mcv = try func.resolveInst(elem);
@@ -7979,10 +7986,11 @@ fn airMulAdd(func: *Func, inst: Air.Inst.Index) !void {
 
 fn resolveInst(func: *Func, ref: Air.Inst.Ref) InnerError!MCValue {
     const pt = func.pt;
+    const zcu = pt.zcu;
 
     // If the type has no codegen bits, no need to store it.
     const inst_ty = func.typeOf(ref);
-    if (!inst_ty.hasRuntimeBits(pt))
+    if (!inst_ty.hasRuntimeBits(zcu))
         return .none;
 
     const mcv = if (ref.toIndex()) |inst| mcv: {
@@ -8100,14 +8108,14 @@ fn resolveCallingConventionValues(
             // Return values
             if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = InstTracking.init(.unreach);
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 result.return_value = InstTracking.init(.none);
             } else {
                 var ret_tracking: [2]InstTracking = undefined;
                 var ret_tracking_i: usize = 0;
                 var ret_float_reg_i: usize = 0;
 
-                const classes = mem.sliceTo(&abi.classifySystem(ret_ty, pt), .none);
+                const classes = mem.sliceTo(&abi.classifySystem(ret_ty, zcu), .none);
 
                 for (classes) |class| switch (class) {
                     .integer => {
@@ -8151,7 +8159,7 @@ fn resolveCallingConventionValues(
             var param_float_reg_i: usize = 0;
 
             for (param_types, result.args) |ty, *arg| {
-                if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     assert(cc == .Unspecified);
                     arg.* = .none;
                     continue;
@@ -8160,7 +8168,7 @@ fn resolveCallingConventionValues(
                 var arg_mcv: [2]MCValue = undefined;
                 var arg_mcv_i: usize = 0;
 
-                const classes = mem.sliceTo(&abi.classifySystem(ty, pt), .none);
+                const classes = mem.sliceTo(&abi.classifySystem(ty, zcu), .none);
 
                 for (classes) |class| switch (class) {
                     .integer => {
@@ -8244,8 +8252,7 @@ fn typeOf(func: *Func, inst: Air.Inst.Ref) Type {
 }
 
 fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type {
-    const pt = func.pt;
-    const zcu = pt.zcu;
+    const zcu = func.pt.zcu;
     return func.air.typeOfIndex(inst, &zcu.intern_pool);
 }
 
@@ -8253,23 +8260,23 @@ fn hasFeature(func: *Func, feature: Target.riscv.Feature) bool {
     return Target.riscv.featureSetHas(func.target.cpu.features, feature);
 }
 
-pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
-    const payload_align = payload_ty.abiAlignment(pt);
-    const error_align = Type.anyerror.abiAlignment(pt);
-    if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
+    const payload_align = payload_ty.abiAlignment(zcu);
+    const error_align = Type.anyerror.abiAlignment(zcu);
+    if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return 0;
     } else {
-        return payload_align.forward(Type.anyerror.abiSize(pt));
+        return payload_align.forward(Type.anyerror.abiSize(zcu));
     }
 }
 
-pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
-    const payload_align = payload_ty.abiAlignment(pt);
-    const error_align = Type.anyerror.abiAlignment(pt);
-    if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-        return error_align.forward(payload_ty.abiSize(pt));
+pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
+    const payload_align = payload_ty.abiAlignment(zcu);
+    const error_align = Type.anyerror.abiAlignment(zcu);
+    if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+        return error_align.forward(payload_ty.abiSize(zcu));
     } else {
         return 0;
     }
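
The two offset helpers above derive the error-union layout purely from sizes and alignments, which is why a bare `*Zcu` is now sufficient. The rule can be mirrored standalone with plain integers; a minimal sketch, assuming the usual ABI numbers for `u64` (size 8, align 8), `u8` (1, 1), and `anyerror` (size 2, align 2) — the `euOffsets` helper and test below are illustrative only, not part of this commit:

const std = @import("std");

// Mirrors errUnionPayloadOffset/errUnionErrorOffset for a payload that has
// runtime bits, using plain integers so it runs outside the compiler.
fn euOffsets(payload_size: u64, payload_align: u64, err_size: u64, err_align: u64) struct { payload: u64, err: u64 } {
    if (payload_align >= err_align)
        return .{ .payload = 0, .err = std.mem.alignForward(u64, payload_size, err_align) };
    return .{ .payload = std.mem.alignForward(u64, err_size, payload_align), .err = 0 };
}

test "error union layout" {
    // anyerror!u64: payload align 8 >= error align 2, so payload at 0, error code at 8.
    try std.testing.expectEqual(@as(u64, 8), euOffsets(8, 8, 2, 2).err);
    // anyerror!u8: payload align 1 < error align 2, so error code at 0, payload at 2.
    try std.testing.expectEqual(@as(u64, 2), euOffsets(1, 1, 2, 2).payload);
}
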
src/arch/riscv64/Lower.zig
@@ -49,6 +49,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
     relocs: []const Reloc,
 } {
     const pt = lower.pt;
+    const zcu = pt.zcu;
 
     lower.result_insts = undefined;
     lower.result_relocs = undefined;
@@ -308,11 +309,11 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
 
             const class = rs1.class();
             const ty = compare.ty;
-            const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(pt)) catch {
-                return lower.fail("pseudo_compare size {}", .{ty.bitSize(pt)});
+            const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch {
+                return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)});
             };
 
-            const is_unsigned = ty.isUnsignedInt(pt.zcu);
+            const is_unsigned = ty.isUnsignedInt(zcu);
             const less_than: Mnemonic = if (is_unsigned) .sltu else .slt;
 
             switch (class) {
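
The compare lowering above rounds the operand's bit size up to the next power of two before picking an instruction width, so for example a u17 compare is performed at 32 bits. A quick standalone check of that rounding (illustrative values only):

const std = @import("std");

test "compare width rounds up to a power of two" {
    // Mirrors the ceilPowerOfTwo call in pseudo_compare above.
    try std.testing.expectEqual(@as(u64, 32), try std.math.ceilPowerOfTwo(u64, 17));
    try std.testing.expectEqual(@as(u64, 64), try std.math.ceilPowerOfTwo(u64, 33));
}
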
src/arch/sparc64/CodeGen.zig
@@ -1012,6 +1012,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
+    const zcu = pt.zcu;
     const arg_index = self.arg_index;
     self.arg_index += 1;
 
@@ -1021,7 +1022,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     const mcv = blk: {
         switch (arg) {
             .stack_offset => |off| {
-                const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
+                const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
                     return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
                 };
                 const offset = off + abi_size;
@@ -1211,7 +1212,7 @@ fn airBreakpoint(self: *Self) !void {
 
 fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     // We have a hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you.
@@ -1227,14 +1228,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        switch (operand_ty.zigTypeTag(mod)) {
+        switch (operand_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO byteswap for vectors", .{}),
             .Int => {
-                const int_info = operand_ty.intInfo(mod);
+                const int_info = operand_ty.intInfo(zcu);
                 if (int_info.bits == 8) break :result operand;
 
                 const abi_size = int_info.bits >> 3;
-                const abi_align = operand_ty.abiAlignment(pt);
+                const abi_align = operand_ty.abiAlignment(zcu);
                 const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
                     Endian.big => ASI.asi_primary_little,
                     Endian.little => ASI.asi_primary,
@@ -1409,24 +1410,24 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const lhs_ty = self.typeOf(bin_op.lhs);
 
-        const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
+        const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => unreachable, // Handled by cmp_vector.
-            .Enum => lhs_ty.intTagType(mod),
+            .Enum => lhs_ty.intTagType(zcu),
             .Int => lhs_ty,
             .Bool => Type.u1,
             .Pointer => Type.usize,
             .ErrorSet => Type.u16,
             .Optional => blk: {
-                const payload_ty = lhs_ty.optionalChild(mod);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                const payload_ty = lhs_ty.optionalChild(zcu);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     break :blk Type.u1;
-                } else if (lhs_ty.isPtrLikeOptional(mod)) {
+                } else if (lhs_ty.isPtrLikeOptional(zcu)) {
                     break :blk Type.usize;
                 } else {
                     return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{});
@@ -1436,7 +1437,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
             else => unreachable,
         };
 
-        const int_info = int_ty.intInfo(mod);
+        const int_info = int_ty.intInfo(zcu);
         if (int_info.bits <= 64) {
             _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
                 .lhs = bin_op.lhs,
@@ -1797,16 +1798,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const elem_ty = self.typeOfIndex(inst);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_size = elem_ty.abiSize(zcu);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits(pt))
+        if (!elem_ty.hasRuntimeBits(zcu))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(zcu);
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;
 
@@ -2428,7 +2429,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const is_volatile = false; // TODO
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
@@ -2438,10 +2439,10 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
         const index_mcv = try self.resolveInst(bin_op.rhs);
 
         const slice_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = slice_ty.childType(mod);
-        const elem_size = elem_ty.abiSize(pt);
+        const elem_ty = slice_ty.childType(zcu);
+        const elem_size = elem_ty.abiSize(zcu);
 
-        const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
+        const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
 
         const index_lock: ?RegisterLock = if (index_mcv == .register)
             self.register_manager.lockRegAssumeUnused(index_mcv.register)
@@ -2553,10 +2554,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const operand = extra.struct_operand;
     const index = extra.field_index;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const pt = self.pt;
+        const zcu = self.pt.zcu;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -2687,13 +2688,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.typeOf(ty_op.operand);
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
         const mcv = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
+        if (!payload_ty.hasRuntimeBits(zcu)) break :result mcv;
 
         return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
     };
@@ -2702,12 +2703,12 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.typeOf(ty_op.operand);
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
-        if (!payload_ty.hasRuntimeBits(pt)) break :result MCValue.none;
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
+        if (!payload_ty.hasRuntimeBits(zcu)) break :result MCValue.none;
 
         return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
     };
@@ -2717,13 +2718,13 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
 /// E to E!T
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = ty_op.ty.toType();
-        const payload_ty = error_union_ty.errorUnionPayload(mod);
+        const payload_ty = error_union_ty.errorUnionPayload(zcu);
         const mcv = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits(pt)) break :result mcv;
+        if (!payload_ty.hasRuntimeBits(zcu)) break :result mcv;
 
         return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
     };
@@ -2744,7 +2745,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         const optional_ty = self.typeOfIndex(inst);
 
         // Optional with a zero-bit payload type is just a boolean true
-        if (optional_ty.abiSize(pt) == 1)
+        if (optional_ty.abiSize(pt.zcu) == 1)
             break :result MCValue{ .immediate = 1 };
 
         return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@@ -2779,10 +2780,10 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignme
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = self.typeOfIndex(inst).childType(mod);
+    const zcu = pt.zcu;
+    const elem_ty = self.typeOfIndex(inst).childType(zcu);
 
-    if (!elem_ty.hasRuntimeBits(pt)) {
+    if (!elem_ty.hasRuntimeBits(zcu)) {
         // As this stack item will never be dereferenced at runtime,
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live, as non-zero-sized
@@ -2790,21 +2791,22 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+    const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(pt);
+    const abi_align = elem_ty.abiAlignment(zcu);
     return self.allocMem(inst, abi_size, abi_align);
 }
 
 fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     const pt = self.pt;
+    const zcu = pt.zcu;
     const elem_ty = self.typeOfIndex(inst);
-    const abi_size = math.cast(u32, elem_ty.abiSize(pt)) orelse {
+    const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
     };
-    const abi_align = elem_ty.abiAlignment(pt);
+    const abi_align = elem_ty.abiAlignment(zcu);
     self.stack_align = self.stack_align.max(abi_align);
 
     if (reg_ok) {
@@ -2847,7 +2849,7 @@ fn binOp(
     metadata: ?BinOpMetadata,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     switch (tag) {
         .add,
         .sub,
@@ -2857,12 +2859,12 @@ fn binOp(
         .xor,
         .cmp_eq,
         => {
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Float => return self.fail("TODO binary operations on floats", .{}),
                 .Vector => return self.fail("TODO binary operations on vectors", .{}),
                 .Int => {
-                    assert(lhs_ty.eql(rhs_ty, mod));
-                    const int_info = lhs_ty.intInfo(mod);
+                    assert(lhs_ty.eql(rhs_ty, zcu));
+                    const int_info = lhs_ty.intInfo(zcu);
                     if (int_info.bits <= 64) {
                         // Only say yes if the operation is
                         // commutative, i.e. we can swap both of the
@@ -2931,10 +2933,10 @@ fn binOp(
             const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
 
             // Truncate if necessary
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Vector => return self.fail("TODO binary operations on vectors", .{}),
                 .Int => {
-                    const int_info = lhs_ty.intInfo(mod);
+                    const int_info = lhs_ty.intInfo(zcu);
                     if (int_info.bits <= 64) {
                         const result_reg = result.register;
                         try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
@@ -2948,11 +2950,11 @@ fn binOp(
         },
 
         .div_trunc => {
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Vector => return self.fail("TODO binary operations on vectors", .{}),
                 .Int => {
-                    assert(lhs_ty.eql(rhs_ty, mod));
-                    const int_info = lhs_ty.intInfo(mod);
+                    assert(lhs_ty.eql(rhs_ty, zcu));
+                    const int_info = lhs_ty.intInfo(zcu);
                     if (int_info.bits <= 64) {
                         const rhs_immediate_ok = switch (tag) {
                             .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
@@ -2981,14 +2983,14 @@ fn binOp(
         },
 
         .ptr_add => {
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Pointer => {
                     const ptr_ty = lhs_ty;
-                    const elem_ty = switch (ptr_ty.ptrSize(mod)) {
-                        .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
-                        else => ptr_ty.childType(mod),
+                    const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
+                        .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
+                        else => ptr_ty.childType(zcu),
                     };
-                    const elem_size = elem_ty.abiSize(pt);
+                    const elem_size = elem_ty.abiSize(zcu);
 
                     if (elem_size == 1) {
                         const base_tag: Mir.Inst.Tag = switch (tag) {
@@ -3013,7 +3015,7 @@ fn binOp(
         .bool_and,
         .bool_or,
         => {
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Bool => {
                     assert(lhs != .immediate); // should have been handled by Sema
                     assert(rhs != .immediate); // should have been handled by Sema
@@ -3043,10 +3045,10 @@ fn binOp(
             const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
 
             // Truncate if necessary
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Vector => return self.fail("TODO binary operations on vectors", .{}),
                 .Int => {
-                    const int_info = lhs_ty.intInfo(mod);
+                    const int_info = lhs_ty.intInfo(zcu);
                     if (int_info.bits <= 64) {
                         // 32 and 64 bit operands don't need truncating
                         if (int_info.bits == 32 or int_info.bits == 64) return result;
@@ -3065,10 +3067,10 @@ fn binOp(
         .shl_exact,
         .shr_exact,
         => {
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Vector => return self.fail("TODO binary operations on vectors", .{}),
                 .Int => {
-                    const int_info = lhs_ty.intInfo(mod);
+                    const int_info = lhs_ty.intInfo(zcu);
                     if (int_info.bits <= 64) {
                         const rhs_immediate_ok = rhs == .immediate;
 
@@ -3388,8 +3390,8 @@ fn binOpRegister(
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const block_data = self.blocks.getPtr(block).?;
 
-    const pt = self.pt;
-    if (self.typeOf(operand).hasRuntimeBits(pt)) {
+    const zcu = self.pt.zcu;
+    if (self.typeOf(operand).hasRuntimeBits(zcu)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -3509,17 +3511,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
 /// Given an error union, returns the payload
 fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_ty = error_union_ty.errorUnionSet(mod);
-    const payload_ty = error_union_ty.errorUnionPayload(mod);
-    if (err_ty.errorSetIsEmpty(mod)) {
+    const zcu = pt.zcu;
+    const err_ty = error_union_ty.errorUnionSet(zcu);
+    const payload_ty = error_union_ty.errorUnionPayload(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) {
         return error_union_mcv;
     }
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return MCValue.none;
     }
 
-    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
+    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
     switch (error_union_mcv) {
         .register => return self.fail("TODO errUnionPayload for registers", .{}),
         .stack_offset => |off| {
@@ -3731,6 +3733,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg
 
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
     const pt = self.pt;
+    const zcu = pt.zcu;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -3929,21 +3932,21 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             // The value is in memory at a hard-coded address.
             // If the type is a pointer, it means the pointer address is at this memory location.
             try self.genSetReg(ty, reg, .{ .immediate = addr });
-            try self.genLoad(reg, reg, i13, 0, ty.abiSize(pt));
+            try self.genLoad(reg, reg, i13, 0, ty.abiSize(zcu));
         },
         .stack_offset => |off| {
             const real_offset = realStackOffset(off);
             const simm13 = math.cast(i13, real_offset) orelse
                 return self.fail("TODO larger stack offsets: {}", .{real_offset});
-            try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(pt));
+            try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(zcu));
         },
     }
 }
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size = ty.abiSize(pt);
+    const zcu = pt.zcu;
+    const abi_size = ty.abiSize(zcu);
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -3951,7 +3954,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             if (!self.wantSafety())
                 return; // The already existing value will do just fine.
             // TODO Upgrade this to a memset call when we have that available.
-            switch (ty.abiSize(pt)) {
+            switch (ty.abiSize(zcu)) {
                 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -3977,11 +3980,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0, mod);
+            const wrapped_ty = ty.structFieldType(0, zcu);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
-            const overflow_bit_ty = ty.structFieldType(1, mod);
-            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, pt)));
+            const overflow_bit_ty = ty.structFieldType(1, zcu);
+            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
             const cond_reg = try self.register_manager.allocReg(null, gp);
 
             // TODO handle floating point CCRs
@@ -4154,14 +4157,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
 
 fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const error_type = ty.errorUnionSet(mod);
-    const payload_type = ty.errorUnionPayload(mod);
+    const zcu = pt.zcu;
+    const error_type = ty.errorUnionSet(zcu);
+    const payload_type = ty.errorUnionPayload(zcu);
 
-    if (!error_type.hasRuntimeBits(pt)) {
+    if (!error_type.hasRuntimeBits(zcu)) {
         return MCValue{ .immediate = 0 }; // always false
-    } else if (!payload_type.hasRuntimeBits(pt)) {
-        if (error_type.abiSize(pt) <= 8) {
+    } else if (!payload_type.hasRuntimeBits(zcu)) {
+        if (error_type.abiSize(zcu) <= 8) {
             const reg_mcv: MCValue = switch (operand) {
                 .register => operand,
                 else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
@@ -4253,9 +4256,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const elem_ty = ptr_ty.childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const zcu = pt.zcu;
+    const elem_ty = ptr_ty.childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     switch (ptr) {
         .none => unreachable,
@@ -4446,9 +4449,9 @@ fn realStackOffset(off: u32) u32 {
 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_info = mod.typeToFunc(fn_ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_info = zcu.typeToFunc(fn_ty).?;
     const cc = fn_info.cc;
     var result: CallMCValues = .{
         .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
@@ -4459,7 +4462,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
     };
     errdefer self.gpa.free(result.args);
 
-    const ret_ty = fn_ty.fnReturnType(mod);
+    const ret_ty = fn_ty.fnReturnType(zcu);
 
     switch (cc) {
         .Naked => {
@@ -4487,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             };
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
-                const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(pt)));
+                const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu)));
                 if (param_size <= 8) {
                     if (next_register < argument_registers.len) {
                         result_arg.* = .{ .register = argument_registers[next_register] };
@@ -4514,12 +4517,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             result.stack_byte_count = next_stack_offset;
             result.stack_align = .@"16";
 
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBits(pt)) {
+            } else if (!ret_ty.hasRuntimeBits(zcu)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(pt));
+                const ret_ty_size: u32 = @intCast(ret_ty.abiSize(zcu));
                 // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
                 if (ret_ty_size <= 8) {
                     result.return_value = switch (role) {
@@ -4542,7 +4545,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
     const ty = self.typeOf(ref);
 
     // If the type has no codegen bits, no need to store it.
-    if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+    if (!ty.hasRuntimeBitsIgnoreComptime(pt.zcu)) return .none;
 
     if (ref.toIndex()) |inst| {
         return self.getResolvedInstValue(inst);
@@ -4656,7 +4659,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
     const pt = self.pt;
-    const abi_size = value_ty.abiSize(pt);
+    const abi_size = value_ty.abiSize(pt.zcu);
 
     switch (ptr) {
         .none => unreachable,
@@ -4698,11 +4701,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
-        const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, pt)));
+        const struct_ty = ptr_ty.childType(zcu);
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
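
Across these backends the rewrite follows one rule: queries that only read layout (abiSize, abiAlignment, bitSize, structFieldOffset, hasRuntimeBits*) now take the `*Zcu` directly, while anything that may intern new values (pt.intType, pt.structPackedFieldBitOffset, resolving AIR values) keeps the `Zcu.PerThread` handle. A sketch of the resulting call-site shape — the surrounding function is hypothetical and compiles only inside the compiler tree:

fn exampleLayoutQueries(func: *Func, ty: Type) !void {
    const pt = func.pt; // keep for operations that can intern
    const zcu = pt.zcu; // enough for pure layout queries
    const abi_size = ty.abiSize(zcu);
    const abi_align = ty.abiAlignment(zcu);
    const int_ty = try pt.intType(.unsigned, 32);
    _ = .{ abi_size, abi_align, int_ty };
}
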
src/arch/wasm/abi.zig
@@ -22,16 +22,15 @@ const direct: [2]Class = .{ .direct, .none };
 /// Classifies a given Zig type to determine how they must be passed
 /// or returned as value within a wasm function.
 /// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const target = mod.getTarget();
-    if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return none;
-    switch (ty.zigTypeTag(mod)) {
+pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
+    const ip = &zcu.intern_pool;
+    const target = zcu.getTarget();
+    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none;
+    switch (ty.zigTypeTag(zcu)) {
         .Struct => {
-            const struct_type = pt.zcu.typeToStruct(ty).?;
+            const struct_type = zcu.typeToStruct(ty).?;
             if (struct_type.layout == .@"packed") {
-                if (ty.bitSize(pt) <= 64) return direct;
+                if (ty.bitSize(zcu) <= 64) return direct;
                 return .{ .direct, .direct };
             }
             if (struct_type.field_types.len > 1) {
@@ -41,13 +40,13 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
             const explicit_align = struct_type.fieldAlign(ip, 0);
             if (explicit_align != .none) {
-                if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(pt)))
+                if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(zcu)))
                     return memory;
             }
-            return classifyType(field_ty, pt);
+            return classifyType(field_ty, zcu);
         },
         .Int, .Enum, .ErrorSet => {
-            const int_bits = ty.intInfo(pt.zcu).bits;
+            const int_bits = ty.intInfo(zcu).bits;
             if (int_bits <= 64) return direct;
             if (int_bits <= 128) return .{ .direct, .direct };
             return memory;
@@ -62,24 +61,24 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
         .Vector => return direct,
         .Array => return memory,
         .Optional => {
-            assert(ty.isPtrLikeOptional(pt.zcu));
+            assert(ty.isPtrLikeOptional(zcu));
             return direct;
         },
         .Pointer => {
-            assert(!ty.isSlice(pt.zcu));
+            assert(!ty.isSlice(zcu));
             return direct;
         },
         .Union => {
-            const union_obj = pt.zcu.typeToUnion(ty).?;
+            const union_obj = zcu.typeToUnion(ty).?;
             if (union_obj.flagsUnordered(ip).layout == .@"packed") {
-                if (ty.bitSize(pt) <= 64) return direct;
+                if (ty.bitSize(zcu) <= 64) return direct;
                 return .{ .direct, .direct };
             }
-            const layout = ty.unionGetLayout(pt);
+            const layout = ty.unionGetLayout(zcu);
             assert(layout.tag_size == 0);
             if (union_obj.field_types.len > 1) return memory;
             const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
-            return classifyType(first_field_ty, pt);
+            return classifyType(first_field_ty, zcu);
         },
         .ErrorUnion,
         .Frame,
@@ -101,29 +100,28 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
 /// Returns the scalar type a given type can represent.
 /// Asserts given type can be represented as scalar, such as
 /// a struct with a single scalar field.
-pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
+pub fn scalarType(ty: Type, zcu: *Zcu) Type {
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Struct => {
-            if (mod.typeToPackedStruct(ty)) |packed_struct| {
-                return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
+            if (zcu.typeToPackedStruct(ty)) |packed_struct| {
+                return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu);
             } else {
-                assert(ty.structFieldCount(mod) == 1);
-                return scalarType(ty.structFieldType(0, mod), pt);
+                assert(ty.structFieldCount(zcu) == 1);
+                return scalarType(ty.structFieldType(0, zcu), zcu);
             }
         },
         .Union => {
-            const union_obj = mod.typeToUnion(ty).?;
+            const union_obj = zcu.typeToUnion(ty).?;
             if (union_obj.flagsUnordered(ip).layout != .@"packed") {
-                const layout = pt.getUnionLayout(union_obj);
+                const layout = Type.getUnionLayout(union_obj, zcu);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
-                    return scalarType(ty.unionTagTypeSafety(mod).?, pt);
+                    return scalarType(ty.unionTagTypeSafety(zcu).?, zcu);
                 }
                 assert(union_obj.field_types.len == 1);
             }
             const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
-            return scalarType(first_field_ty, pt);
+            return scalarType(first_field_ty, zcu);
         },
         else => return ty,
     }
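
Editor's note on the pattern these ABI-helper hunks establish: the rework threads `*Zcu` through pure type queries and keeps `Zcu.PerThread` only where interning or allocation actually happens (e.g. `pt.intType`, `pt.errorIntType` below). A minimal sketch of the call-site migration, with a hypothetical `param_ty` (illustrative, not part of the commit):

    const zcu = pt.zcu;
    // was: abi.classifyType(param_ty, pt) / abi.scalarType(param_ty, pt)
    const classes = abi.classifyType(param_ty, zcu);
    const scalar = abi.scalarType(param_ty, zcu);
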
src/arch/wasm/CodeGen.zig
@@ -788,10 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
     assert(!gop.found_existing);
 
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const val = (try func.air.value(ref, pt)).?;
     const ty = func.typeOf(ref);
-    if (!ty.hasRuntimeBitsIgnoreComptime(pt) and !ty.isInt(mod) and !ty.isError(mod)) {
+    if (!ty.hasRuntimeBitsIgnoreComptime(zcu) and !ty.isInt(zcu) and !ty.isError(zcu)) {
         gop.value_ptr.* = .none;
         return gop.value_ptr.*;
     }
@@ -1001,9 +1001,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
 
 /// Using a given `Type`, returns the corresponding valtype for .auto callconv
 fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    return switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    return switch (ty.zigTypeTag(zcu)) {
         .Float => switch (ty.floatBits(target)) {
             16 => .i32, // stored/loaded as u16
             32 => .f32,
@@ -1011,26 +1011,26 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
             80, 128 => .i32,
             else => unreachable,
         },
-        .Int, .Enum => switch (ty.intInfo(pt.zcu).bits) {
+        .Int, .Enum => switch (ty.intInfo(zcu).bits) {
             0...32 => .i32,
             33...64 => .i64,
             else => .i32,
         },
         .Struct => blk: {
-            if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
+            if (zcu.typeToPackedStruct(ty)) |packed_struct| {
                 const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
                 break :blk typeToValtype(backing_int_ty, pt, target);
             } else {
                 break :blk .i32;
             }
         },
-        .Vector => switch (determineSimdStoreStrategy(ty, pt, target)) {
+        .Vector => switch (determineSimdStoreStrategy(ty, zcu, target)) {
             .direct => .v128,
             .unrolled => .i32,
         },
-        .Union => switch (ty.containerLayout(pt.zcu)) {
+        .Union => switch (ty.containerLayout(zcu)) {
             .@"packed" => blk: {
-                const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(pt)))) catch @panic("out of memory");
+                const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(zcu)))) catch @panic("out of memory");
                 break :blk typeToValtype(int_ty, pt, target);
             },
             else => .i32,
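
The Int/Enum arm above is the core bucketing rule: up to 32 bits maps to i32, 33-64 bits to i64, and anything wider falls back to i32 because it travels by reference as a pointer-sized handle. A standalone sketch of just that rule (an assumed simplification ignoring the float, struct, vector, and union arms):

    const std = @import("std");

    fn intValtype(bits: u16) std.wasm.Valtype {
        return switch (bits) {
            0...32 => .i32,
            33...64 => .i64,
            else => .i32, // wider integers are passed by reference as a 32-bit handle
        };
    }

    test intValtype {
        try std.testing.expect(intValtype(1) == .i32);
        try std.testing.expect(intValtype(48) == .i64);
        try std.testing.expect(intValtype(128) == .i32);
    }
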
@@ -1148,7 +1148,7 @@ fn genFunctype(
     pt: Zcu.PerThread,
     target: std.Target,
 ) !wasm.Type {
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
     defer temp_params.deinit();
     var returns = std.ArrayList(wasm.Valtype).init(gpa);
@@ -1156,30 +1156,30 @@ fn genFunctype(
 
     if (firstParamSRet(cc, return_type, pt, target)) {
         try temp_params.append(.i32); // memory address is always a 32-bit handle
-    } else if (return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+    } else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
         if (cc == .C) {
-            const res_classes = abi.classifyType(return_type, pt);
+            const res_classes = abi.classifyType(return_type, zcu);
             assert(res_classes[0] == .direct and res_classes[1] == .none);
-            const scalar_type = abi.scalarType(return_type, pt);
+            const scalar_type = abi.scalarType(return_type, zcu);
             try returns.append(typeToValtype(scalar_type, pt, target));
         } else {
             try returns.append(typeToValtype(return_type, pt, target));
         }
-    } else if (return_type.isError(mod)) {
+    } else if (return_type.isError(zcu)) {
         try returns.append(.i32);
     }
 
     // param types
     for (params) |param_type_ip| {
         const param_type = Type.fromInterned(param_type_ip);
-        if (!param_type.hasRuntimeBitsIgnoreComptime(pt)) continue;
+        if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
         switch (cc) {
             .C => {
-                const param_classes = abi.classifyType(param_type, pt);
+                const param_classes = abi.classifyType(param_type, zcu);
                 if (param_classes[1] == .none) {
                     if (param_classes[0] == .direct) {
-                        const scalar_type = abi.scalarType(param_type, pt);
+                        const scalar_type = abi.scalarType(param_type, zcu);
                         try temp_params.append(typeToValtype(scalar_type, pt, target));
                     } else {
                         try temp_params.append(typeToValtype(param_type, pt, target));
@@ -1242,10 +1242,10 @@ pub fn generate(
 
 fn genFunc(func: *CodeGen) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_ty = mod.navValue(func.owner_nav).typeOf(mod);
-    const fn_info = mod.typeToFunc(fn_ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
+    const fn_info = zcu.typeToFunc(fn_ty).?;
     var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
     defer func_type.deinit(func.gpa);
     _ = try func.bin_file.storeNavType(func.owner_nav, func_type);
@@ -1273,7 +1273,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
     if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
         const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1);
         const last_inst_ty = func.typeOfIndex(inst);
-        if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(pt) or last_inst_ty.isNoReturn(mod)) {
+        if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(zcu) or last_inst_ty.isNoReturn(zcu)) {
             try func.addTag(.@"unreachable");
         }
     }
@@ -1356,9 +1356,9 @@ const CallWValues = struct {
 
 fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_info = mod.typeToFunc(fn_ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_info = zcu.typeToFunc(fn_ty).?;
     const cc = fn_info.cc;
     var result: CallWValues = .{
         .args = &.{},
@@ -1381,7 +1381,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
     switch (cc) {
         .Unspecified => {
             for (fn_info.param_types.get(ip)) |ty| {
-                if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(zcu)) {
                     continue;
                 }
 
@@ -1391,7 +1391,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
         },
         .C => {
             for (fn_info.param_types.get(ip)) |ty| {
-                const ty_classes = abi.classifyType(Type.fromInterned(ty), pt);
+                const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu);
                 for (ty_classes) |class| {
                     if (class == .none) continue;
                     try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1409,7 +1409,7 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.
     switch (cc) {
         .Unspecified, .Inline => return isByRef(return_type, pt, target),
         .C => {
-            const ty_classes = abi.classifyType(return_type, pt);
+            const ty_classes = abi.classifyType(return_type, pt.zcu);
             if (ty_classes[0] == .indirect) return true;
             if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
             return false;
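
In other words, under the C calling convention the caller passes an sret pointer exactly when classification is `.indirect` or spans two `.direct` values. A condensed, self-contained restatement (the local `Class` mirrors a subset of the abi enum; illustrative only):

    const std = @import("std");

    const Class = enum { direct, indirect, none };

    fn cAbiNeedsSRet(classes: [2]Class) bool {
        return classes[0] == .indirect or
            (classes[0] == .direct and classes[1] == .direct);
    }

    test cAbiNeedsSRet {
        try std.testing.expect(cAbiNeedsSRet(.{ .indirect, .none }));
        try std.testing.expect(cAbiNeedsSRet(.{ .direct, .direct }));
        try std.testing.expect(!cAbiNeedsSRet(.{ .direct, .none }));
    }
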
@@ -1426,16 +1426,16 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
     }
 
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ty_classes = abi.classifyType(ty, pt);
+    const zcu = pt.zcu;
+    const ty_classes = abi.classifyType(ty, zcu);
     assert(ty_classes[0] != .none);
-    switch (ty.zigTypeTag(mod)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Struct, .Union => {
             if (ty_classes[0] == .indirect) {
                 return func.lowerToStack(value);
             }
             assert(ty_classes[0] == .direct);
-            const scalar_type = abi.scalarType(ty, pt);
+            const scalar_type = abi.scalarType(ty, zcu);
             switch (value) {
                 .memory,
                 .memory_offset,
@@ -1450,7 +1450,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
                 return func.lowerToStack(value);
             }
             assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
-            assert(ty.abiSize(pt) == 16);
+            assert(ty.abiSize(zcu) == 16);
             // in this case we have an integer or float that must be lowered as two i64 values.
             try func.emitWValue(value);
             try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
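
For intuition on the two-i64 case above: a 16-byte integer argument is passed as its low and high 8-byte halves, loaded from consecutive offsets (wasm memory is little-endian). A standalone check of the split:

    const std = @import("std");

    test "a 16-byte integer splits into low and high i64 halves" {
        const x: u128 = 0x1111_2222_3333_4444_5555_6666_7777_8888;
        const lo: u64 = @truncate(x);
        const hi: u64 = @truncate(x >> 64);
        try std.testing.expectEqual(@as(u64, 0x5555_6666_7777_8888), lo);
        try std.testing.expectEqual(@as(u64, 0x1111_2222_3333_4444), hi);
    }
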
@@ -1517,18 +1517,18 @@ fn restoreStackPointer(func: *CodeGen) !void {
 ///
 /// Asserts the type has codegen bits.
 fn allocStack(func: *CodeGen, ty: Type) !WValue {
-    const pt = func.pt;
-    assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+    const zcu = func.pt.zcu;
+    assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
     if (func.initial_stack_value == .none) {
         try func.initializeStack();
     }
 
-    const abi_size = std.math.cast(u32, ty.abiSize(pt)) orelse {
+    const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
         return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
-            ty.fmt(pt), ty.abiSize(pt),
+            ty.fmt(func.pt), ty.abiSize(zcu),
         });
     };
-    const abi_align = ty.abiAlignment(pt);
+    const abi_align = ty.abiAlignment(zcu);
 
     func.stack_alignment = func.stack_alignment.max(abi_align);
 
@@ -1544,22 +1544,22 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
 /// if it is set, to ensure the stack alignment will be set correctly.
 fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_ty = func.typeOfIndex(inst);
-    const pointee_ty = ptr_ty.childType(mod);
+    const pointee_ty = ptr_ty.childType(zcu);
 
     if (func.initial_stack_value == .none) {
         try func.initializeStack();
     }
 
-    if (!pointee_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!pointee_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return func.allocStack(Type.usize); // create a value containing just the stack pointer.
     }
 
-    const abi_alignment = ptr_ty.ptrAlignment(pt);
-    const abi_size = std.math.cast(u32, pointee_ty.abiSize(pt)) orelse {
+    const abi_alignment = ptr_ty.ptrAlignment(zcu);
+    const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
         return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
-            pointee_ty.fmt(pt), pointee_ty.abiSize(pt),
+            pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
         });
     };
     func.stack_alignment = func.stack_alignment.max(abi_alignment);
@@ -1716,9 +1716,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
 /// For a given `Type`, returns true when the type will be passed
 /// by reference rather than by value.
 fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Type,
         .ComptimeInt,
         .ComptimeFloat,
@@ -1738,41 +1738,41 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
 
         .Array,
         .Frame,
-        => return ty.hasRuntimeBitsIgnoreComptime(pt),
+        => return ty.hasRuntimeBitsIgnoreComptime(zcu),
         .Union => {
-            if (mod.typeToUnion(ty)) |union_obj| {
+            if (zcu.typeToUnion(ty)) |union_obj| {
                 if (union_obj.flagsUnordered(ip).layout == .@"packed") {
-                    return ty.abiSize(pt) > 8;
+                    return ty.abiSize(zcu) > 8;
                 }
             }
-            return ty.hasRuntimeBitsIgnoreComptime(pt);
+            return ty.hasRuntimeBitsIgnoreComptime(zcu);
         },
         .Struct => {
-            if (mod.typeToPackedStruct(ty)) |packed_struct| {
+            if (zcu.typeToPackedStruct(ty)) |packed_struct| {
                 return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target);
             }
-            return ty.hasRuntimeBitsIgnoreComptime(pt);
+            return ty.hasRuntimeBitsIgnoreComptime(zcu);
         },
-        .Vector => return determineSimdStoreStrategy(ty, pt, target) == .unrolled,
-        .Int => return ty.intInfo(mod).bits > 64,
-        .Enum => return ty.intInfo(mod).bits > 64,
+        .Vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
+        .Int => return ty.intInfo(zcu).bits > 64,
+        .Enum => return ty.intInfo(zcu).bits > 64,
         .Float => return ty.floatBits(target) > 64,
         .ErrorUnion => {
-            const pl_ty = ty.errorUnionPayload(mod);
-            if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const pl_ty = ty.errorUnionPayload(zcu);
+            if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 return false;
             }
             return true;
         },
         .Optional => {
-            if (ty.isPtrLikeOptional(mod)) return false;
-            const pl_type = ty.optionalChild(mod);
-            if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
-            return pl_type.hasRuntimeBitsIgnoreComptime(pt);
+            if (ty.isPtrLikeOptional(zcu)) return false;
+            const pl_type = ty.optionalChild(zcu);
+            if (pl_type.zigTypeTag(zcu) == .ErrorSet) return false;
+            return pl_type.hasRuntimeBitsIgnoreComptime(zcu);
         },
         .Pointer => {
             // Slices act like structs and will be passed by reference
-            if (ty.isSlice(mod)) return true;
+            if (ty.isSlice(zcu)) return true;
             return false;
         },
     }
@@ -1787,9 +1787,9 @@ const SimdStoreStrategy = enum {
 /// This means when a given type is 128 bits and either the simd128 or relaxed-simd
 /// features are enabled, the function will return `.direct`. This allows storing
 /// it with a single instruction rather than as an unrolled sequence.
-fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread, target: std.Target) SimdStoreStrategy {
-    std.debug.assert(ty.zigTypeTag(pt.zcu) == .Vector);
-    if (ty.bitSize(pt) != 128) return .unrolled;
+fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
+    std.debug.assert(ty.zigTypeTag(zcu) == .Vector);
+    if (ty.bitSize(zcu) != 128) return .unrolled;
     const hasFeature = std.Target.wasm.featureSetHas;
     const features = target.cpu.features;
     if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
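
Concretely: `@Vector(4, u32)` is exactly 128 bits and can use a single v128 store when simd128 or relaxed-simd is enabled, while `@Vector(4, u64)` at 256 bits always takes the unrolled path. The size half of the rule, checked standalone:

    const std = @import("std");

    test "only exactly-128-bit vectors qualify for direct v128 stores" {
        try std.testing.expect(@bitSizeOf(@Vector(4, u32)) == 128);
        try std.testing.expect(@bitSizeOf(@Vector(4, u64)) != 128);
    }
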
@@ -2069,8 +2069,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     for (body) |inst| {
         if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) {
@@ -2091,37 +2091,37 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
 
 fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try func.resolveInst(un_op);
-    const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
+    const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
     const ret_ty = Type.fromInterned(fn_info.return_type);
 
     // the result must be stored on the stack and we return a pointer
     // to the stack instead
     if (func.return_value != .none) {
         try func.store(func.return_value, operand, ret_ty, 0);
-    } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-        switch (ret_ty.zigTypeTag(mod)) {
+    } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+        switch (ret_ty.zigTypeTag(zcu)) {
             // Aggregate types can be lowered as a single scalar value
             .Struct, .Union => {
-                const scalar_type = abi.scalarType(ret_ty, pt);
+                const scalar_type = abi.scalarType(ret_ty, zcu);
                 try func.emitWValue(operand);
                 const opcode = buildOpcode(.{
                     .op = .load,
-                    .width = @as(u8, @intCast(scalar_type.abiSize(pt) * 8)),
-                    .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+                    .width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)),
+                    .signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned,
                     .valtype1 = typeToValtype(scalar_type, pt, func.target.*),
                 });
                 try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                     .offset = operand.offset(),
-                    .alignment = @intCast(scalar_type.abiAlignment(pt).toByteUnits().?),
+                    .alignment = @intCast(scalar_type.abiAlignment(zcu).toByteUnits().?),
                 });
             },
             else => try func.emitWValue(operand),
         }
     } else {
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and ret_ty.isError(mod)) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and ret_ty.isError(zcu)) {
             try func.addImm32(0);
         } else {
             try func.emitWValue(operand);
@@ -2135,15 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const child_type = func.typeOfIndex(inst).childType(mod);
+    const zcu = pt.zcu;
+    const child_type = func.typeOfIndex(inst).childType(zcu);
 
     const result = result: {
-        if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+        if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
             break :result try func.allocStack(Type.usize); // create pointer to void
         }
 
-        const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
+        const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
         if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
             break :result func.return_value;
         }
@@ -2156,14 +2156,14 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try func.resolveInst(un_op);
-    const ret_ty = func.typeOf(un_op).childType(mod);
+    const ret_ty = func.typeOf(un_op).childType(zcu);
 
-    const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
-    if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-        if (ret_ty.isError(mod)) {
+    const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
+    if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+        if (ret_ty.isError(zcu)) {
             try func.addImm32(0);
         }
     } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
@@ -2184,15 +2184,15 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     const ty = func.typeOf(pl_op.operand);
 
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_ty = switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_ty = switch (ty.zigTypeTag(zcu)) {
         .Fn => ty,
-        .Pointer => ty.childType(mod),
+        .Pointer => ty.childType(zcu),
         else => unreachable,
     };
-    const ret_ty = fn_ty.fnReturnType(mod);
-    const fn_info = mod.typeToFunc(fn_ty).?;
+    const ret_ty = fn_ty.fnReturnType(zcu);
+    const fn_info = zcu.typeToFunc(fn_ty).?;
     const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*);
 
     const callee: ?InternPool.Nav.Index = blk: {
@@ -2205,7 +2205,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
             },
             .@"extern" => |@"extern"| {
                 const ext_nav = ip.getNav(@"extern".owner_nav);
-                const ext_info = mod.typeToFunc(Type.fromInterned(@"extern".ty)).?;
+                const ext_info = zcu.typeToFunc(Type.fromInterned(@"extern".ty)).?;
                 var func_type = try genFunctype(
                     func.gpa,
                     ext_info.cc,
@@ -2248,9 +2248,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
         const arg_val = try func.resolveInst(arg);
 
         const arg_ty = func.typeOf(arg);
-        if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+        if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-        try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
+        try func.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
     }
 
     if (callee) |direct| {
@@ -2259,7 +2259,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     } else {
         // in this case we call a function pointer
         // so load its value onto the stack
-        std.debug.assert(ty.zigTypeTag(mod) == .Pointer);
+        std.debug.assert(ty.zigTypeTag(zcu) == .Pointer);
         const operand = try func.resolveInst(pl_op.operand);
         try func.emitWValue(operand);
 
@@ -2271,18 +2271,18 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     }
 
     const result_value = result_value: {
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
             break :result_value .none;
-        } else if (ret_ty.isNoReturn(mod)) {
+        } else if (ret_ty.isNoReturn(zcu)) {
             try func.addTag(.@"unreachable");
             break :result_value .none;
         } else if (first_param_sret) {
             break :result_value sret;
             // TODO: Make this less fragile and optimize
-        } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
+        } else if (zcu.typeToFunc(fn_ty).?.cc == .C and (ret_ty.zigTypeTag(zcu) == .Struct or ret_ty.zigTypeTag(zcu) == .Union)) {
             const result_local = try func.allocLocal(ret_ty);
             try func.addLabel(.local_set, result_local.local.value);
-            const scalar_type = abi.scalarType(ret_ty, pt);
+            const scalar_type = abi.scalarType(ret_ty, zcu);
             const result = try func.allocStack(scalar_type);
             try func.store(result, result_local, scalar_type, 0);
             break :result_value result;
@@ -2306,7 +2306,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -2317,8 +2317,8 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const ptr_info = ptr_ty.ptrInfo(mod);
-    const ty = ptr_ty.childType(mod);
+    const ptr_info = ptr_ty.ptrInfo(zcu);
+    const ty = ptr_ty.childType(zcu);
 
     if (ptr_info.packed_offset.host_size == 0) {
         try func.store(lhs, rhs, ty, 0);
@@ -2331,7 +2331,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
             return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
         }
 
-        var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(pt)))) - 1));
+        var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1));
         mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
         mask ^= ~@as(u64, 0);
         const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
@@ -2343,9 +2343,9 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
         else
             .{ .imm64 = mask };
         const wrap_mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt))) }
+            .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu))) }
         else
-            .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt)) };
+            .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) };
 
         try func.emitWValue(lhs);
         const loaded = try func.load(lhs, int_elem_ty, 0);
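
Worked example of the mask computation above, for a hypothetical 4-bit field at bit offset 8: the field mask is ((1 << 4) - 1) << 8 = 0xF00, and the final xor with all-ones inverts it so the store can clear exactly those bits:

    const std = @import("std");

    test "packed-field clear mask" {
        const bits: u7 = 4; // ty.bitSize(zcu) in the code above
        const bit_offset: u6 = 8; // ptr_info.packed_offset.bit_offset
        var mask: u64 = @intCast((@as(u65, 1) << bits) - 1);
        mask <<= bit_offset;
        mask ^= ~@as(u64, 0);
        try std.testing.expectEqual(@as(u64, 0xFFFF_FFFF_FFFF_F0FF), mask);
    }
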
@@ -2366,12 +2366,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
 fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
     assert(!(lhs != .stack and rhs == .stack));
     const pt = func.pt;
-    const mod = pt.zcu;
-    const abi_size = ty.abiSize(pt);
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const abi_size = ty.abiSize(zcu);
+    switch (ty.zigTypeTag(zcu)) {
         .ErrorUnion => {
-            const pl_ty = ty.errorUnionPayload(mod);
-            if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const pl_ty = ty.errorUnionPayload(zcu);
+            if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 return func.store(lhs, rhs, Type.anyerror, 0);
             }
 
@@ -2379,14 +2379,14 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
             return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Optional => {
-            if (ty.isPtrLikeOptional(mod)) {
+            if (ty.isPtrLikeOptional(zcu)) {
                 return func.store(lhs, rhs, Type.usize, 0);
             }
-            const pl_ty = ty.optionalChild(mod);
-            if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const pl_ty = ty.optionalChild(zcu);
+            if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 return func.store(lhs, rhs, Type.u8, 0);
             }
-            if (pl_ty.zigTypeTag(mod) == .ErrorSet) {
+            if (pl_ty.zigTypeTag(zcu) == .ErrorSet) {
                 return func.store(lhs, rhs, Type.anyerror, 0);
             }
 
@@ -2397,7 +2397,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
             const len = @as(u32, @intCast(abi_size));
             return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
-        .Vector => switch (determineSimdStoreStrategy(ty, pt, func.target.*)) {
+        .Vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) {
             .unrolled => {
                 const len: u32 = @intCast(abi_size);
                 return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2411,13 +2411,13 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     std.wasm.simdOpcode(.v128_store),
                     offset + lhs.offset(),
-                    @intCast(ty.abiAlignment(pt).toByteUnits() orelse 0),
+                    @intCast(ty.abiAlignment(zcu).toByteUnits() orelse 0),
                 });
                 return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
             },
         },
         .Pointer => {
-            if (ty.isSlice(mod)) {
+            if (ty.isSlice(zcu)) {
                 // store pointer first
                 // lower it to the stack so we do not have to store rhs into a local first
                 try func.emitWValue(lhs);
@@ -2441,7 +2441,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
             try func.store(.stack, msb, Type.u64, 8 + lhs.offset());
             return;
         } else if (abi_size > 16) {
-            try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(pt))) });
+            try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
         },
         else => if (abi_size > 8) {
             return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
@@ -2467,21 +2467,21 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
         Mir.Inst.Tag.fromOpcode(opcode),
         .{
             .offset = offset + lhs.offset(),
-            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         },
     );
 }
 
 fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const ty = ty_op.ty.toType();
     const ptr_ty = func.typeOf(ty_op.operand);
-    const ptr_info = ptr_ty.ptrInfo(mod);
+    const ptr_info = ptr_ty.ptrInfo(zcu);
 
-    if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
     const result = result: {
         if (isByRef(ty, pt, func.target.*)) {
@@ -2515,36 +2515,36 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// NOTE: Leaves the value on the stack.
 fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     // load the local's value from memory by its stack position
     try func.emitWValue(operand);
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         // TODO: Add helper functions for simd opcodes
         const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
         // stores as := opcode, offset, alignment (opcode::memarg)
         try func.mir_extra.appendSlice(func.gpa, &[_]u32{
             std.wasm.simdOpcode(.v128_load),
             offset + operand.offset(),
-            @intCast(ty.abiAlignment(pt).toByteUnits().?),
+            @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         });
         try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
         return .stack;
     }
 
-    const abi_size: u8 = @intCast(ty.abiSize(pt));
+    const abi_size: u8 = @intCast(ty.abiSize(zcu));
     const opcode = buildOpcode(.{
         .valtype1 = typeToValtype(ty, pt, func.target.*),
         .width = abi_size * 8,
         .op = .load,
-        .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
+        .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
     });
 
     try func.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
         .{
             .offset = offset + operand.offset(),
-            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         },
     );
 
@@ -2553,13 +2553,13 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
 
 fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const arg_index = func.arg_index;
     const arg = func.args[arg_index];
-    const cc = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?.cc;
+    const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc;
     const arg_ty = func.typeOfIndex(inst);
     if (cc == .C) {
-        const arg_classes = abi.classifyType(arg_ty, pt);
+        const arg_classes = abi.classifyType(arg_ty, zcu);
         for (arg_classes) |class| {
             if (class != .none) {
                 func.arg_index += 1;
@@ -2569,7 +2569,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         // When an argument is passed using more than a single parameter,
         // we combine the parts into a single stack value
         if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
-            if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) {
+            if (arg_ty.zigTypeTag(zcu) != .Int and arg_ty.zigTypeTag(zcu) != .Float) {
                 return func.fail(
                     "TODO: Implement C-ABI argument for type '{}'",
                     .{arg_ty.fmt(pt)},
@@ -2602,6 +2602,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const pt = func.pt;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -2615,10 +2616,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     // For big integers we can ignore this as we will call into compiler-rt which handles this.
     const result = switch (op) {
         .shr, .shl => result: {
-            const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
+            const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
                 return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
-            const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
+            const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
                 try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
             else
@@ -2635,7 +2636,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 /// NOTE: This leaves the value on top of the stack.
 fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     assert(!(lhs != .stack and rhs == .stack));
 
     if (ty.isAnyFloat()) {
@@ -2644,7 +2645,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
     }
 
     if (isByRef(ty, pt, func.target.*)) {
-        if (ty.zigTypeTag(mod) == .Int) {
+        if (ty.zigTypeTag(zcu) == .Int) {
             return func.binOpBigInt(lhs, rhs, ty, op);
         } else {
             return func.fail(
@@ -2657,7 +2658,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
     const opcode: wasm.Opcode = buildOpcode(.{
         .op = op,
         .valtype1 = typeToValtype(ty, pt, func.target.*),
-        .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
+        .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
     });
     try func.emitWValue(lhs);
     try func.emitWValue(rhs);
@@ -2669,8 +2670,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
 
 fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const int_info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const int_info = ty.intInfo(zcu);
     if (int_info.bits > 128) {
         return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
     }
@@ -2812,17 +2813,17 @@ const FloatOp = enum {
 
 fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.typeOf(ty_op.operand);
-    const scalar_ty = ty.scalarType(mod);
+    const scalar_ty = ty.scalarType(zcu);
 
-    switch (scalar_ty.zigTypeTag(mod)) {
-        .Int => if (ty.zigTypeTag(mod) == .Vector) {
+    switch (scalar_ty.zigTypeTag(zcu)) {
+        .Int => if (ty.zigTypeTag(zcu) == .Vector) {
             return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
         } else {
-            const int_bits = ty.intInfo(mod).bits;
+            const int_bits = ty.intInfo(zcu).bits;
             const wasm_bits = toWasmBits(int_bits) orelse {
                 return func.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
             };
@@ -2903,8 +2904,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError
 
 fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement floatOps for vectors", .{});
     }
 
@@ -3010,7 +3011,7 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
 
 fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const lhs = try func.resolveInst(bin_op.lhs);
@@ -3018,7 +3019,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const lhs_ty = func.typeOf(bin_op.lhs);
     const rhs_ty = func.typeOf(bin_op.rhs);
 
-    if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
+    if (lhs_ty.zigTypeTag(zcu) == .Vector or rhs_ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
     }
 
@@ -3029,10 +3030,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     // For big integers we can ignore this as we will call into compiler-rt which handles this.
     const result = switch (op) {
         .shr, .shl => result: {
-            const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
+            const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
                 return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
-            const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
+            const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
                 try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
             else
@@ -3058,9 +3059,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
 /// NOTE: When the type is <= 64 bits, leaves the value on top of the stack if wrapping was needed.
 fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    assert(ty.abiSize(pt) <= 16);
-    const int_bits: u16 = @intCast(ty.bitSize(pt)); // TODO use ty.intInfo(mod).bits
+    const zcu = pt.zcu;
+    assert(ty.abiSize(zcu) <= 16);
+    const int_bits: u16 = @intCast(ty.bitSize(zcu)); // TODO use ty.intInfo(zcu).bits
     const wasm_bits = toWasmBits(int_bits) orelse {
         return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
     };
@@ -3070,7 +3071,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
     switch (wasm_bits) {
         32 => {
             try func.emitWValue(operand);
-            if (ty.isSignedInt(mod)) {
+            if (ty.isSignedInt(zcu)) {
                 try func.addImm32(32 - int_bits);
                 try func.addTag(.i32_shl);
                 try func.addImm32(32 - int_bits);
@@ -3083,7 +3084,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
         },
         64 => {
             try func.emitWValue(operand);
-            if (ty.isSignedInt(mod)) {
+            if (ty.isSignedInt(zcu)) {
                 try func.addImm64(64 - int_bits);
                 try func.addTag(.i64_shl);
                 try func.addImm64(64 - int_bits);
@@ -3104,7 +3105,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
 
             try func.emitWValue(result);
             _ = try func.load(operand, Type.u64, 8);
-            if (ty.isSignedInt(mod)) {
+            if (ty.isSignedInt(zcu)) {
                 try func.addImm64(128 - int_bits);
                 try func.addTag(.i64_shl);
                 try func.addImm64(128 - int_bits);
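
The shl/shr pair above is the classic sign-extending wrap: shifting left pushes the field's sign bit into the register's sign position, and the arithmetic shift right smears it back down. For a 5-bit field in a 32-bit register (a standalone worked example):

    const std = @import("std");

    test "wrap to a signed 5-bit value via shl then arithmetic shr" {
        const int_bits = 5;
        const x: u32 = 19; // 0b10011; as an i5 this is -13
        const shifted: i32 = @bitCast(x << (32 - int_bits));
        try std.testing.expectEqual(@as(i32, -13), shifted >> (32 - int_bits));
    }
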
@@ -3145,13 +3146,13 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
                     };
                 },
                 .Struct => switch (base_ty.containerLayout(zcu)) {
-                    .auto => base_ty.structFieldOffset(@intCast(field.index), pt),
+                    .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
                     .@"extern", .@"packed" => unreachable,
                 },
                 .Union => switch (base_ty.containerLayout(zcu)) {
                     .auto => off: {
                         // Keep in sync with the `un` case of `generateSymbol`.
-                        const layout = base_ty.unionGetLayout(pt);
+                        const layout = base_ty.unionGetLayout(zcu);
                         if (layout.payload_size == 0) break :off 0;
                         if (layout.tag_size == 0) break :off 0;
                         if (layout.tag_align.compare(.gte, layout.payload_align)) {
@@ -3178,15 +3179,15 @@ fn lowerUavRef(
     offset: u32,
 ) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ty = Type.fromInterned(mod.intern_pool.typeOf(uav.val));
+    const zcu = pt.zcu;
+    const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav.val));
 
-    const is_fn_body = ty.zigTypeTag(mod) == .Fn;
-    if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    const is_fn_body = ty.zigTypeTag(zcu) == .Fn;
+    if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return .{ .imm32 = 0xaaaaaaaa };
     }
 
-    const decl_align = mod.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
+    const decl_align = zcu.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
     const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc);
     const target_sym_index = switch (res) {
         .mcv => |mcv| mcv.load_symbol,
@@ -3204,19 +3205,19 @@ fn lowerUavRef(
 
 fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     // check if decl is an alias to a function, in which case we
     // want to lower the actual decl, rather than the alias itself.
-    const owner_nav = switch (ip.indexToKey(mod.navValue(nav_index).toIntern())) {
+    const owner_nav = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) {
         .func => |function| function.owner_nav,
         .variable => |variable| variable.owner_nav,
         .@"extern" => |@"extern"| @"extern".owner_nav,
         else => nav_index,
     };
     const nav_ty = ip.getNav(owner_nav).typeOf(ip);
-    if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(zcu)) {
         return .{ .imm32 = 0xaaaaaaaa };
     }
 
@@ -3234,10 +3235,10 @@ fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) Inn
 /// Asserts that `isByRef` returns `false` for `ty`.
 fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     assert(!isByRef(ty, pt, func.target.*));
-    const ip = &mod.intern_pool;
-    if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
+    const ip = &zcu.intern_pool;
+    if (val.isUndefDeep(zcu)) return func.emitUndefined(ty);
 
     switch (ip.indexToKey(val.ip_index)) {
         .int_type,
@@ -3280,16 +3281,16 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
         .empty_enum_value,
         => unreachable, // non-runtime values
         .int => {
-            const int_info = ty.intInfo(mod);
+            const int_info = ty.intInfo(zcu);
             switch (int_info.signedness) {
                 .signed => switch (int_info.bits) {
-                    0...32 => return .{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(pt)))) },
-                    33...64 => return .{ .imm64 = @bitCast(val.toSignedInt(pt)) },
+                    0...32 => return .{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))) },
+                    33...64 => return .{ .imm64 = @bitCast(val.toSignedInt(zcu)) },
                     else => unreachable,
                 },
                 .unsigned => switch (int_info.bits) {
-                    0...32 => return .{ .imm32 = @intCast(val.toUnsignedInt(pt)) },
-                    33...64 => return .{ .imm64 = val.toUnsignedInt(pt) },
+                    0...32 => return .{ .imm32 = @intCast(val.toUnsignedInt(zcu)) },
+                    33...64 => return .{ .imm64 = val.toUnsignedInt(zcu) },
                     else => unreachable,
                 },
             }
@@ -3302,9 +3303,9 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             const err_int_ty = try pt.errorIntType();
             const err_ty, const err_val = switch (error_union.val) {
                 .err_name => |err_name| .{
-                    ty.errorUnionSet(mod),
+                    ty.errorUnionSet(zcu),
                     Value.fromInterned(try pt.intern(.{ .err = .{
-                        .ty = ty.errorUnionSet(mod).toIntern(),
+                        .ty = ty.errorUnionSet(zcu).toIntern(),
                         .name = err_name,
                     } })),
                 },
@@ -3313,8 +3314,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
                     try pt.intValue(err_int_ty, 0),
                 },
             };
-            const payload_type = ty.errorUnionPayload(mod);
-            if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
+            const payload_type = ty.errorUnionPayload(zcu);
+            if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
                 // We use the error type directly as the type.
                 return func.lowerConstant(err_val, err_ty);
             }
@@ -3339,20 +3340,20 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             },
         },
         .ptr => return func.lowerPtr(val.toIntern(), 0),
-        .opt => if (ty.optionalReprIsPayload(mod)) {
-            const pl_ty = ty.optionalChild(mod);
-            if (val.optionalValue(mod)) |payload| {
+        .opt => if (ty.optionalReprIsPayload(zcu)) {
+            const pl_ty = ty.optionalChild(zcu);
+            if (val.optionalValue(zcu)) |payload| {
                 return func.lowerConstant(payload, pl_ty);
             } else {
                 return .{ .imm32 = 0 };
             }
         } else {
-            return .{ .imm32 = @intFromBool(!val.isNull(mod)) };
+            return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
         },
         .aggregate => switch (ip.indexToKey(ty.ip_index)) {
             .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
             .vector_type => {
-                assert(determineSimdStoreStrategy(ty, pt, func.target.*) == .direct);
+                assert(determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct);
                 var buf: [16]u8 = undefined;
                 val.writeToMemory(ty, pt, &buf) catch unreachable;
                 return func.storeSimdImmd(buf);
@@ -3378,8 +3379,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             const constant_ty = if (un.tag == .none)
                 try ty.unionBackingType(pt)
             else field_ty: {
-                const union_obj = mod.typeToUnion(ty).?;
-                const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+                const union_obj = zcu.typeToUnion(ty).?;
+                const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
                 break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
             };
             return func.lowerConstant(Value.fromInterned(un.val), constant_ty);
@@ -3398,11 +3399,11 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
 
 fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Bool, .ErrorSet => return .{ .imm32 = 0xaaaaaaaa },
-        .Int, .Enum => switch (ty.intInfo(mod).bits) {
+        .Int, .Enum => switch (ty.intInfo(zcu).bits) {
             0...32 => return .{ .imm32 = 0xaaaaaaaa },
             33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
             else => unreachable,
@@ -3419,8 +3420,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             else => unreachable,
         },
         .Optional => {
-            const pl_ty = ty.optionalChild(mod);
-            if (ty.optionalReprIsPayload(mod)) {
+            const pl_ty = ty.optionalChild(zcu);
+            if (ty.optionalReprIsPayload(zcu)) {
                 return func.emitUndefined(pl_ty);
             }
             return .{ .imm32 = 0xaaaaaaaa };
@@ -3429,10 +3430,10 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             return .{ .imm32 = 0xaaaaaaaa };
         },
         .Struct => {
-            const packed_struct = mod.typeToPackedStruct(ty).?;
+            const packed_struct = zcu.typeToPackedStruct(ty).?;
             return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
         },
-        else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
+        else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
     }
 }
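
The 0xaaaaaaaa immediates follow Zig's convention of filling undefined memory with 0xaa bytes (the same pattern the airStore TODO above mentions writing to dest), which makes undefined scalars easy to spot in a debugger:

    const std = @import("std");

    test "0xaa is the undefined-byte fill pattern" {
        var buf: [8]u8 = undefined;
        @memset(&buf, 0xaa);
        try std.testing.expectEqual(@as(u64, 0xaaaa_aaaa_aaaa_aaaa), std.mem.readInt(u64, &buf, .little));
    }
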
 
@@ -3441,8 +3442,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
 /// as an integer value.
 fn valueAsI32(func: *const CodeGen, val: Value) i32 {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     switch (val.toIntern()) {
         .bool_true => return 1,
@@ -3465,12 +3466,13 @@ fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread
 }
 
 fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 {
+    const zcu = pt.zcu;
     return switch (storage) {
         .i64 => |x| @as(i32, @intCast(x)),
         .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
         .big_int => unreachable,
-        .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0)))),
-        .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))))),
+        .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0)))),
+        .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu))))),
     };
 }
 
@@ -3599,10 +3601,10 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
 fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     assert(!(lhs != .stack and rhs == .stack));
     const pt = func.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
-        const payload_ty = ty.optionalChild(mod);
-        if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Optional and !ty.optionalReprIsPayload(zcu)) {
+        const payload_ty = ty.optionalChild(zcu);
+        if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             // When we hit this case, we must check the value of optionals
             // that are not pointers. This means first checking against non-null for
             // both lhs and rhs, as well as checking that the payloads of lhs and rhs match
@@ -3616,10 +3618,10 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
 
     const signedness: std.builtin.Signedness = blk: {
         // by default we treat the operand type as unsigned (e.g. bools and enum values)
-        if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
+        if (ty.zigTypeTag(zcu) != .Int) break :blk .unsigned;
 
         // in case of an actual integer, we emit the correct signedness
-        break :blk ty.intInfo(mod).signedness;
+        break :blk ty.intInfo(zcu).signedness;
     };
 
     // ensure that when we compare pointers, we emit
@@ -3708,12 +3710,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+    const zcu = func.pt.zcu;
     const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
     const block = func.blocks.get(br.block_inst).?;
 
     // if the operand has codegen bits, we should break with a value
-    if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(pt)) {
+    if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) {
         const operand = try func.resolveInst(br.operand);
         try func.lowerToStack(operand);
 
@@ -3736,17 +3738,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const operand_ty = func.typeOf(ty_op.operand);
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     const result = result: {
-        if (operand_ty.zigTypeTag(mod) == .Bool) {
+        if (operand_ty.zigTypeTag(zcu) == .Bool) {
             try func.emitWValue(operand);
             try func.addTag(.i32_eqz);
             const not_tmp = try func.allocLocal(operand_ty);
             try func.addLabel(.local_set, not_tmp.local.value);
             break :result not_tmp;
         } else {
-            const int_info = operand_ty.intInfo(mod);
+            const int_info = operand_ty.intInfo(zcu);
             const wasm_bits = toWasmBits(int_info.bits) orelse {
                 return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
             };
@@ -3816,14 +3818,14 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const wanted_ty = func.typeOfIndex(inst);
     const given_ty = func.typeOf(ty_op.operand);
 
-    const bit_size = given_ty.bitSize(pt);
-    const needs_wrapping = (given_ty.isSignedInt(mod) != wanted_ty.isSignedInt(mod)) and
+    const bit_size = given_ty.bitSize(zcu);
+    const needs_wrapping = (given_ty.isSignedInt(zcu) != wanted_ty.isSignedInt(zcu)) and
         bit_size != 32 and bit_size != 64 and bit_size != 128;
 
     const result = result: {
@@ -3860,12 +3862,12 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
     if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
     if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand;
-    if (wanted_ty.bitSize(pt) > 64) return operand;
-    assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
+    if (wanted_ty.bitSize(zcu) > 64) return operand;
+    assert((wanted_ty.isInt(zcu) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(zcu)));
 
     const opcode = buildOpcode(.{
         .op = .reinterpret,
@@ -3879,24 +3881,24 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
 
 fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.StructField, ty_pl.payload);
 
     const struct_ptr = try func.resolveInst(extra.data.struct_operand);
     const struct_ptr_ty = func.typeOf(extra.data.struct_operand);
-    const struct_ty = struct_ptr_ty.childType(mod);
+    const struct_ty = struct_ptr_ty.childType(zcu);
     const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index);
     return func.finishAir(inst, result, &.{extra.data.struct_operand});
 }
 
 fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const struct_ptr = try func.resolveInst(ty_op.operand);
     const struct_ptr_ty = func.typeOf(ty_op.operand);
-    const struct_ty = struct_ptr_ty.childType(mod);
+    const struct_ty = struct_ptr_ty.childType(zcu);
 
     const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index);
     return func.finishAir(inst, result, &.{ty_op.operand});
@@ -3912,23 +3914,23 @@ fn structFieldPtr(
     index: u32,
 ) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result_ty = func.typeOfIndex(inst);
-    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
+    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
 
-    const offset = switch (struct_ty.containerLayout(mod)) {
-        .@"packed" => switch (struct_ty.zigTypeTag(mod)) {
+    const offset = switch (struct_ty.containerLayout(zcu)) {
+        .@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
             .Struct => offset: {
-                if (result_ty.ptrInfo(mod).packed_offset.host_size != 0) {
+                if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
                     break :offset @as(u32, 0);
                 }
-                const struct_type = mod.typeToStruct(struct_ty).?;
+                const struct_type = zcu.typeToStruct(struct_ty).?;
                 break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
             },
             .Union => 0,
             else => unreachable,
         },
-        else => struct_ty.structFieldOffset(index, pt),
+        else => struct_ty.structFieldOffset(index, zcu),
     };
     // save a load and store when we can simply reuse the operand
     if (offset == 0) {
@@ -3944,24 +3946,24 @@ fn structFieldPtr(
 
 fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
 
     const struct_ty = func.typeOf(struct_field.struct_operand);
     const operand = try func.resolveInst(struct_field.struct_operand);
     const field_index = struct_field.field_index;
-    const field_ty = struct_ty.structFieldType(field_index, mod);
-    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+    const field_ty = struct_ty.structFieldType(field_index, zcu);
+    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
 
-    const result: WValue = switch (struct_ty.containerLayout(mod)) {
-        .@"packed" => switch (struct_ty.zigTypeTag(mod)) {
+    const result: WValue = switch (struct_ty.containerLayout(zcu)) {
+        .@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
             .Struct => result: {
-                const packed_struct = mod.typeToPackedStruct(struct_ty).?;
+                const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
                 const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
                 const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
-                const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
+                const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse {
                     return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
                 };
                 const const_wvalue: WValue = if (wasm_bits == 32)
@@ -3977,16 +3979,16 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 else
                     try func.binOp(operand, const_wvalue, backing_ty, .shr);
 
-                if (field_ty.zigTypeTag(mod) == .Float) {
-                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+                if (field_ty.zigTypeTag(zcu) == .Float) {
+                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
                     const truncated = try func.trunc(shifted_value, int_type, backing_ty);
                     break :result try func.bitcast(field_ty, int_type, truncated);
-                } else if (field_ty.isPtrAtRuntime(mod) and packed_struct.field_types.len == 1) {
+                } else if (field_ty.isPtrAtRuntime(zcu) and packed_struct.field_types.len == 1) {
                     // In this case we do not have to perform any transformations,
                     // we can simply reuse the operand.
                     break :result func.reuseOperand(struct_field.struct_operand, operand);
-                } else if (field_ty.isPtrAtRuntime(mod)) {
-                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+                } else if (field_ty.isPtrAtRuntime(zcu)) {
+                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
                     break :result try func.trunc(shifted_value, int_type, backing_ty);
                 }
                 break :result try func.trunc(shifted_value, field_ty, backing_ty);
@@ -4002,13 +4004,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     }
                 }
 
-                const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(pt))));
-                if (field_ty.zigTypeTag(mod) == .Float) {
-                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+                const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(zcu))));
+                if (field_ty.zigTypeTag(zcu) == .Float) {
+                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
                     const truncated = try func.trunc(operand, int_type, union_int_type);
                     break :result try func.bitcast(field_ty, int_type, truncated);
-                } else if (field_ty.isPtrAtRuntime(mod)) {
-                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+                } else if (field_ty.isPtrAtRuntime(zcu)) {
+                    const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
                     break :result try func.trunc(operand, int_type, union_int_type);
                 }
                 break :result try func.trunc(operand, field_ty, union_int_type);
@@ -4016,7 +4018,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => unreachable,
         },
         else => result: {
-            const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, pt)) orelse {
+            const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
                 return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
             };
             if (isByRef(field_ty, pt, func.target.*)) {
@@ -4036,7 +4038,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     // result type is always 'noreturn'
     const blocktype = wasm.block_empty;
     const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -4093,7 +4095,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // When the target is an integer larger than u32, we have no way to use the value
     // as an index, so we also use an if/else-chain for those cases.
     // TODO: Benchmark this to find a proper value; LLVM seems to draw the line at '40~45'.
-    const is_sparse = highest - lowest > 50 or target_ty.bitSize(pt) > 32;
+    const is_sparse = highest - lowest > 50 or target_ty.bitSize(zcu) > 32;
 
     const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra_index..][0..switch_br.data.else_body_len]);
     const has_else_body = else_body.len != 0;
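
A runnable restatement of the density heuristic above; the threshold of 50 and the 32-bit limit come straight from the code, the function name is illustrative:

    fn isSparse(highest: i64, lowest: i64, target_bit_size: u64) bool {
        return highest - lowest > 50 or target_bit_size > 32;
    }

Dense ranges are lowered to a br_table jump table; sparse ones fall back to the if/else chain.
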
@@ -4138,7 +4140,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 // for errors that are not present in any branch. This is fine as this default
                 // case will never be hit for those cases, but we do save runtime cost and size
                 // by using a jump table for this instead of if-else chains.
-                break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
+                break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .ErrorSet) case_i else unreachable;
             };
             func.mir_extra.appendAssumeCapacity(idx);
         } else if (has_else_body) {
@@ -4149,10 +4151,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const signedness: std.builtin.Signedness = blk: {
         // by default we treat the operand type as unsigned (i.e. bools and enum values)
-        if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
+        if (target_ty.zigTypeTag(zcu) != .Int) break :blk .unsigned;
 
         // in case of an actual integer, we emit the correct signedness
-        break :blk target_ty.intInfo(mod).signedness;
+        break :blk target_ty.intInfo(zcu).signedness;
     };
 
     try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @intFromBool(has_else_body));
@@ -4217,14 +4219,14 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try func.resolveInst(un_op);
     const err_union_ty = func.typeOf(un_op);
-    const pl_ty = err_union_ty.errorUnionPayload(mod);
+    const pl_ty = err_union_ty.errorUnionPayload(zcu);
 
     const result: WValue = result: {
-        if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             switch (opcode) {
                 .i32_ne => break :result .{ .imm32 = 0 },
                 .i32_eq => break :result .{ .imm32 = 1 },
@@ -4233,10 +4235,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
         }
 
         try func.emitWValue(operand);
-        if (pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             try func.addMemArg(.i32_load16_u, .{
-                .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, pt))),
-                .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
+                .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))),
+                .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
             });
         }
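
The check emitted here reduces to a nonzero test on the 16-bit error tag (a zero tag means no error, as the empty-error-set shortcut above already exploits); sketched standalone:

    fn isErr(error_tag: u16) bool {
        return error_tag != 0; // i32_ne against the loaded tag; i32_eq for the inverse
    }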
 
@@ -4250,23 +4252,23 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
 
 fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.typeOf(ty_op.operand);
-    const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
-    const payload_ty = err_ty.errorUnionPayload(mod);
+    const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
+    const payload_ty = err_ty.errorUnionPayload(zcu);
 
     const result: WValue = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             if (op_is_ptr) {
                 break :result func.reuseOperand(ty_op.operand, operand);
             }
             break :result .none;
         }
 
-        const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
+        const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
         if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) {
             break :result try func.buildPointerOffset(operand, pl_offset, .new);
         }
@@ -4278,30 +4280,30 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
 
 fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.typeOf(ty_op.operand);
-    const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
-    const payload_ty = err_ty.errorUnionPayload(mod);
+    const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
+    const payload_ty = err_ty.errorUnionPayload(zcu);
 
     const result: WValue = result: {
-        if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (err_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             break :result .{ .imm32 = 0 };
         }
 
-        if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, pt)));
+        break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu)));
     };
     return func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+    const zcu = func.pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -4309,18 +4311,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
 
     const pl_ty = func.typeOf(ty_op.operand);
     const result = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
         const err_union = try func.allocStack(err_ty);
-        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new);
+        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
         try func.store(payload_ptr, operand, pl_ty, 0);
 
         // ensure we also write '0' to the error part, so any leftover value in that stack slot gets overwritten.
         try func.emitWValue(err_union);
         try func.addImm32(0);
-        const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+        const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
         try func.addMemArg(.i32_store16, .{
             .offset = err_union.offset() + err_val_offset,
             .alignment = 2,
@@ -4332,25 +4334,25 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
 
 fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const err_ty = ty_op.ty.toType();
-    const pl_ty = err_ty.errorUnionPayload(mod);
+    const pl_ty = err_ty.errorUnionPayload(zcu);
 
     const result = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
         const err_union = try func.allocStack(err_ty);
         // store error value
-        try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, pt)));
+        try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu)));
 
         // write 'undefined' to the payload
-        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new);
-        const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(pt)));
+        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
+        const len = @as(u32, @intCast(err_ty.errorUnionPayload(zcu).abiSize(zcu)));
         try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
 
         break :result err_union;
@@ -4365,16 +4367,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const operand_ty = func.typeOf(ty_op.operand);
     const pt = func.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector or operand_ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("todo Wasm intcast for vectors", .{});
     }
-    if (ty.abiSize(pt) > 16 or operand_ty.abiSize(pt) > 16) {
+    if (ty.abiSize(zcu) > 16 or operand_ty.abiSize(zcu) > 16) {
         return func.fail("todo Wasm intcast for bitsize > 128", .{});
     }
 
-    const op_bits = toWasmBits(@intCast(operand_ty.bitSize(pt))).?;
-    const wanted_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
+    const op_bits = toWasmBits(@intCast(operand_ty.bitSize(zcu))).?;
+    const wanted_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
     const result = if (op_bits == wanted_bits)
         func.reuseOperand(ty_op.operand, operand)
     else
@@ -4389,9 +4391,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// NOTE: May leave the result on the top of the stack.
 fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const given_bitsize = @as(u16, @intCast(given.bitSize(pt)));
-    const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(pt)));
+    const zcu = pt.zcu;
+    const given_bitsize = @as(u16, @intCast(given.bitSize(zcu)));
+    const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(zcu)));
     assert(given_bitsize <= 128);
     assert(wanted_bitsize <= 128);
 
@@ -4407,7 +4409,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         return .stack;
     } else if (op_bits == 32 and wanted_bits == 64) {
         try func.emitWValue(operand);
-        try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u);
+        try func.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u);
         return .stack;
     } else if (wanted_bits == 128) {
         // for 128bit integers we store the integer in the virtual stack, rather than a local
@@ -4417,7 +4419,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it,
         // meaning fewer store operations are required.
         const lhs = if (op_bits == 32) blk: {
-            const sign_ty = if (wanted.isSignedInt(mod)) Type.i64 else Type.u64;
+            const sign_ty = if (wanted.isSignedInt(zcu)) Type.i64 else Type.u64;
             break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty);
         } else operand;
 
@@ -4425,7 +4427,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset());
 
         // For signed integers we arithmetically shift the low word right by 63 (64-bit integer - 1 sign bit) and store it as the upper word
-        if (wanted.isSignedInt(mod)) {
+        if (wanted.isSignedInt(zcu)) {
             try func.emitWValue(stack_ptr);
             const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
             try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset());
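
The sign-extension trick can be checked with plain integers: the upper word of the widened value is just the lower word shifted right arithmetically by 63. A self-contained test sketch:

    const std = @import("std");

    test "sign-extension upper word" {
        const lo: i64 = -5;
        const hi: i64 = lo >> 63; // -1 for negative values, 0 otherwise
        const wide: i128 = (@as(i128, hi) << 64) | @as(i128, @as(u64, @bitCast(lo)));
        try std.testing.expectEqual(@as(i128, -5), wide);
    }
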
@@ -4439,12 +4441,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
 
 fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try func.resolveInst(un_op);
 
     const op_ty = func.typeOf(un_op);
-    const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty;
+    const optional_ty = if (op_kind == .ptr) op_ty.childType(zcu) else op_ty;
     const result = try func.isNull(operand, optional_ty, opcode);
     return func.finishAir(inst, result, &.{un_op});
 }
@@ -4453,19 +4455,19 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
 /// NOTE: Leaves the result on the stack
 fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     try func.emitWValue(operand);
-    const payload_ty = optional_ty.optionalChild(mod);
-    if (!optional_ty.optionalReprIsPayload(mod)) {
+    const payload_ty = optional_ty.optionalChild(zcu);
+    if (!optional_ty.optionalReprIsPayload(zcu)) {
         // When payload is zero-bits, we can treat operand as a value, rather than
         // a pointer to the stack value
-        if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-            const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+        if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+            const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
                 return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
             };
             try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
         }
-    } else if (payload_ty.isSlice(mod)) {
+    } else if (payload_ty.isSlice(zcu)) {
         switch (func.arch()) {
             .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
             .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
@@ -4482,17 +4484,17 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
 
 fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const opt_ty = func.typeOf(ty_op.operand);
     const payload_ty = func.typeOfIndex(inst);
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return func.finishAir(inst, .none, &.{ty_op.operand});
     }
 
     const result = result: {
         const operand = try func.resolveInst(ty_op.operand);
-        if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
+        if (opt_ty.optionalReprIsPayload(zcu)) break :result func.reuseOperand(ty_op.operand, operand);
 
         if (isByRef(payload_ty, pt, func.target.*)) {
             break :result try func.buildPointerOffset(operand, 0, .new);
@@ -4505,14 +4507,14 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.typeOf(ty_op.operand).childType(mod);
+    const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
 
     const result = result: {
-        const payload_ty = opt_ty.optionalChild(mod);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or opt_ty.optionalReprIsPayload(mod)) {
+        const payload_ty = opt_ty.optionalChild(zcu);
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or opt_ty.optionalReprIsPayload(zcu)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
@@ -4523,20 +4525,20 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.typeOf(ty_op.operand).childType(mod);
-    const payload_ty = opt_ty.optionalChild(mod);
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const payload_ty = opt_ty.optionalChild(zcu);
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
     }
 
-    if (opt_ty.optionalReprIsPayload(mod)) {
+    if (opt_ty.optionalReprIsPayload(zcu)) {
         return func.finishAir(inst, operand, &.{ty_op.operand});
     }
 
-    const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+    const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
         return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
     };
 
@@ -4552,10 +4554,10 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const payload_ty = func.typeOf(ty_op.operand);
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     const result = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             const non_null_bit = try func.allocStack(Type.u1);
             try func.emitWValue(non_null_bit);
             try func.addImm32(1);
@@ -4565,10 +4567,10 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         const operand = try func.resolveInst(ty_op.operand);
         const op_ty = func.typeOfIndex(inst);
-        if (op_ty.optionalReprIsPayload(mod)) {
+        if (op_ty.optionalReprIsPayload(zcu)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
-        const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+        const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
             return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
         };
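
For reference, the layout that non-pointer-like optionals get here, sketched as a plain struct (illustrative; the real offsets come from abiSize/abiAlignment):

    const OptionalU32 = extern struct {
        payload: u32, // payload sits at offset 0
        is_non_null: u8, // one byte at offset abiSize(payload), read via i32_load8_u in isNull
    };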
 
@@ -4610,14 +4612,14 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const slice_ty = func.typeOf(bin_op.lhs);
     const slice = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = slice_ty.childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_ty = slice_ty.childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     // load pointer onto stack
     _ = try func.load(slice, Type.usize, 0);
@@ -4638,12 +4640,12 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const elem_ty = ty_pl.ty.toType().childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_ty = ty_pl.ty.toType().childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     const slice = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
@@ -4682,13 +4684,13 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const wanted_ty: Type = ty_op.ty.toType();
     const op_ty = func.typeOf(ty_op.operand);
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (wanted_ty.zigTypeTag(mod) == .Vector or op_ty.zigTypeTag(mod) == .Vector) {
+    if (wanted_ty.zigTypeTag(zcu) == .Vector or op_ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: trunc for vectors", .{});
     }
 
-    const result = if (op_ty.bitSize(pt) == wanted_ty.bitSize(pt))
+    const result = if (op_ty.bitSize(zcu) == wanted_ty.bitSize(zcu))
         func.reuseOperand(ty_op.operand, operand)
     else
         try func.trunc(operand, wanted_ty, op_ty);
@@ -4700,13 +4702,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// NOTE: Resulting value is left on the stack.
 fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
     const pt = func.pt;
-    const given_bits = @as(u16, @intCast(given_ty.bitSize(pt)));
+    const zcu = pt.zcu;
+    const given_bits = @as(u16, @intCast(given_ty.bitSize(zcu)));
     if (toWasmBits(given_bits) == null) {
         return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
     }
 
     var result = try func.intcast(operand, given_ty, wanted_ty);
-    const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(pt)));
+    const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(zcu)));
     const wasm_bits = toWasmBits(wanted_bits).?;
     if (wasm_bits != wanted_bits) {
         result = try func.wrapOperand(result, wanted_ty);
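
Wrapping to a non-native width, illustrated with plain integers; this assumes wrapOperand masks the value down to the wanted bit count, which is what the wrap amounts to for widths below 32:

    fn wrapToBits(bits: u5, x: u32) u32 {
        const mask = (@as(u32, 1) << bits) - 1; // e.g. bits = 12 gives 0xFFF
        return x & mask;
    }
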
@@ -4724,23 +4727,23 @@ fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const array_ty = func.typeOf(ty_op.operand).childType(mod);
+    const array_ty = func.typeOf(ty_op.operand).childType(zcu);
     const slice_ty = ty_op.ty.toType();
 
     // create a slice on the stack
     const slice_local = try func.allocStack(slice_ty);
 
     // store the array ptr in the slice
-    if (array_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (array_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try func.store(slice_local, operand, Type.usize, 0);
     }
 
     // store the length of the array in the slice
-    const array_len: u32 = @intCast(array_ty.arrayLen(mod));
+    const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
     try func.store(slice_local, .{ .imm32 = array_len }, Type.usize, func.ptrSize());
 
     return func.finishAir(inst, slice_local, &.{ty_op.operand});
@@ -4748,11 +4751,11 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const operand = try func.resolveInst(un_op);
     const ptr_ty = func.typeOf(un_op);
-    const result = if (ptr_ty.isSlice(mod))
+    const result = if (ptr_ty.isSlice(zcu))
         try func.slicePtr(operand)
     else switch (operand) {
         // for stack offset, return a pointer to this offset.
@@ -4764,17 +4767,17 @@ fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const ptr_ty = func.typeOf(bin_op.lhs);
     const ptr = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = ptr_ty.childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_ty = ptr_ty.childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     // load pointer onto the stack
-    if (ptr_ty.isSlice(mod)) {
+    if (ptr_ty.isSlice(zcu)) {
         _ = try func.load(ptr, Type.usize, 0);
     } else {
         try func.lowerToStack(ptr);
@@ -4796,19 +4799,19 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const elem_ty = ty_pl.ty.toType().childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_ty = ty_pl.ty.toType().childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     const ptr = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
 
     // load pointer onto the stack
-    if (ptr_ty.isSlice(mod)) {
+    if (ptr_ty.isSlice(zcu)) {
         _ = try func.load(ptr, Type.usize, 0);
     } else {
         try func.lowerToStack(ptr);
@@ -4825,16 +4828,16 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const ptr = try func.resolveInst(bin_op.lhs);
     const offset = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const pointee_ty = switch (ptr_ty.ptrSize(mod)) {
-        .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
-        else => ptr_ty.childType(mod),
+    const pointee_ty = switch (ptr_ty.ptrSize(zcu)) {
+        .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
+        else => ptr_ty.childType(zcu),
     };
 
     const valtype = typeToValtype(Type.usize, pt, func.target.*);
@@ -4843,7 +4846,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 
     try func.lowerToStack(ptr);
     try func.emitWValue(offset);
-    try func.addImm32(@intCast(pointee_ty.abiSize(pt)));
+    try func.addImm32(@intCast(pointee_ty.abiSize(zcu)));
     try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
     try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
 
@@ -4852,7 +4855,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 
 fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -4863,16 +4866,16 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const ptr = try func.resolveInst(bin_op.lhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
     const value = try func.resolveInst(bin_op.rhs);
-    const len = switch (ptr_ty.ptrSize(mod)) {
+    const len = switch (ptr_ty.ptrSize(zcu)) {
         .Slice => try func.sliceLen(ptr),
-        .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(mod).arrayLen(mod))) }),
+        .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(zcu).arrayLen(zcu))) }),
         .C, .Many => unreachable,
     };
 
-    const elem_ty = if (ptr_ty.ptrSize(mod) == .One)
-        ptr_ty.childType(mod).childType(mod)
+    const elem_ty = if (ptr_ty.ptrSize(zcu) == .One)
+        ptr_ty.childType(zcu).childType(zcu)
     else
-        ptr_ty.childType(mod);
+        ptr_ty.childType(zcu);
 
     const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
     try func.memset(elem_ty, dst_ptr, len, value);
@@ -4886,7 +4889,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
 /// we implement it manually.
 fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
     const pt = func.pt;
-    const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
+    const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt.zcu)));
 
     // When bulk_memory is enabled, we lower it to wasm's memset instruction.
     // If not, we lower it ourselves.
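
A minimal sketch of the manual fallback strategy; the real code emits a wasm loop over WValues, this only shows the shape, with illustrative names:

    fn memsetFallback(dest: [*]u8, len: usize, value: u8) void {
        var i: usize = 0;
        while (i < len) : (i += 1) dest[i] = value; // one store per element
    }
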
@@ -4975,14 +4978,14 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
 
 fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const array_ty = func.typeOf(bin_op.lhs);
     const array = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = array_ty.childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
+    const elem_ty = array_ty.childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
 
     if (isByRef(array_ty, pt, func.target.*)) {
         try func.lowerToStack(array);
@@ -4991,15 +4994,15 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addTag(.i32_mul);
         try func.addTag(.i32_add);
     } else {
-        std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
+        std.debug.assert(array_ty.zigTypeTag(zcu) == .Vector);
 
         switch (index) {
             inline .imm32, .imm64 => |lane| {
-                const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(pt)) {
-                    8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
-                    16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
-                    32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
-                    64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
+                const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) {
+                    8 => if (elem_ty.isSignedInt(zcu)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+                    16 => if (elem_ty.isSignedInt(zcu)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+                    32 => if (elem_ty.isInt(zcu)) .i32x4_extract_lane else .f32x4_extract_lane,
+                    64 => if (elem_ty.isInt(zcu)) .i64x2_extract_lane else .f64x2_extract_lane,
                     else => unreachable,
                 };
 
@@ -5037,7 +5040,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -5045,7 +5048,7 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const op_bits = op_ty.floatBits(func.target.*);
 
     const dest_ty = func.typeOfIndex(inst);
-    const dest_info = dest_ty.intInfo(mod);
+    const dest_info = dest_ty.intInfo(zcu);
 
     if (dest_info.bits > 128) {
         return func.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits});
@@ -5082,12 +5085,12 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.typeOf(ty_op.operand);
-    const op_info = op_ty.intInfo(mod);
+    const op_info = op_ty.intInfo(zcu);
 
     const dest_ty = func.typeOfIndex(inst);
     const dest_bits = dest_ty.floatBits(func.target.*);
@@ -5127,19 +5130,19 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.typeOfIndex(inst);
-    const elem_ty = ty.childType(mod);
+    const elem_ty = ty.childType(zcu);
 
-    if (determineSimdStoreStrategy(ty, pt, func.target.*) == .direct) blk: {
+    if (determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct) blk: {
         switch (operand) {
             // when the operand lives in the linear memory section, we can directly
             // load and splat the value at once, meaning we do not first have to load
             // the scalar value onto the stack.
             .stack_offset, .memory, .memory_offset => {
-                const opcode = switch (elem_ty.bitSize(pt)) {
+                const opcode = switch (elem_ty.bitSize(zcu)) {
                     8 => std.wasm.simdOpcode(.v128_load8_splat),
                     16 => std.wasm.simdOpcode(.v128_load16_splat),
                     32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -5153,17 +5156,17 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     opcode,
                     operand.offset(),
-                    @intCast(elem_ty.abiAlignment(pt).toByteUnits().?),
+                    @intCast(elem_ty.abiAlignment(zcu).toByteUnits().?),
                 });
                 try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
                 return func.finishAir(inst, .stack, &.{ty_op.operand});
             },
             .local => {
-                const opcode = switch (elem_ty.bitSize(pt)) {
+                const opcode = switch (elem_ty.bitSize(zcu)) {
                     8 => std.wasm.simdOpcode(.i8x16_splat),
                     16 => std.wasm.simdOpcode(.i16x8_splat),
-                    32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
-                    64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+                    32 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+                    64 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
                     else => break :blk, // Cannot make use of simd-instructions
                 };
                 try func.emitWValue(operand);
@@ -5175,14 +5178,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => unreachable,
         }
     }
-    const elem_size = elem_ty.bitSize(pt);
-    const vector_len = @as(usize, @intCast(ty.vectorLen(mod)));
+    const elem_size = elem_ty.bitSize(zcu);
+    const vector_len = @as(usize, @intCast(ty.vectorLen(zcu)));
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
         return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
     }
 
     const result = try func.allocStack(ty);
-    const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
+    const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
     var index: usize = 0;
     var offset: u32 = 0;
     while (index < vector_len) : (index += 1) {
@@ -5203,7 +5206,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_ty = func.typeOfIndex(inst);
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -5213,15 +5216,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mask = Value.fromInterned(extra.mask);
     const mask_len = extra.mask_len;
 
-    const child_ty = inst_ty.childType(mod);
-    const elem_size = child_ty.abiSize(pt);
+    const child_ty = inst_ty.childType(zcu);
+    const elem_size = child_ty.abiSize(zcu);
 
     // TODO: One of them could be by ref; handle in loop
     if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) {
         const result = try func.allocStack(inst_ty);
 
         for (0..mask_len) |index| {
-            const value = (try mask.elemValue(pt, index)).toSignedInt(pt);
+            const value = (try mask.elemValue(pt, index)).toSignedInt(zcu);
 
             try func.emitWValue(result);
 
@@ -5241,7 +5244,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         var lanes = mem.asBytes(operands[1..]);
         for (0..@as(usize, @intCast(mask_len))) |index| {
-            const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
+            const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
             const base_index = if (mask_elem >= 0)
                 @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem))
             else
@@ -5273,20 +5276,20 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const result_ty = func.typeOfIndex(inst);
-    const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
+    const len = @as(usize, @intCast(result_ty.arrayLen(zcu)));
     const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len]));
 
     const result: WValue = result_value: {
-        switch (result_ty.zigTypeTag(mod)) {
+        switch (result_ty.zigTypeTag(zcu)) {
             .Array => {
                 const result = try func.allocStack(result_ty);
-                const elem_ty = result_ty.childType(mod);
-                const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
-                const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
+                const elem_ty = result_ty.childType(zcu);
+                const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
+                const sentinel = if (result_ty.sentinel(zcu)) |sent| blk: {
                     break :blk try func.lowerConstant(sent, elem_ty);
                 } else null;
 
@@ -5321,18 +5324,18 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 }
                 break :result_value result;
             },
-            .Struct => switch (result_ty.containerLayout(mod)) {
+            .Struct => switch (result_ty.containerLayout(zcu)) {
                 .@"packed" => {
                     if (isByRef(result_ty, pt, func.target.*)) {
                         return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
-                    const packed_struct = mod.typeToPackedStruct(result_ty).?;
+                    const packed_struct = zcu.typeToPackedStruct(result_ty).?;
                     const field_types = packed_struct.field_types;
                     const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
 
                     // ensure the result is zero'd
                     const result = try func.allocLocal(backing_type);
-                    if (backing_type.bitSize(pt) <= 32)
+                    if (backing_type.bitSize(zcu) <= 32)
                         try func.addImm32(0)
                     else
                         try func.addImm64(0);
@@ -5341,15 +5344,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     var current_bit: u16 = 0;
                     for (elements, 0..) |elem, elem_index| {
                         const field_ty = Type.fromInterned(field_types.get(ip)[elem_index]);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-                        const shift_val: WValue = if (backing_type.bitSize(pt) <= 32)
+                        const shift_val: WValue = if (backing_type.bitSize(zcu) <= 32)
                             .{ .imm32 = current_bit }
                         else
                             .{ .imm64 = current_bit };
 
                         const value = try func.resolveInst(elem);
-                        const value_bit_size: u16 = @intCast(field_ty.bitSize(pt));
+                        const value_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
                         const int_ty = try pt.intType(.unsigned, value_bit_size);
 
                         // load our current result on stack so we can perform all transformations
@@ -5375,8 +5378,8 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     for (elements, 0..) |elem, elem_index| {
                         if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue;
 
-                        const elem_ty = result_ty.structFieldType(elem_index, mod);
-                        const field_offset = result_ty.structFieldOffset(elem_index, pt);
+                        const elem_ty = result_ty.structFieldType(elem_index, zcu);
+                        const field_offset = result_ty.structFieldOffset(elem_index, zcu);
                         _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
                         prev_field_offset = field_offset;
 
@@ -5404,21 +5407,21 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
     const result = result: {
         const union_ty = func.typeOfIndex(inst);
-        const layout = union_ty.unionGetLayout(pt);
-        const union_obj = mod.typeToUnion(union_ty).?;
+        const layout = union_ty.unionGetLayout(zcu);
+        const union_obj = zcu.typeToUnion(union_ty).?;
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
         const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
 
         const tag_int = blk: {
-            const tag_ty = union_ty.unionTagTypeHypothetical(mod);
-            const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+            const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
+            const enum_field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
             const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
             break :blk try func.lowerConstant(tag_val, tag_ty);
         };
@@ -5458,13 +5461,13 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :result result_ptr;
         } else {
             const operand = try func.resolveInst(extra.init);
-            const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(pt))));
-            if (field_ty.zigTypeTag(mod) == .Float) {
-                const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt)));
+            const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(zcu))));
+            if (field_ty.zigTypeTag(zcu) == .Float) {
+                const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
                 const bitcasted = try func.bitcast(field_ty, int_type, operand);
                 break :result try func.trunc(bitcasted, int_type, union_int_type);
-            } else if (field_ty.isPtrAtRuntime(mod)) {
-                const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt)));
+            } else if (field_ty.isPtrAtRuntime(zcu)) {
+                const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
                 break :result try func.intcast(operand, int_type, union_int_type);
             }
             break :result try func.intcast(operand, field_ty, union_int_type);
@@ -5497,10 +5500,10 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
 
 fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    assert(operand_ty.hasRuntimeBitsIgnoreComptime(pt));
+    const zcu = pt.zcu;
+    assert(operand_ty.hasRuntimeBitsIgnoreComptime(zcu));
     assert(op == .eq or op == .neq);
-    const payload_ty = operand_ty.optionalChild(mod);
+    const payload_ty = operand_ty.optionalChild(zcu);
 
     // We store the final result in here, which will be validated
     // if the optionals are truly equal.
@@ -5534,11 +5537,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
 /// TODO: Lower this to a compiler_rt call when bitsize > 128
 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    assert(operand_ty.abiSize(pt) >= 16);
+    const zcu = pt.zcu;
+    assert(operand_ty.abiSize(zcu) >= 16);
     assert(!(lhs != .stack and rhs == .stack));
-    if (operand_ty.bitSize(pt) > 128) {
-        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(pt)});
+    if (operand_ty.bitSize(zcu) > 128) {
+        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)});
     }
 
     var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
@@ -5561,7 +5564,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
             }
         },
         else => {
-            const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
+            const ty = if (operand_ty.isSignedInt(zcu)) Type.i64 else Type.u64;
             // leave those values on top of the stack for '.select'
             const lhs_lsb = try func.load(lhs, Type.u64, 0);
             const rhs_lsb = try func.load(rhs, Type.u64, 0);
@@ -5577,11 +5580,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 
 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const un_ty = func.typeOf(bin_op.lhs).childType(mod);
+    const un_ty = func.typeOf(bin_op.lhs).childType(zcu);
     const tag_ty = func.typeOf(bin_op.rhs);
-    const layout = un_ty.unionGetLayout(pt);
+    const layout = un_ty.unionGetLayout(zcu);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
     const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5601,12 +5604,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+    const zcu = func.pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const un_ty = func.typeOf(ty_op.operand);
     const tag_ty = func.typeOfIndex(inst);
-    const layout = un_ty.unionGetLayout(pt);
+    const layout = un_ty.unionGetLayout(zcu);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -5705,11 +5708,11 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
 
 fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
-    const payload_ty = err_set_ty.errorUnionPayload(mod);
+    const err_set_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const payload_ty = err_set_ty.errorUnionPayload(zcu);
     const operand = try func.resolveInst(ty_op.operand);
 
     // set the error tag to '0' to mark the error union as non-error
@@ -5717,28 +5720,28 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @intCast(errUnionErrorOffset(payload_ty, pt)),
+        @intCast(errUnionErrorOffset(payload_ty, zcu)),
     );
 
     const result = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))), .new);
+        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
     };
     return func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const field_ptr = try func.resolveInst(extra.field_ptr);
-    const parent_ty = ty_pl.ty.toType().childType(mod);
-    const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
+    const parent_ty = ty_pl.ty.toType().childType(zcu);
+    const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
 
     const result = if (field_offset != 0) result: {
         const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5754,8 +5757,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    if (ptr_ty.isSlice(mod)) {
+    const zcu = pt.zcu;
+    if (ptr_ty.isSlice(zcu)) {
         return func.slicePtr(ptr);
     } else {
         return ptr;
@@ -5764,26 +5767,26 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
 
 fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
     const dst_ty = func.typeOf(bin_op.lhs);
-    const ptr_elem_ty = dst_ty.childType(mod);
+    const ptr_elem_ty = dst_ty.childType(zcu);
     const src = try func.resolveInst(bin_op.rhs);
     const src_ty = func.typeOf(bin_op.rhs);
-    const len = switch (dst_ty.ptrSize(mod)) {
+    const len = switch (dst_ty.ptrSize(zcu)) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
-            if (ptr_elem_ty.abiSize(pt) != 1) {
+            if (ptr_elem_ty.abiSize(zcu) != 1) {
                 try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(pt))) });
+                try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
                 try func.addTag(.i32_mul);
                 try func.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(pt))),
+            .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(zcu) * ptr_elem_ty.childType(zcu).abiSize(zcu))),
         }),
         .C, .Many => unreachable,
     };
@@ -5805,17 +5808,17 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.typeOf(ty_op.operand);
 
-    if (op_ty.zigTypeTag(mod) == .Vector) {
+    if (op_ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement @popCount for vectors", .{});
     }
 
-    const int_info = op_ty.intInfo(mod);
+    const int_info = op_ty.intInfo(zcu);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
         return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5824,14 +5827,14 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     switch (wasm_bits) {
         32 => {
             try func.emitWValue(operand);
-            if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
+            if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
                 _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
             }
             try func.addTag(.i32_popcnt);
         },
         64 => {
             try func.emitWValue(operand);
-            if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
+            if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
                 _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
             }
             try func.addTag(.i64_popcnt);
@@ -5842,7 +5845,7 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             _ = try func.load(operand, Type.u64, 0);
             try func.addTag(.i64_popcnt);
             _ = try func.load(operand, Type.u64, 8);
-            if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
+            if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
                 _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
             }
             try func.addTag(.i64_popcnt);
@@ -5857,17 +5860,17 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.typeOf(ty_op.operand);
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement @bitReverse for vectors", .{});
     }
 
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
         return func.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits});
@@ -5933,7 +5936,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 defer tmp.free(func);
                 try func.addLabel(.local_tee, tmp.local.value);
                 try func.emitWValue(.{ .imm64 = 128 - bits });
-                if (ty.isSignedInt(mod)) {
+                if (ty.isSignedInt(zcu)) {
                     try func.addTag(.i64_shr_s);
                 } else {
                     try func.addTag(.i64_shr_u);
@@ -5969,7 +5972,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
     const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
     const name_ty = Type.slice_const_u8_sentinel_0;
-    const abi_size = name_ty.abiSize(pt);
+    const abi_size = name_ty.abiSize(pt.zcu);
 
     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
     try func.emitWValue(error_name_value);
@@ -6000,8 +6003,8 @@ fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerE
 
 /// NOTE: Allocates a place for the result on the virtual stack when the integer size > 64 bits
 fn intZeroValue(func: *CodeGen, ty: Type) InnerError!WValue {
-    const mod = func.bin_file.base.comp.module.?;
-    const int_info = ty.intInfo(mod);
+    const zcu = func.bin_file.base.comp.module.?;
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
     };
@@ -6027,13 +6030,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const rhs = try func.resolveInst(extra.rhs);
     const ty = func.typeOf(extra.lhs);
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 128) {
         return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -6058,7 +6061,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     defer bit_tmp.free(func);
 
     const result = try func.allocStack(func.typeOfIndex(inst));
-    const offset: u32 = @intCast(ty.abiSize(pt));
+    const offset: u32 = @intCast(ty.abiSize(zcu));
     try func.store(result, op_tmp, ty, 0);
     try func.store(result, bit_tmp, Type.u1, offset);
 
@@ -6067,7 +6070,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
 
 fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -6076,18 +6079,18 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.typeOf(extra.lhs);
     const rhs_ty = func.typeOf(extra.rhs);
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
     };
 
     // Ensure rhs is coerced to lhs as they must have the same WebAssembly types
     // before we can perform any binary operation.
-    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
+    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(zcu).bits).?;
     // If wasm_bits == 128, compiler-rt expects i32 for shift
     const rhs_final = if (wasm_bits != rhs_wasm_bits and wasm_bits == 64) blk: {
         const rhs_casted = try func.intcast(rhs, rhs_ty, ty);
@@ -6105,7 +6108,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     defer overflow_local.free(func);
 
     const result = try func.allocStack(func.typeOfIndex(inst));
-    const offset: u32 = @intCast(ty.abiSize(pt));
+    const offset: u32 = @intCast(ty.abiSize(zcu));
     try func.store(result, shl, ty, 0);
     try func.store(result, overflow_local, Type.u1, offset);
 
@@ -6120,9 +6123,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const rhs = try func.resolveInst(extra.rhs);
     const ty = func.typeOf(extra.lhs);
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
@@ -6131,7 +6134,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var overflow_bit = try func.ensureAllocLocal(Type.u1);
     defer overflow_bit.free(func);
 
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
     };
@@ -6238,7 +6241,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     defer bin_op_local.free(func);
 
     const result = try func.allocStack(func.typeOfIndex(inst));
-    const offset: u32 = @intCast(ty.abiSize(pt));
+    const offset: u32 = @intCast(ty.abiSize(zcu));
     try func.store(result, bin_op_local, ty, 0);
     try func.store(result, overflow_bit, Type.u1, offset);
 
@@ -6248,22 +6251,22 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .max or op == .min);
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const ty = func.typeOfIndex(inst);
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
 
-    if (ty.abiSize(pt) > 16) {
+    if (ty.abiSize(zcu) > 16) {
         return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }
 
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    if (ty.zigTypeTag(mod) == .Float) {
+    if (ty.zigTypeTag(zcu) == .Float) {
         var fn_name_buf: [64]u8 = undefined;
         const float_bits = ty.floatBits(func.target.*);
         const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{
@@ -6288,12 +6291,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 
 fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
 
     const ty = func.typeOfIndex(inst);
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: `@mulAdd` for vectors", .{});
     }
 
@@ -6323,16 +6326,16 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = func.typeOf(ty_op.operand);
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: `@clz` for vectors", .{});
     }
 
     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6374,17 +6377,17 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = func.typeOf(ty_op.operand);
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: `@ctz` for vectors", .{});
     }
 
     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6497,12 +6500,12 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
     const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
-    const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
+    const err_union_ty = func.typeOf(extra.data.ptr).childType(zcu);
     const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
     return func.finishAir(inst, result, &.{extra.data.ptr});
 }
@@ -6516,25 +6519,25 @@ fn lowerTry(
     operand_is_ptr: bool,
 ) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (operand_is_ptr) {
         return func.fail("TODO: lowerTry for pointers", .{});
     }
 
-    const pl_ty = err_union_ty.errorUnionPayload(mod);
-    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(pt);
+    const pl_ty = err_union_ty.errorUnionPayload(zcu);
+    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(zcu);
 
-    if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+    if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
         // Block we can jump out of when the error is not set
         try func.startBlock(.block, wasm.block_empty);
 
         // check if the error tag is set for the error union.
         try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+            const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
+                .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
             });
         }
         try func.addTag(.i32_eqz);
@@ -6556,7 +6559,7 @@ fn lowerTry(
         return .none;
     }
 
-    const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+    const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
     if (isByRef(pl_ty, pt, func.target.*)) {
         return buildPointerOffset(func, err_union, pl_offset, .new);
     }
@@ -6566,16 +6569,16 @@ fn lowerTry(
 
 fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
 
-    if (ty.zigTypeTag(mod) == .Vector) {
+    if (ty.zigTypeTag(zcu) == .Vector) {
         return func.fail("TODO: @byteSwap for vectors", .{});
     }
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits});
     };
@@ -6649,15 +6652,15 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    if (ty.isUnsignedInt(mod)) {
+    if (ty.isUnsignedInt(zcu)) {
         _ = try func.binOp(lhs, rhs, ty, .div);
-    } else if (ty.isSignedInt(mod)) {
-        const int_bits = ty.intInfo(mod).bits;
+    } else if (ty.isSignedInt(zcu)) {
+        const int_bits = ty.intInfo(zcu).bits;
         const wasm_bits = toWasmBits(int_bits) orelse {
             return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
         };
@@ -6767,19 +6770,19 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    if (ty.isUnsignedInt(mod)) {
+    if (ty.isUnsignedInt(zcu)) {
         _ = try func.binOp(lhs, rhs, ty, .rem);
-    } else if (ty.isSignedInt(mod)) {
+    } else if (ty.isSignedInt(zcu)) {
         // The wasm rem instruction gives the remainder after truncating division (rounding towards
         // 0), equivalent to @rem.
         // We make use of the fact that:
         // @mod(a, b) = @rem(@rem(a, b) + b, b)
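         // For example, with a = -7 and b = 3: @rem(-7, 3) = -1, and
         // @rem(-1 + 3, 3) = @rem(2, 3) = 2, which matches @mod(-7, 3) = 2.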
-        const int_bits = ty.intInfo(mod).bits;
+        const int_bits = ty.intInfo(zcu).bits;
         const wasm_bits = toWasmBits(int_bits) orelse {
             return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
         };
@@ -6802,9 +6805,9 @@ fn airSatMul(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty = func.typeOfIndex(inst);
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
 
     const lhs = try func.resolveInst(bin_op.lhs);
@@ -6903,12 +6906,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
 
     if (int_info.bits > 64) {
@@ -6950,8 +6953,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 
 fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const int_info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits).?;
     const is_wasm_bits = wasm_bits == int_info.bits;
     const ext_ty = if (!is_wasm_bits) try pt.intType(int_info.signedness, wasm_bits) else ty;
@@ -7009,9 +7012,9 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty = func.typeOfIndex(inst);
-    const int_info = ty.intInfo(mod);
+    const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 64) {
         return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
@@ -7130,7 +7133,7 @@ fn callIntrinsic(
 
     // Always pass arguments using the C ABI
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt, func.target.*);
     defer func_type.deinit(func.gpa);
     const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
@@ -7148,16 +7151,16 @@ fn callIntrinsic(
     // Lower all arguments to the stack before we call our function
     for (args, 0..) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
-        assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(pt));
+        assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu));
         try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
     }
 
     // Actually call our intrinsic
     try func.addLabel(.call, @intFromEnum(symbol_index));
 
-    if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
         return .none;
-    } else if (return_type.isNoReturn(mod)) {
+    } else if (return_type.isNoReturn(zcu)) {
         try func.addTag(.@"unreachable");
         return .none;
     } else if (want_sret_param) {
@@ -7184,8 +7187,8 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
     defer arena_allocator.deinit();
@@ -7198,9 +7201,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
         return @intFromEnum(loc.index);
     }
 
-    const int_tag_ty = enum_ty.intTagType(mod);
+    const int_tag_ty = enum_ty.intTagType(zcu);
 
-    if (int_tag_ty.bitSize(pt) > 64) {
+    if (int_tag_ty.bitSize(zcu) > 64) {
         return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
     }
 
@@ -7220,7 +7223,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
 
     // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
     // Generate an if-else chain for each tag value as well as its constant.
-    const tag_names = enum_ty.enumFields(mod);
+    const tag_names = enum_ty.enumFields(zcu);
     for (0..tag_names.len) |tag_index| {
         const tag_name = tag_names.get(ip)[tag_index];
         const tag_name_len = tag_name.length(ip);
@@ -7345,15 +7348,15 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
 
 fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
     const error_set_ty = ty_op.ty.toType();
     const result = try func.allocLocal(Type.bool);
 
-    const names = error_set_ty.errorSetNames(mod);
+    const names = error_set_ty.errorSetNames(zcu);
     var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
     defer values.deinit();
 
@@ -7432,12 +7435,12 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
 
 fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
     const ptr_ty = func.typeOf(extra.ptr);
-    const ty = ptr_ty.childType(mod);
+    const ty = ptr_ty.childType(zcu);
     const result_ty = func.typeOfIndex(inst);
 
     const ptr_operand = try func.resolveInst(extra.ptr);
@@ -7451,7 +7454,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr_operand);
         try func.lowerToStack(expected_val);
         try func.lowerToStack(new_val);
-        try func.addAtomicMemArg(switch (ty.abiSize(pt)) {
+        try func.addAtomicMemArg(switch (ty.abiSize(zcu)) {
             1 => .i32_atomic_rmw8_cmpxchg_u,
             2 => .i32_atomic_rmw16_cmpxchg_u,
             4 => .i32_atomic_rmw_cmpxchg,
@@ -7459,14 +7462,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
         }, .{
             .offset = ptr_operand.offset(),
-            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         });
         try func.addLabel(.local_tee, val_local.local.value);
         _ = try func.cmp(.stack, expected_val, ty, .eq);
         try func.addLabel(.local_set, cmp_result.local.value);
         break :val val_local;
     } else val: {
-        if (ty.abiSize(pt) > 8) {
+        if (ty.abiSize(zcu) > 8) {
             return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
         }
         const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7490,7 +7493,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addTag(.i32_and);
         const and_result = try WValue.toLocal(.stack, func, Type.bool);
         const result_ptr = try func.allocStack(result_ty);
-        try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(pt))));
+        try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu))));
         try func.store(result_ptr, ptr_val, ty, 0);
         break :val result_ptr;
     } else val: {
@@ -7511,7 +7514,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.typeOfIndex(inst);
 
     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) {
             1 => .i32_atomic_load8_u,
             2 => .i32_atomic_load16_u,
             4 => .i32_atomic_load,
@@ -7521,7 +7524,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(pt.zcu).toByteUnits().?),
         });
     } else {
         _ = try func.load(ptr, ty, 0);
@@ -7532,7 +7535,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
 
@@ -7556,7 +7559,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.emitWValue(ptr);
                 try func.emitWValue(value);
                 if (op == .Nand) {
-                    const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
+                    const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
 
                     const and_res = try func.binOp(value, operand, ty, .@"and");
                     if (wasm_bits == 32)
@@ -7573,7 +7576,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     try func.addTag(.select);
                 }
                 try func.addAtomicMemArg(
-                    switch (ty.abiSize(pt)) {
+                    switch (ty.abiSize(zcu)) {
                         1 => .i32_atomic_rmw8_cmpxchg_u,
                         2 => .i32_atomic_rmw16_cmpxchg_u,
                         4 => .i32_atomic_rmw_cmpxchg,
@@ -7582,7 +7585,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     },
                     .{
                         .offset = ptr.offset(),
-                        .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+                        .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
                     },
                 );
                 const select_res = try func.allocLocal(ty);
@@ -7601,7 +7604,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => {
                 try func.emitWValue(ptr);
                 try func.emitWValue(operand);
-                const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
+                const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
                     1 => switch (op) {
                         .Xchg => .i32_atomic_rmw8_xchg_u,
                         .Add => .i32_atomic_rmw8_add_u,
@@ -7642,7 +7645,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 };
                 try func.addAtomicMemArg(tag, .{
                     .offset = ptr.offset(),
-                    .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+                    .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
                 });
                 return func.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand });
             },
@@ -7670,7 +7673,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     .Xor => .xor,
                     else => unreachable,
                 });
-                if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
+                if (ty.isInt(zcu) and (op == .Add or op == .Sub)) {
                     _ = try func.wrapOperand(.stack, ty);
                 }
                 try func.store(.stack, .stack, ty, ptr.offset());
@@ -7686,7 +7689,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.store(.stack, .stack, ty, ptr.offset());
             },
             .Nand => {
-                const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
+                const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
 
                 try func.emitWValue(ptr);
                 const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7721,16 +7724,16 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pt = func.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const ptr = try func.resolveInst(bin_op.lhs);
     const operand = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const ty = ptr_ty.childType(mod);
+    const ty = ptr_ty.childType(zcu);
 
     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
             1 => .i32_atomic_store8,
             2 => .i32_atomic_store16,
             4 => .i32_atomic_store,
@@ -7741,7 +7744,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.lowerToStack(operand);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+            .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         });
     } else {
         try func.store(ptr, operand, ty, 0);
@@ -7760,12 +7763,12 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
     const pt = func.pt;
-    const mod = pt.zcu;
-    return func.air.typeOf(inst, &mod.intern_pool);
+    const zcu = pt.zcu;
+    return func.air.typeOf(inst, &zcu.intern_pool);
 }
 
 fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
     const pt = func.pt;
-    const mod = pt.zcu;
-    return func.air.typeOfIndex(inst, &mod.intern_pool);
+    const zcu = pt.zcu;
+    return func.air.typeOfIndex(inst, &zcu.intern_pool);
 }
src/arch/x86_64/abi.zig
@@ -44,7 +44,7 @@ pub const Class = enum {
     }
 };
 
-pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
+pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
     // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
     // "There's a strict one-to-one correspondence between a function call's arguments
     // and the registers used for those arguments. Any argument that doesn't fit in 8
@@ -53,7 +53,7 @@ pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
     // "All floating point operations are done using the 16 XMM registers."
     // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
     // as if they were integers of the same size."
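     // For example, an extern struct with an ABI size of 8 is passed as an
     // integer, while one with an ABI size of 12 is classified .memory and
     // passed by reference.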
-    switch (ty.zigTypeTag(pt.zcu)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Pointer,
         .Int,
         .Bool,
@@ -68,12 +68,12 @@ pub fn classifyWindows(ty: Type, pt: Zcu.PerThread) Class {
         .ErrorUnion,
         .AnyFrame,
         .Frame,
-        => switch (ty.abiSize(pt)) {
+        => switch (ty.abiSize(zcu)) {
             0 => unreachable,
             1, 2, 4, 8 => return .integer,
-            else => switch (ty.zigTypeTag(pt.zcu)) {
+            else => switch (ty.zigTypeTag(zcu)) {
                 .Int => return .win_i128,
-                .Struct, .Union => if (ty.containerLayout(pt.zcu) == .@"packed") {
+                .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") {
                     return .win_i128;
                 } else {
                     return .memory;
@@ -100,14 +100,14 @@ pub const Context = enum { ret, arg, field, other };
 
 /// There are a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
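 /// For example, a 16-byte extern struct containing an f64 followed by an i64
 /// classifies as .{ .sse, .integer, .none, .none, .none, .none, .none, .none }.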
-pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Context) [8]Class {
+pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
     const memory_class = [_]Class{
         .memory, .none, .none, .none,
         .none,   .none, .none, .none,
     };
     var result = [1]Class{.none} ** 8;
-    switch (ty.zigTypeTag(pt.zcu)) {
-        .Pointer => switch (ty.ptrSize(pt.zcu)) {
+    switch (ty.zigTypeTag(zcu)) {
+        .Pointer => switch (ty.ptrSize(zcu)) {
             .Slice => {
                 result[0] = .integer;
                 result[1] = .integer;
@@ -119,7 +119,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
             },
         },
         .Int, .Enum, .ErrorSet => {
-            const bits = ty.intInfo(pt.zcu).bits;
+            const bits = ty.intInfo(zcu).bits;
             if (bits <= 64) {
                 result[0] = .integer;
                 return result;
@@ -185,8 +185,8 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
             else => unreachable,
         },
         .Vector => {
-            const elem_ty = ty.childType(pt.zcu);
-            const bits = elem_ty.bitSize(pt) * ty.arrayLen(pt.zcu);
+            const elem_ty = ty.childType(zcu);
+            const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
             if (elem_ty.toIntern() == .bool_type) {
                 if (bits <= 32) return .{
                     .integer, .none, .none, .none,
@@ -250,7 +250,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
             return memory_class;
         },
         .Optional => {
-            if (ty.isPtrLikeOptional(pt.zcu)) {
+            if (ty.isPtrLikeOptional(zcu)) {
                 result[0] = .integer;
                 return result;
             }
@@ -261,8 +261,8 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
             // it contains unaligned fields, it has class MEMORY"
             // "If the size of the aggregate exceeds a single eightbyte, each is classified
             // separately.".
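             // For example, a 24-byte extern struct spans three eightbytes, and
             // each eightbyte is classified on its own before being combined below.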
-            const ty_size = ty.abiSize(pt);
-            switch (ty.containerLayout(pt.zcu)) {
+            const ty_size = ty.abiSize(zcu);
+            switch (ty.containerLayout(zcu)) {
                 .auto, .@"extern" => {},
                 .@"packed" => {
                     assert(ty_size <= 16);
@@ -274,10 +274,10 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
             if (ty_size > 64)
                 return memory_class;
 
-            _ = if (pt.zcu.typeToStruct(ty)) |loaded_struct|
-                classifySystemVStruct(&result, 0, loaded_struct, pt, target)
-            else if (pt.zcu.typeToUnion(ty)) |loaded_union|
-                classifySystemVUnion(&result, 0, loaded_union, pt, target)
+            _ = if (zcu.typeToStruct(ty)) |loaded_struct|
+                classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
+            else if (zcu.typeToUnion(ty)) |loaded_union|
+                classifySystemVUnion(&result, 0, loaded_union, zcu, target)
             else
                 unreachable;
 
@@ -306,7 +306,7 @@ pub fn classifySystemV(ty: Type, pt: Zcu.PerThread, target: std.Target, ctx: Con
             return result;
         },
         .Array => {
-            const ty_size = ty.abiSize(pt);
+            const ty_size = ty.abiSize(zcu);
             if (ty_size <= 8) {
                 result[0] = .integer;
                 return result;
@@ -326,10 +326,10 @@ fn classifySystemVStruct(
     result: *[8]Class,
     starting_byte_offset: u64,
     loaded_struct: InternPool.LoadedStructType,
-    pt: Zcu.PerThread,
+    zcu: *Zcu,
     target: std.Target,
 ) u64 {
-    const ip = &pt.zcu.intern_pool;
+    const ip = &zcu.intern_pool;
     var byte_offset = starting_byte_offset;
     var field_it = loaded_struct.iterateRuntimeOrder(ip);
     while (field_it.next()) |field_index| {
@@ -338,29 +338,29 @@ fn classifySystemVStruct(
         byte_offset = std.mem.alignForward(
             u64,
             byte_offset,
-            field_align.toByteUnits() orelse field_ty.abiAlignment(pt).toByteUnits().?,
+            field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
         );
-        if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
+        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
             switch (field_loaded_struct.layout) {
                 .auto, .@"extern" => {
-                    byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, pt, target);
+                    byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
                     continue;
                 },
                 .@"packed" => {},
             }
-        } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
+        } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
             switch (field_loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
-                    byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
+                    byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
                     continue;
                 },
                 .@"packed" => {},
             }
         }
-        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
+        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
         for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
             result_class.* = result_class.combineSystemV(field_class);
-        byte_offset += field_ty.abiSize(pt);
+        byte_offset += field_ty.abiSize(zcu);
     }
     const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
     std.debug.assert(final_byte_offset == std.mem.alignForward(
@@ -375,30 +375,30 @@ fn classifySystemVUnion(
     result: *[8]Class,
     starting_byte_offset: u64,
     loaded_union: InternPool.LoadedUnionType,
-    pt: Zcu.PerThread,
+    zcu: *Zcu,
     target: std.Target,
 ) u64 {
-    const ip = &pt.zcu.intern_pool;
+    const ip = &zcu.intern_pool;
     for (0..loaded_union.field_types.len) |field_index| {
         const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
-        if (pt.zcu.typeToStruct(field_ty)) |field_loaded_struct| {
+        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
             switch (field_loaded_struct.layout) {
                 .auto, .@"extern" => {
-                    _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, pt, target);
+                    _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
                     continue;
                 },
                 .@"packed" => {},
             }
-        } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
+        } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
             switch (field_loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
-                    _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
+                    _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
                     continue;
                 },
                 .@"packed" => {},
             }
         }
-        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, pt, target, .field), .none);
+        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
         for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
             result_class.* = result_class.combineSystemV(field_class);
     }
src/arch/x86_64/CodeGen.zig
@@ -732,14 +732,14 @@ const FrameAlloc = struct {
             .ref_count = 0,
         };
     }
-    fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc {
+    fn initType(ty: Type, zcu: *Zcu) FrameAlloc {
         return init(.{
-            .size = ty.abiSize(pt),
-            .alignment = ty.abiAlignment(pt),
+            .size = ty.abiSize(zcu),
+            .alignment = ty.abiAlignment(zcu),
         });
     }
-    fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc {
-        const abi_size = ty.abiSize(pt);
+    fn initSpill(ty: Type, zcu: *Zcu) FrameAlloc {
+        const abi_size = ty.abiSize(zcu);
         const spill_size = if (abi_size < 8)
             math.ceilPowerOfTwoAssert(u64, abi_size)
         else
@@ -747,7 +747,7 @@ const FrameAlloc = struct {
         return init(.{
             .size = spill_size,
             .pad = @intCast(spill_size - abi_size),
-            .alignment = ty.abiAlignment(pt).maxStrict(
+            .alignment = ty.abiAlignment(zcu).maxStrict(
                 Alignment.fromNonzeroByteUnits(@min(spill_size, 8)),
             ),
         });
@@ -756,7 +756,7 @@ const FrameAlloc = struct {
 
 const StackAllocation = struct {
     inst: ?Air.Inst.Index,
-    /// TODO do we need size? should be determined by inst.ty.abiSize(pt)
+    /// TODO do we need size? should be determined by inst.ty.abiSize(zcu)
     size: u32,
 };
 
@@ -859,11 +859,11 @@ pub fn generate(
     function.args = call_info.args;
     function.ret_mcv = call_info.return_value;
     function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(pt),
-        .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align),
+        .size = Type.usize.abiSize(zcu),
+        .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align),
     }));
     function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(pt),
+        .size = Type.usize.abiSize(zcu),
         .alignment = Alignment.min(
             call_info.stack_align,
             Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -1872,8 +1872,8 @@ fn asmMemoryRegisterImmediate(
 
 fn gen(self: *Self) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const fn_info = mod.typeToFunc(self.fn_type).?;
+    const zcu = pt.zcu;
+    const fn_info = zcu.typeToFunc(self.fn_type).?;
     const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
     if (cc != .Naked) {
         try self.asmRegister(.{ ._, .push }, .rbp);
@@ -1890,7 +1890,7 @@ fn gen(self: *Self) InnerError!void {
                 // The address at which to store the return value for the caller is in a
                 // register which the callee is free to clobber. Therefore, we purposely
                 // spill it to the stack immediately.
-                const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt));
+                const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, zcu));
                 try self.genSetMem(
                     .{ .frame = frame_index },
                     0,
@@ -2099,8 +2099,8 @@ fn checkInvariantsAfterAirInst(self: *Self, inst: Air.Inst.Index, old_air_bookke
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     self.arg_index = 0;
@@ -2370,9 +2370,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
 fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
         .Enum => {
             const enum_ty = Type.fromInterned(lazy_sym.ty);
             wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
@@ -2385,7 +2385,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
             const ret_reg = param_regs[0];
             const enum_mcv = MCValue{ .register = param_regs[1] };
 
-            const exitlude_jump_relocs = try self.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(mod));
+            const exitlude_jump_relocs = try self.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(zcu));
             defer self.gpa.free(exitlude_jump_relocs);
 
             const data_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
@@ -2394,7 +2394,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
             try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty.toIntern() });
 
             var data_off: i32 = 0;
-            const tag_names = enum_ty.enumFields(mod);
+            const tag_names = enum_ty.enumFields(zcu);
             for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
                 const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
                 const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
@@ -2630,14 +2630,14 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_ty = self.typeOfIndex(inst);
-    const val_ty = ptr_ty.childType(mod);
+    const val_ty = ptr_ty.childType(zcu);
     return self.allocFrameIndex(FrameAlloc.init(.{
-        .size = math.cast(u32, val_ty.abiSize(pt)) orelse {
+        .size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
             return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
         },
-        .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"),
+        .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
     }));
 }
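
The hunk above shows the split this rework converges on: pure layout and type queries (abiSize, ptrAlignment, childType, ...) now take *Zcu, while anything that interns new values or formats types for diagnostics still goes through the Zcu.PerThread handle. A minimal sketch of that convention, as a fragment inside any CodeGen method (names mirror the surrounding code; this is not standalone):

    const pt = self.pt; // Zcu.PerThread: still needed to intern values / format types
    const zcu = pt.zcu; // *Zcu: now sufficient for layout and type queries
    const size = ty.abiSize(zcu); // query: takes *Zcu after this change
    const one = try pt.intValue(ty, 1); // creation: still routed through pt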
 
@@ -2651,20 +2651,20 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
 
 fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
+    const zcu = pt.zcu;
+    const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
     };
 
     if (reg_ok) need_mem: {
-        if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) {
+        if (abi_size <= @as(u32, switch (ty.zigTypeTag(zcu)) {
             .Float => switch (ty.floatBits(self.target.*)) {
                 16, 32, 64, 128 => 16,
                 80 => break :need_mem,
                 else => unreachable,
             },
-            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-                .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+            .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+                .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                     16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
                     80 => break :need_mem,
                     else => unreachable,
@@ -2679,21 +2679,21 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
         }
     }
 
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, pt));
+    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, zcu));
     return .{ .load_frame = .{ .index = frame_index } };
 }
 
 fn regClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
     const pt = self.pt;
-    const mod = pt.zcu;
-    return switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    return switch (ty.zigTypeTag(zcu)) {
         .Float => switch (ty.floatBits(self.target.*)) {
             80 => abi.RegisterClass.x87,
             else => abi.RegisterClass.sse,
         },
-        .Vector => switch (ty.childType(mod).toIntern()) {
+        .Vector => switch (ty.childType(zcu).toIntern()) {
             .bool_type, .u1_type => abi.RegisterClass.gp,
-            else => if (ty.isAbiInt(mod) and ty.intInfo(mod).bits == 1)
+            else => if (ty.isAbiInt(zcu) and ty.intInfo(zcu).bits == 1)
                 abi.RegisterClass.gp
             else
                 abi.RegisterClass.sse,
@@ -3001,13 +3001,13 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const dst_ty = self.typeOfIndex(inst);
-    const dst_scalar_ty = dst_ty.scalarType(mod);
+    const dst_scalar_ty = dst_ty.scalarType(zcu);
     const dst_bits = dst_scalar_ty.floatBits(self.target.*);
     const src_ty = self.typeOf(ty_op.operand);
-    const src_scalar_ty = src_ty.scalarType(mod);
+    const src_scalar_ty = src_ty.scalarType(zcu);
     const src_bits = src_scalar_ty.floatBits(self.target.*);
 
     const result = result: {
@@ -3032,7 +3032,7 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
             },
             else => unreachable,
         }) {
-            if (dst_ty.isVector(mod)) break :result null;
+            if (dst_ty.isVector(zcu)) break :result null;
             var callee_buf: ["__extend?f?f2".len]u8 = undefined;
             break :result try self.genCall(.{ .lib = .{
                 .return_type = self.floatCompilerRtAbiType(dst_scalar_ty, src_scalar_ty).toIntern(),
@@ -3044,18 +3044,18 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
             } }, &.{src_scalar_ty}, &.{.{ .air_ref = ty_op.operand }});
         }
 
-        const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+        const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
         const src_mcv = try self.resolveInst(ty_op.operand);
         const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
             src_mcv
         else
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
         const dst_reg = dst_mcv.getReg().?;
-        const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(pt), 16)));
+        const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(zcu), 16)));
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const vec_len = if (dst_ty.isVector(mod)) dst_ty.vectorLen(mod) else 1;
+        const vec_len = if (dst_ty.isVector(zcu)) dst_ty.vectorLen(zcu) else 1;
         if (src_bits == 16) {
             assert(self.hasFeature(.f16c));
             const mat_src_reg = if (src_mcv.isRegister())
@@ -3137,30 +3137,30 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const src_ty = self.typeOf(ty_op.operand);
     const dst_ty = self.typeOfIndex(inst);
 
     const result = @as(?MCValue, result: {
-        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
 
-        const src_int_info = src_ty.intInfo(mod);
-        const dst_int_info = dst_ty.intInfo(mod);
+        const src_int_info = src_ty.intInfo(zcu);
+        const dst_int_info = dst_ty.intInfo(zcu);
         const extend = switch (src_int_info.signedness) {
             .signed => dst_int_info,
             .unsigned => src_int_info,
         }.signedness;
 
         const src_mcv = try self.resolveInst(ty_op.operand);
-        if (dst_ty.isVector(mod)) {
-            const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+        if (dst_ty.isVector(zcu)) {
+            const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
             const max_abi_size = @max(dst_abi_size, src_abi_size);
             if (max_abi_size > @as(u32, if (self.hasFeature(.avx2)) 32 else 16)) break :result null;
             const has_avx = self.hasFeature(.avx);
 
-            const dst_elem_abi_size = dst_ty.childType(mod).abiSize(pt);
-            const src_elem_abi_size = src_ty.childType(mod).abiSize(pt);
+            const dst_elem_abi_size = dst_ty.childType(zcu).abiSize(zcu);
+            const src_elem_abi_size = src_ty.childType(zcu).abiSize(zcu);
             switch (math.order(dst_elem_abi_size, src_elem_abi_size)) {
                 .lt => {
                     const mir_tag: Mir.Inst.FixedTag = switch (dst_elem_abi_size) {
@@ -3396,13 +3396,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const dst_ty = self.typeOfIndex(inst);
-    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
     const src_ty = self.typeOf(ty_op.operand);
-    const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+    const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
 
     const result = result: {
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -3414,7 +3414,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
             src_mcv
         else if (dst_abi_size <= 8)
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv)
-        else if (dst_abi_size <= 16 and !dst_ty.isVector(mod)) dst: {
+        else if (dst_abi_size <= 16 and !dst_ty.isVector(zcu)) dst: {
             const dst_regs =
                 try self.register_manager.allocRegs(2, .{ inst, inst }, abi.RegisterClass.gp);
             const dst_mcv: MCValue = .{ .register_pair = dst_regs };
@@ -3429,16 +3429,16 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
             break :dst dst_mcv;
         };
 
-        if (dst_ty.zigTypeTag(mod) == .Vector) {
-            assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod));
-            const dst_elem_ty = dst_ty.childType(mod);
-            const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(pt));
-            const src_elem_ty = src_ty.childType(mod);
-            const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(pt));
+        if (dst_ty.zigTypeTag(zcu) == .Vector) {
+            assert(src_ty.zigTypeTag(zcu) == .Vector and dst_ty.vectorLen(zcu) == src_ty.vectorLen(zcu));
+            const dst_elem_ty = dst_ty.childType(zcu);
+            const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(zcu));
+            const src_elem_ty = src_ty.childType(zcu);
+            const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(zcu));
 
             const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_elem_abi_size) {
                 1 => switch (src_elem_abi_size) {
-                    2 => switch (dst_ty.vectorLen(mod)) {
+                    2 => switch (dst_ty.vectorLen(zcu)) {
                         1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
                         9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null,
                         else => null,
@@ -3446,7 +3446,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                     else => null,
                 },
                 2 => switch (src_elem_abi_size) {
-                    4 => switch (dst_ty.vectorLen(mod)) {
+                    4 => switch (dst_ty.vectorLen(zcu)) {
                         1...4 => if (self.hasFeature(.avx))
                             .{ .vp_w, .ackusd }
                         else if (self.hasFeature(.sse4_1))
@@ -3461,8 +3461,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                 else => null,
             }) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(pt)});
 
-            const dst_info = dst_elem_ty.intInfo(mod);
-            const src_info = src_elem_ty.intInfo(mod);
+            const dst_info = dst_elem_ty.intInfo(zcu);
+            const src_info = src_elem_ty.intInfo(zcu);
 
             const mask_val = try pt.intValue(src_elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits));
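
The shift expression feeding mask_val builds an all-ones mask over the low dst_info.bits of a 64-bit lane, which the splat then applies per element. A standalone sketch of the arithmetic (lowBitsMask is a hypothetical helper, not compiler code):

    const std = @import("std");

    fn lowBitsMask(bits: u7) u64 {
        std.debug.assert(bits >= 1 and bits <= 64);
        // maxInt(u64) >> (64 - bits) leaves exactly `bits` low ones:
        // lowBitsMask(8) == 0xff, lowBitsMask(1) == 0x1, lowBitsMask(64) == maxInt(u64)
        return @as(u64, std.math.maxInt(u64)) >> @intCast(64 - @as(u32, bits));
    }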
 
@@ -3470,7 +3470,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                 .len = @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
                 .child = src_elem_ty.ip_index,
             });
-            const splat_abi_size: u32 = @intCast(splat_ty.abiSize(pt));
+            const splat_abi_size: u32 = @intCast(splat_ty.abiSize(zcu));
 
             const splat_val = try pt.intern(.{ .aggregate = .{
                 .ty = splat_ty.ip_index,
@@ -3528,7 +3528,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                 try self.truncateRegister(dst_ty, dst_mcv.register.to64());
             }
         } else if (dst_abi_size <= 16) {
-            const dst_info = dst_ty.intInfo(mod);
+            const dst_info = dst_ty.intInfo(zcu);
             const high_ty = try pt.intType(dst_info.signedness, dst_info.bits - 64);
             if (self.regExtraBits(high_ty) > 0) {
                 try self.truncateRegister(high_ty, dst_mcv.register_pair[1].to64());
@@ -3554,12 +3554,12 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
-    const pt = self.pt;
+    const zcu = self.pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const slice_ty = self.typeOfIndex(inst);
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
+    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
 
     const ptr_ty = self.typeOf(bin_op.lhs);
     try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }, .{});
@@ -3567,7 +3567,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const len_ty = self.typeOf(bin_op.rhs);
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(ptr_ty.abiSize(pt)),
+        @intCast(ptr_ty.abiSize(zcu)),
         len_ty,
         .{ .air_ref = bin_op.rhs },
         .{},
@@ -3585,14 +3585,14 @@ fn airUnOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 
 fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
 
     const dst_ty = self.typeOfIndex(inst);
-    if (dst_ty.isAbiInt(mod)) {
-        const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
-        const bit_size: u32 = @intCast(dst_ty.bitSize(pt));
+    if (dst_ty.isAbiInt(zcu)) {
+        const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
+        const bit_size: u32 = @intCast(dst_ty.bitSize(zcu));
         if (abi_size * 8 > bit_size) {
             const dst_lock = switch (dst_mcv) {
                 .register => |dst_reg| self.register_manager.lockRegAssumeUnused(dst_reg),
@@ -3607,7 +3607,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
                 const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                 defer self.register_manager.unlockReg(tmp_lock);
 
-                const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1));
+                const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1));
                 const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
                 try self.genSetReg(tmp_reg, hi_ty, hi_mcv, .{});
                 try self.truncateRegister(dst_ty, tmp_reg);
@@ -3627,17 +3627,17 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
 
 fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const air_tag = self.air.instructions.items(.tag);
     const air_data = self.air.instructions.items(.data);
 
     const dst_ty = self.typeOf(dst_air);
-    const dst_info = dst_ty.intInfo(mod);
+    const dst_info = dst_ty.intInfo(zcu);
     if (dst_air.toIndex()) |inst| {
         switch (air_tag[@intFromEnum(inst)]) {
             .intcast => {
                 const src_ty = self.typeOf(air_data[@intFromEnum(inst)].ty_op.operand);
-                const src_info = src_ty.intInfo(mod);
+                const src_info = src_ty.intInfo(zcu);
                 return @min(switch (src_info.signedness) {
                     .signed => switch (dst_info.signedness) {
                         .signed => src_info.bits,
@@ -3653,7 +3653,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
         }
     } else if (dst_air.toInterned()) |ip_index| {
         var space: Value.BigIntSpace = undefined;
-        const src_int = Value.fromInterned(ip_index).toBigInt(&space, pt);
+        const src_int = Value.fromInterned(ip_index).toBigInt(&space, zcu);
         return @as(u16, @intCast(src_int.bitCountTwosComp())) +
             @intFromBool(src_int.positive and dst_info.signedness == .signed);
     }
@@ -3662,18 +3662,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
 
 fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const result = result: {
         const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
         const dst_ty = self.typeOfIndex(inst);
-        switch (dst_ty.zigTypeTag(mod)) {
+        switch (dst_ty.zigTypeTag(zcu)) {
             .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
             else => {},
         }
-        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
 
-        const dst_info = dst_ty.intInfo(mod);
+        const dst_info = dst_ty.intInfo(zcu);
         const src_ty = try pt.intType(dst_info.signedness, switch (tag) {
             else => unreachable,
             .mul, .mul_wrap => @max(
@@ -3683,20 +3683,20 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
             ),
             .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits,
         });
-        const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+        const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
 
         if (dst_abi_size == 16 and src_abi_size == 16) switch (tag) {
             else => unreachable,
             .mul, .mul_wrap => {},
             .div_trunc, .div_floor, .div_exact, .rem, .mod => {
-                const signed = dst_ty.isSignedInt(mod);
+                const signed = dst_ty.isSignedInt(zcu);
                 var callee_buf: ["__udiv?i3".len]u8 = undefined;
                 const signed_div_floor_state: struct {
                     frame_index: FrameIndex,
                     state: State,
                     reloc: Mir.Inst.Index,
                 } = if (signed and tag == .div_floor) state: {
-                    const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, pt));
+                    const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, zcu));
                     try self.asmMemoryImmediate(
                         .{ ._, .mov },
                         .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
@@ -3771,7 +3771,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
                                 .rem, .mod => "mod",
                                 else => unreachable,
                             },
-                            intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))),
+                            intCompilerRtAbiName(@intCast(dst_ty.bitSize(zcu))),
                         }) catch unreachable,
                     } },
                     &.{ src_ty, src_ty },
@@ -3800,7 +3800,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
                                 .return_type = dst_ty.toIntern(),
                                 .param_types = &.{ src_ty.toIntern(), src_ty.toIntern() },
                                 .callee = std.fmt.bufPrint(&callee_buf, "__div{c}i3", .{
-                                    intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))),
+                                    intCompilerRtAbiName(@intCast(dst_ty.bitSize(zcu))),
                                 }) catch unreachable,
                             } },
                             &.{ src_ty, src_ty },
@@ -3892,10 +3892,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ty = self.typeOf(bin_op.lhs);
-    if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
+    if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
         "TODO implement airAddSat for {}",
         .{ty.fmt(pt)},
     );
@@ -3923,7 +3923,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
 
     const reg_bits = self.regBitSize(ty);
     const reg_extra_bits = self.regExtraBits(ty);
-    const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+    const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
         if (reg_extra_bits > 0) {
             try self.genShiftBinOpMir(
                 .{ ._l, .sa },
@@ -3962,7 +3962,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
         break :cc .o;
     } else cc: {
         try self.genSetReg(limit_reg, ty, .{
-            .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(pt)),
+            .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(zcu)),
         }, .{});
 
         try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
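
At this point the unsigned path is just the scalar semantics of Zig's saturating add: perform the add, and on carry let the cmov below select the all-ones limit instead of the wrapped sum. A standalone sketch of those semantics (addSat is a hypothetical helper; the actual lowering is the surrounding asm):

    const std = @import("std");

    fn addSat(a: u8, b: u8) u8 {
        const sum, const carry = @addWithOverflow(a, b);
        // on carry, saturate to the type's maximum -- the value the cmov picks
        return if (carry == 1) std.math.maxInt(u8) else sum;
    }

    test addSat {
        try std.testing.expectEqual(@as(u8, 255), addSat(200, 100));
        try std.testing.expectEqual(@as(u8, 250), addSat(200, 50));
        try std.testing.expectEqual(@as(u8, 200) +| 100, addSat(200, 100));
    }
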
@@ -3973,14 +3973,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
         break :cc .c;
     };
 
-    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
     try self.asmCmovccRegisterRegister(
         cc,
         registerAlias(dst_reg, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
     );
 
-    if (reg_extra_bits > 0 and ty.isSignedInt(mod)) try self.genShiftBinOpMir(
+    if (reg_extra_bits > 0 and ty.isSignedInt(zcu)) try self.genShiftBinOpMir(
         .{ ._r, .sa },
         ty,
         dst_mcv,
@@ -3993,10 +3993,10 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ty = self.typeOf(bin_op.lhs);
-    if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
+    if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
         "TODO implement airSubSat for {}",
         .{ty.fmt(pt)},
     );
@@ -4024,7 +4024,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
 
     const reg_bits = self.regBitSize(ty);
     const reg_extra_bits = self.regExtraBits(ty);
-    const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+    const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
         if (reg_extra_bits > 0) {
             try self.genShiftBinOpMir(
                 .{ ._l, .sa },
@@ -4067,14 +4067,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
         break :cc .c;
     };
 
-    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
     try self.asmCmovccRegisterRegister(
         cc,
         registerAlias(dst_reg, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
     );
 
-    if (reg_extra_bits > 0 and ty.isSignedInt(mod)) try self.genShiftBinOpMir(
+    if (reg_extra_bits > 0 and ty.isSignedInt(zcu)) try self.genShiftBinOpMir(
         .{ ._r, .sa },
         ty,
         dst_mcv,
@@ -4087,7 +4087,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ty = self.typeOf(bin_op.lhs);
 
@@ -4170,7 +4170,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
             break :result dst_mcv;
         }
 
-        if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
+        if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
             "TODO implement airMulSat for {}",
             .{ty.fmt(pt)},
         );
@@ -4199,7 +4199,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
         defer self.register_manager.unlockReg(limit_lock);
 
         const reg_bits = self.regBitSize(ty);
-        const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+        const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
             try self.genSetReg(limit_reg, ty, lhs_mcv, .{});
             try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
             try self.genShiftBinOpMir(
@@ -4221,7 +4221,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
         };
 
         const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
-        const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+        const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
         try self.asmCmovccRegisterRegister(
             cc,
             registerAlias(dst_mcv.register, cmov_abi_size),
@@ -4234,13 +4234,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
         const ty = self.typeOf(bin_op.lhs);
-        switch (ty.zigTypeTag(mod)) {
+        switch (ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
             .Int => {
                 try self.spillEflagsIfOccupied();
@@ -4253,7 +4253,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     .sub_with_overflow => .sub,
                     else => unreachable,
                 }, bin_op.lhs, bin_op.rhs);
-                const int_info = ty.intInfo(mod);
+                const int_info = ty.intInfo(zcu);
                 const cc: Condition = switch (int_info.signedness) {
                     .unsigned => .c,
                     .signed => .o,
@@ -4270,17 +4270,17 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     }
 
                     const frame_index =
-                        try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+                        try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(tuple_ty.structFieldOffset(1, pt)),
+                        @intCast(tuple_ty.structFieldOffset(1, zcu)),
                         Type.u1,
                         .{ .eflags = cc },
                         .{},
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(tuple_ty.structFieldOffset(0, pt)),
+                        @intCast(tuple_ty.structFieldOffset(0, zcu)),
                         ty,
                         partial_mcv,
                         .{},
@@ -4289,7 +4289,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }
 
                 const frame_index =
-                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
                 try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
                 break :result .{ .load_frame = .{ .index = frame_index } };
             },
@@ -4301,13 +4301,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const lhs_ty = self.typeOf(bin_op.lhs);
         const rhs_ty = self.typeOf(bin_op.rhs);
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
             .Int => {
                 try self.spillEflagsIfOccupied();
@@ -4318,7 +4318,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 const lhs = try self.resolveInst(bin_op.lhs);
                 const rhs = try self.resolveInst(bin_op.rhs);
 
-                const int_info = lhs_ty.intInfo(mod);
+                const int_info = lhs_ty.intInfo(zcu);
 
                 const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
                 const partial_lock = switch (partial_mcv) {
@@ -4348,18 +4348,18 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     }
 
                     const frame_index =
-                        try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+                        try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(tuple_ty.structFieldOffset(1, pt)),
-                        tuple_ty.structFieldType(1, mod),
+                        @intCast(tuple_ty.structFieldOffset(1, zcu)),
+                        tuple_ty.structFieldType(1, zcu),
                         .{ .eflags = cc },
                         .{},
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(tuple_ty.structFieldOffset(0, pt)),
-                        tuple_ty.structFieldType(0, mod),
+                        @intCast(tuple_ty.structFieldOffset(0, zcu)),
+                        tuple_ty.structFieldType(0, zcu),
                         partial_mcv,
                         .{},
                     );
@@ -4367,7 +4367,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }
 
                 const frame_index =
-                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+                    try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
                 try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
                 break :result .{ .load_frame = .{ .index = frame_index } };
             },
@@ -4385,15 +4385,15 @@ fn genSetFrameTruncatedOverflowCompare(
     overflow_cc: ?Condition,
 ) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const src_lock = switch (src_mcv) {
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
     defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const ty = tuple_ty.structFieldType(0, mod);
-    const int_info = ty.intInfo(mod);
+    const ty = tuple_ty.structFieldType(0, zcu);
+    const int_info = ty.intInfo(zcu);
 
     const hi_bits = (int_info.bits - 1) % 64 + 1;
     const hi_ty = try pt.intType(int_info.signedness, hi_bits);
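
hi_bits here is the width of the topmost 64-bit limb of the integer, so only that partial top limb needs the separate truncate-and-compare below. A sketch of the expression (hiLimbBits is a hypothetical standalone helper):

    fn hiLimbBits(bits: u16) u16 {
        // 1..64 map to themselves, then 65 -> 1, 100 -> 36, 128 -> 64
        return (bits - 1) % 64 + 1;
    }
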
@@ -4432,7 +4432,7 @@ fn genSetFrameTruncatedOverflowCompare(
         );
     }
 
-    const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt));
+    const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
     if (hi_limb_off > 0) try self.genSetMem(
         .{ .frame = frame_index },
         payload_off,
@@ -4449,8 +4449,8 @@ fn genSetFrameTruncatedOverflowCompare(
     );
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(tuple_ty.structFieldOffset(1, pt)),
-        tuple_ty.structFieldType(1, mod),
+        @intCast(tuple_ty.structFieldOffset(1, zcu)),
+        tuple_ty.structFieldType(1, zcu),
         if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
         .{},
     );
@@ -4458,18 +4458,18 @@ fn genSetFrameTruncatedOverflowCompare(
 
 fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const tuple_ty = self.typeOfIndex(inst);
     const dst_ty = self.typeOf(bin_op.lhs);
-    const result: MCValue = switch (dst_ty.zigTypeTag(mod)) {
+    const result: MCValue = switch (dst_ty.zigTypeTag(zcu)) {
         .Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
         .Int => result: {
-            const dst_info = dst_ty.intInfo(mod);
+            const dst_info = dst_ty.intInfo(zcu);
             if (dst_info.bits > 128 and dst_info.signedness == .unsigned) {
                 const slow_inc = self.hasFeature(.slow_incdec);
-                const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+                const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
                 const limb_len = math.divCeil(u32, abi_size, 8) catch unreachable;
 
                 try self.spillRegisters(&.{ .rax, .rcx, .rdx });
@@ -4480,7 +4480,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genInlineMemset(
                     dst_mcv.address(),
                     .{ .immediate = 0 },
-                    .{ .immediate = tuple_ty.abiSize(pt) },
+                    .{ .immediate = tuple_ty.abiSize(zcu) },
                     .{},
                 );
                 const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -4520,7 +4520,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         .index = temp_regs[3].to64(),
                         .scale = .@"8",
                         .disp = dst_mcv.load_frame.off +
-                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
                     } },
                 }, .rdx);
                 try self.asmSetccRegister(.c, .cl);
@@ -4544,7 +4544,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         .index = temp_regs[3].to64(),
                         .scale = .@"8",
                         .disp = dst_mcv.load_frame.off +
-                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
                     } },
                 }, .rax);
                 try self.asmSetccRegister(.c, .ch);
@@ -4593,7 +4593,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     .mod = .{ .rm = .{
                         .size = .byte,
                         .disp = dst_mcv.load_frame.off +
-                            @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
                     } },
                 }, Immediate.u(1));
                 self.performReloc(no_overflow);
@@ -4636,8 +4636,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     const dst_mcv = try self.allocRegOrMem(inst, false);
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(0, pt)),
-                        tuple_ty.structFieldType(0, mod),
+                        @intCast(tuple_ty.structFieldOffset(0, zcu)),
+                        tuple_ty.structFieldType(0, zcu),
                         result,
                         .{},
                     );
@@ -4648,8 +4648,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     );
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(1, pt)),
-                        tuple_ty.structFieldType(1, mod),
+                        @intCast(tuple_ty.structFieldOffset(1, zcu)),
+                        tuple_ty.structFieldType(1, zcu),
                         .{ .eflags = .ne },
                         .{},
                     );
@@ -4760,15 +4760,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     const dst_mcv = try self.allocRegOrMem(inst, false);
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(0, pt)),
-                        tuple_ty.structFieldType(0, mod),
+                        @intCast(tuple_ty.structFieldOffset(0, zcu)),
+                        tuple_ty.structFieldType(0, zcu),
                         .{ .register_pair = .{ .rax, .rdx } },
                         .{},
                     );
                     try self.genSetMem(
                         .{ .frame = dst_mcv.load_frame.index },
-                        @intCast(tuple_ty.structFieldOffset(1, pt)),
-                        tuple_ty.structFieldType(1, mod),
+                        @intCast(tuple_ty.structFieldOffset(1, zcu)),
+                        tuple_ty.structFieldType(1, zcu),
                         .{ .register = tmp_regs[1] },
                         .{},
                     );
@@ -4800,7 +4800,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     self.eflags_inst = inst;
                     break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
                 } else {
-                    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+                    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
                     try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
                     break :result .{ .load_frame = .{ .index = frame_index } };
                 },
@@ -4811,19 +4811,19 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                             src_ty.fmt(pt), dst_ty.fmt(pt),
                         });
 
-                    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+                    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
                     if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
                         try self.genSetMem(
                             .{ .frame = frame_index },
-                            @intCast(tuple_ty.structFieldOffset(0, pt)),
-                            tuple_ty.structFieldType(0, mod),
+                            @intCast(tuple_ty.structFieldOffset(0, zcu)),
+                            tuple_ty.structFieldType(0, zcu),
                             partial_mcv,
                             .{},
                         );
                         try self.genSetMem(
                             .{ .frame = frame_index },
-                            @intCast(tuple_ty.structFieldOffset(1, pt)),
-                            tuple_ty.structFieldType(1, mod),
+                            @intCast(tuple_ty.structFieldOffset(1, zcu)),
+                            tuple_ty.structFieldType(1, zcu),
                             .{ .immediate = 0 }, // cc being set is impossible
                             .{},
                         );
@@ -4847,7 +4847,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 /// Quotient is saved in .rax and remainder in .rdx.
 fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
     const pt = self.pt;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
     const bit_size: u32 = @intCast(self.regBitSize(ty));
     if (abi_size > 8) {
         return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
@@ -4897,9 +4897,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
 /// Clobbers .rax and .rdx registers.
 fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
-    const int_info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
+    const int_info = ty.intInfo(zcu);
     const dividend = switch (lhs) {
         .register => |reg| reg,
         else => try self.copyToTmpRegister(ty, lhs),
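
genInlineIntDivFloor composes floored division out of the truncating division the hardware provides; the usual identity is that truncation differs from flooring by exactly one when the operand signs differ and the division is inexact. A standalone sketch of that identity (divFloorFromTrunc is a hypothetical helper, not the emitted code):

    const std = @import("std");

    fn divFloorFromTrunc(a: i64, b: i64) i64 {
        const q = @divTrunc(a, b);
        const r = @rem(a, b);
        // trunc rounds toward zero; with mixed signs and a nonzero remainder
        // that lands one above the floored quotient
        return if ((a ^ b) < 0 and r != 0) q - 1 else q;
    }

    test divFloorFromTrunc {
        try std.testing.expectEqual(@divFloor(@as(i64, -7), 2), divFloorFromTrunc(-7, 2)); // -4
        try std.testing.expectEqual(@divFloor(@as(i64, 7), 2), divFloorFromTrunc(7, 2)); // 3
    }
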
@@ -4950,7 +4950,7 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
 
 fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const air_tags = self.air.instructions.items(.tag);
@@ -4958,7 +4958,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const lhs_ty = self.typeOf(bin_op.lhs);
     const rhs_ty = self.typeOf(bin_op.rhs);
     const result: MCValue = result: {
-        switch (lhs_ty.zigTypeTag(mod)) {
+        switch (lhs_ty.zigTypeTag(zcu)) {
             .Int => {
                 try self.spillRegisters(&.{.rcx});
                 try self.register_manager.getKnownReg(.rcx, null);
@@ -4977,7 +4977,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                             const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                             defer self.register_manager.unlockReg(tmp_lock);
 
-                            const lhs_bits: u31 = @intCast(lhs_ty.bitSize(pt));
+                            const lhs_bits: u31 = @intCast(lhs_ty.bitSize(zcu));
                             const tmp_ty = if (lhs_bits > 64) Type.usize else lhs_ty;
                             const off = frame_addr.off + (lhs_bits - 1) / 64 * 8;
                             try self.genSetReg(
@@ -5001,14 +5001,14 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                 }
                 break :result dst_mcv;
             },
-            .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
-                .Int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(mod).intInfo(mod).bits) {
+            .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+                .Int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
                     else => null,
-                    16 => switch (lhs_ty.vectorLen(mod)) {
+                    16 => switch (lhs_ty.vectorLen(zcu)) {
                         else => null,
                         1...8 => switch (tag) {
                             else => unreachable,
-                            .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                 .signed => if (self.hasFeature(.avx))
                                     .{ .vp_w, .sra }
                                 else
@@ -5025,18 +5025,18 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                         },
                         9...16 => switch (tag) {
                             else => unreachable,
-                            .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                 .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .sra } else null,
                                 .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .srl } else null,
                             },
                             .shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_w, .sll } else null,
                         },
                     },
-                    32 => switch (lhs_ty.vectorLen(mod)) {
+                    32 => switch (lhs_ty.vectorLen(zcu)) {
                         else => null,
                         1...4 => switch (tag) {
                             else => unreachable,
-                            .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                 .signed => if (self.hasFeature(.avx))
                                     .{ .vp_d, .sra }
                                 else
@@ -5053,18 +5053,18 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                         },
                         5...8 => switch (tag) {
                             else => unreachable,
-                            .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                 .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .sra } else null,
                                 .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .srl } else null,
                             },
                             .shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_d, .sll } else null,
                         },
                     },
-                    64 => switch (lhs_ty.vectorLen(mod)) {
+                    64 => switch (lhs_ty.vectorLen(zcu)) {
                         else => null,
                         1...2 => switch (tag) {
                             else => unreachable,
-                            .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                 .signed => if (self.hasFeature(.avx))
                                     .{ .vp_q, .sra }
                                 else
@@ -5081,7 +5081,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                         },
                         3...4 => switch (tag) {
                             else => unreachable,
-                            .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                            .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                                 .signed => if (self.hasFeature(.avx2)) .{ .vp_q, .sra } else null,
                                 .unsigned => if (self.hasFeature(.avx2)) .{ .vp_q, .srl } else null,
                             },
@@ -5089,10 +5089,10 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                         },
                     },
                 })) |mir_tag| if (try self.air.value(bin_op.rhs, pt)) |rhs_val| {
-                    switch (mod.intern_pool.indexToKey(rhs_val.toIntern())) {
+                    switch (zcu.intern_pool.indexToKey(rhs_val.toIntern())) {
                         .aggregate => |rhs_aggregate| switch (rhs_aggregate.storage) {
                             .repeated_elem => |rhs_elem| {
-                                const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+                                const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
 
                                 const lhs_mcv = try self.resolveInst(bin_op.lhs);
                                 const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
@@ -5112,7 +5112,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                                     self.register_manager.unlockReg(lock);
 
                                 const shift_imm =
-                                    Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(pt)));
+                                    Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(zcu)));
                                 if (self.hasFeature(.avx)) try self.asmRegisterRegisterImmediate(
                                     mir_tag,
                                     registerAlias(dst_reg, abi_size),
@@ -5134,7 +5134,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                     }
                 } else if (bin_op.rhs.toIndex()) |rhs_inst| switch (air_tags[@intFromEnum(rhs_inst)]) {
                     .splat => {
-                        const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+                        const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
 
                         const lhs_mcv = try self.resolveInst(bin_op.lhs);
                         const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
@@ -5161,7 +5161,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
                         const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
                             .ty = mask_ty.toIntern(),
                             .storage = .{ .elems = &([1]InternPool.Index{
-                                (try rhs_ty.childType(mod).maxIntScalar(pt, Type.u8)).toIntern(),
+                                (try rhs_ty.childType(zcu).maxIntScalar(pt, Type.u8)).toIntern(),
                             } ++ [1]InternPool.Index{
                                 (try pt.intValue(Type.u8, 0)).toIntern(),
                             } ** 15) },
@@ -5224,11 +5224,11 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
-    const pt = self.pt;
+    const zcu = self.pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = result: {
         const pl_ty = self.typeOfIndex(inst);
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         const opt_mcv = try self.resolveInst(ty_op.operand);
         if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -5271,15 +5271,15 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result = result: {
         const dst_ty = self.typeOfIndex(inst);
         const src_ty = self.typeOf(ty_op.operand);
-        const opt_ty = src_ty.childType(mod);
+        const opt_ty = src_ty.childType(zcu);
         const src_mcv = try self.resolveInst(ty_op.operand);
 
-        if (opt_ty.optionalReprIsPayload(mod)) {
+        if (opt_ty.optionalReprIsPayload(zcu)) {
             break :result if (self.liveness.isUnused(inst))
                 .unreach
             else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -5296,8 +5296,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
 
-        const pl_ty = dst_ty.childType(mod);
-        const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
+        const pl_ty = dst_ty.childType(zcu);
+        const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
         try self.genSetMem(
             .{ .reg = dst_mcv.getReg().? },
             pl_abi_size,
@@ -5312,23 +5312,23 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const err_union_ty = self.typeOf(ty_op.operand);
-    const err_ty = err_union_ty.errorUnionSet(mod);
-    const payload_ty = err_union_ty.errorUnionPayload(mod);
+    const err_ty = err_union_ty.errorUnionSet(zcu);
+    const payload_ty = err_union_ty.errorUnionPayload(zcu);
     const operand = try self.resolveInst(ty_op.operand);
 
     const result: MCValue = result: {
-        if (err_ty.errorSetIsEmpty(mod)) {
+        if (err_ty.errorSetIsEmpty(zcu)) {
             break :result MCValue{ .immediate = 0 };
         }
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             break :result operand;
         }
 
-        const err_off = errUnionErrorOffset(payload_ty, pt);
+        const err_off = errUnionErrorOffset(payload_ty, zcu);
         switch (operand) {
             .register => |reg| {
                 // TODO reuse operand
@@ -5366,7 +5366,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 // *(E!T) -> E
 fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const src_ty = self.typeOf(ty_op.operand);
@@ -5383,11 +5383,11 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
     defer self.register_manager.unlockReg(dst_lock);
 
-    const eu_ty = src_ty.childType(mod);
-    const pl_ty = eu_ty.errorUnionPayload(mod);
-    const err_ty = eu_ty.errorUnionSet(mod);
-    const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
-    const err_abi_size: u32 = @intCast(err_ty.abiSize(pt));
+    const eu_ty = src_ty.childType(zcu);
+    const pl_ty = eu_ty.errorUnionPayload(zcu);
+    const err_ty = eu_ty.errorUnionSet(zcu);
+    const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+    const err_abi_size: u32 = @intCast(err_ty.abiSize(zcu));
     try self.asmRegisterMemory(
         .{ ._, .mov },
         registerAlias(dst_reg, err_abi_size),
@@ -5414,7 +5414,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = result: {
         const src_ty = self.typeOf(ty_op.operand);
@@ -5426,11 +5426,11 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
         defer self.register_manager.unlockReg(src_lock);
 
-        const eu_ty = src_ty.childType(mod);
-        const pl_ty = eu_ty.errorUnionPayload(mod);
-        const err_ty = eu_ty.errorUnionSet(mod);
-        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
-        const err_abi_size: u32 = @intCast(err_ty.abiSize(pt));
+        const eu_ty = src_ty.childType(zcu);
+        const pl_ty = eu_ty.errorUnionPayload(zcu);
+        const err_ty = eu_ty.errorUnionSet(zcu);
+        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+        const err_abi_size: u32 = @intCast(err_ty.abiSize(zcu));
         try self.asmMemoryImmediate(
             .{ ._, .mov },
             .{
@@ -5453,8 +5453,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
-        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+        const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
         try self.asmRegisterMemory(
             .{ ._, .lea },
             registerAlias(dst_reg, dst_abi_size),
@@ -5475,13 +5475,13 @@ fn genUnwrapErrUnionPayloadMir(
     err_union: MCValue,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const payload_ty = err_union_ty.errorUnionPayload(mod);
+    const zcu = pt.zcu;
+    const payload_ty = err_union_ty.errorUnionPayload(zcu);
 
     const result: MCValue = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
-        const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt));
+        const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, zcu));
         switch (err_union) {
             .load_frame => |frame_addr| break :result .{ .load_frame = .{
                 .index = frame_addr.index,
@@ -5525,12 +5525,12 @@ fn genUnwrapErrUnionPayloadPtrMir(
     ptr_mcv: MCValue,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_union_ty = ptr_ty.childType(mod);
-    const payload_ty = err_union_ty.errorUnionPayload(mod);
+    const zcu = pt.zcu;
+    const err_union_ty = ptr_ty.childType(zcu);
+    const payload_ty = err_union_ty.errorUnionPayload(zcu);
 
     const result: MCValue = result: {
-        const payload_off = errUnionPayloadOffset(payload_ty, pt);
+        const payload_off = errUnionPayloadOffset(payload_ty, zcu);
         const result_mcv: MCValue = if (maybe_inst) |inst|
             try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr_mcv)
         else
@@ -5560,15 +5560,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = result: {
         const pl_ty = self.typeOf(ty_op.operand);
-        if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 };
+        if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 };
 
         const opt_ty = self.typeOfIndex(inst);
         const pl_mcv = try self.resolveInst(ty_op.operand);
-        const same_repr = opt_ty.optionalReprIsPayload(mod);
+        const same_repr = opt_ty.optionalReprIsPayload(zcu);
         if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;
 
         const pl_lock: ?RegisterLock = switch (pl_mcv) {
@@ -5581,7 +5581,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         try self.genCopy(pl_ty, opt_mcv, pl_mcv, .{});
 
         if (!same_repr) {
-            const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
+            const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
             switch (opt_mcv) {
                 else => unreachable,
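
For optionals that are not pointer-like (same_repr is false), the hunk above computes pl_abi_size because the layout being targeted is assumed here to be payload-first with a one-byte flag stored right after it; the `.{ .immediate = 1 }` result for zero-bit payloads is that same flag meaning non-null. A sketch of the assumed layout with a hand-rolled struct (the compiler's real queries are optionalReprIsPayload and abiSize):

const std = @import("std");

test "non-pointer-like optional: payload, then a one-byte non-null flag" {
    const OptU32 = extern struct { payload: u32, flag: u8 };
    const wrapped: OptU32 = .{ .payload = 42, .flag = 1 }; // 1 = non-null
    try std.testing.expectEqual(@as(usize, 4), @offsetOf(OptU32, "flag")); // pl_abi_size
    try std.testing.expectEqual(@as(u32, 42), wrapped.payload);
}
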
 
@@ -5615,20 +5615,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const eu_ty = ty_op.ty.toType();
-    const pl_ty = eu_ty.errorUnionPayload(mod);
-    const err_ty = eu_ty.errorUnionSet(mod);
+    const pl_ty = eu_ty.errorUnionPayload(zcu);
+    const err_ty = eu_ty.errorUnionSet(zcu);
     const operand = try self.resolveInst(ty_op.operand);
 
     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 };
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 };
 
-        const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
-        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
-        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+        const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand, .{});
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }, .{});
         break :result .{ .load_frame = .{ .index = frame_index } };
@@ -5639,19 +5639,19 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 /// E to E!T
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const eu_ty = ty_op.ty.toType();
-    const pl_ty = eu_ty.errorUnionPayload(mod);
-    const err_ty = eu_ty.errorUnionSet(mod);
+    const pl_ty = eu_ty.errorUnionPayload(zcu);
+    const err_ty = eu_ty.errorUnionSet(zcu);
 
     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try self.resolveInst(ty_op.operand);
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try self.resolveInst(ty_op.operand);
 
-        const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
-        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
-        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+        const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+        const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+        const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef, .{});
         const operand = try self.resolveInst(ty_op.operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand, .{});
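
Both wrap functions share one runtime shape: reserve a spill slot sized for the whole E!T, then store the two members at the offsets reported by errUnionPayloadOffset and errUnionErrorOffset, where error code zero means success (that is what the `.{ .immediate = 0 }` store in airWrapErrUnionPayload writes). The same dataflow on a plain byte buffer, with invented offsets and a u16 error code; x86_64 is little-endian:

const std = @import("std");

test "wrap a payload into an E!T-shaped spill slot" {
    var frame = [_]u8{0xAA} ** 8; // stand-in for the allocated frame index
    // invented layout: error code at offset 0, u32 payload at offset 4
    std.mem.writeInt(u16, frame[0..2], 0, .little); // err_off: 0 = success
    std.mem.writeInt(u32, frame[4..8], 1234, .little); // pl_off: the payload
    try std.testing.expectEqual(@as(u16, 0), std.mem.readInt(u16, frame[0..2], .little));
    try std.testing.expectEqual(@as(u32, 1234), std.mem.readInt(u32, frame[4..8], .little));
}
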
@@ -5719,7 +5719,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -5767,7 +5767,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
 
 fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const slice_ty = self.typeOf(lhs);
     const slice_mcv = try self.resolveInst(lhs);
     const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
@@ -5776,9 +5776,9 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     };
     defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = slice_ty.childType(mod);
-    const elem_size = elem_ty.abiSize(pt);
-    const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
+    const elem_ty = slice_ty.childType(zcu);
+    const elem_size = elem_ty.abiSize(zcu);
+    const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
 
     const index_ty = self.typeOf(rhs);
     const index_mcv = try self.resolveInst(rhs);
@@ -5804,15 +5804,15 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const result: MCValue = result: {
         const elem_ty = self.typeOfIndex(inst);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         const slice_ty = self.typeOf(bin_op.lhs);
-        const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
+        const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
         const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
         const dst_mcv = try self.allocRegOrMem(inst, false);
         try self.load(dst_mcv, slice_ptr_field_type, elem_ptr);
@@ -5830,12 +5830,12 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const result: MCValue = result: {
         const array_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = array_ty.childType(mod);
+        const elem_ty = array_ty.childType(zcu);
 
         const array_mcv = try self.resolveInst(bin_op.lhs);
         const array_lock: ?RegisterLock = switch (array_mcv) {
@@ -5853,7 +5853,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
         defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
 
         try self.spillEflagsIfOccupied();
-        if (array_ty.isVector(mod) and elem_ty.bitSize(pt) == 1) {
+        if (array_ty.isVector(zcu) and elem_ty.bitSize(zcu) == 1) {
             const index_reg = switch (index_mcv) {
                 .register => |reg| reg,
                 else => try self.copyToTmpRegister(index_ty, index_mcv),
@@ -5866,7 +5866,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
                         index_reg.to64(),
                     ),
                     .sse => {
-                        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
+                        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu));
                         try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
                         try self.asmMemoryRegister(
                             .{ ._, .bt },
@@ -5904,14 +5904,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
             break :result .{ .register = dst_reg };
         }
 
-        const elem_abi_size = elem_ty.abiSize(pt);
+        const elem_abi_size = elem_ty.abiSize(zcu);
         const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
         const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
         defer self.register_manager.unlockReg(addr_lock);
 
         switch (array_mcv) {
             .register => {
-                const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
+                const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu));
                 try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
                 try self.asmRegisterMemory(
                     .{ ._, .lea },
@@ -5960,7 +5960,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ptr_ty = self.typeOf(bin_op.lhs);
 
@@ -5968,10 +5968,10 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     // additional `mov` is needed at the end to get the actual value
 
     const result = result: {
-        const elem_ty = ptr_ty.elemType2(mod);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        const elem_ty = ptr_ty.elemType2(zcu);
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
-        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
+        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
         const index_ty = self.typeOf(bin_op.rhs);
         const index_mcv = try self.resolveInst(bin_op.rhs);
         const index_lock = switch (index_mcv) {
@@ -6011,7 +6011,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -6026,15 +6026,15 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (base_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-        if (elem_ptr_ty.ptrInfo(mod).flags.vector_index != .none) {
+        if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) {
             break :result if (self.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv))
                 base_ptr_mcv
             else
                 try self.copyToRegisterWithInstTracking(inst, elem_ptr_ty, base_ptr_mcv);
         }
 
-        const elem_ty = base_ptr_ty.elemType2(mod);
-        const elem_abi_size = elem_ty.abiSize(pt);
+        const elem_ty = base_ptr_ty.elemType2(zcu);
+        const elem_abi_size = elem_ty.abiSize(zcu);
         const index_ty = self.typeOf(extra.rhs);
         const index_mcv = try self.resolveInst(extra.rhs);
         const index_lock: ?RegisterLock = switch (index_mcv) {
@@ -6057,12 +6057,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const ptr_union_ty = self.typeOf(bin_op.lhs);
-    const union_ty = ptr_union_ty.childType(mod);
+    const union_ty = ptr_union_ty.childType(zcu);
     const tag_ty = self.typeOf(bin_op.rhs);
-    const layout = union_ty.unionGetLayout(pt);
+    const layout = union_ty.unionGetLayout(zcu);
 
     if (layout.tag_size == 0) {
         return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -6101,12 +6101,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
-    const pt = self.pt;
+    const zcu = self.pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const tag_ty = self.typeOfIndex(inst);
     const union_ty = self.typeOf(ty_op.operand);
-    const layout = union_ty.unionGetLayout(pt);
+    const layout = union_ty.unionGetLayout(zcu);
 
     if (layout.tag_size == 0) {
         return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
@@ -6120,7 +6120,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const tag_abi_size = tag_ty.abiSize(pt);
+    const tag_abi_size = tag_ty.abiSize(zcu);
     const dst_mcv: MCValue = blk: {
         switch (operand) {
             .load_frame => |frame_addr| {
@@ -6159,14 +6159,14 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airClz(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result = result: {
         try self.spillEflagsIfOccupied();
 
         const dst_ty = self.typeOfIndex(inst);
         const src_ty = self.typeOf(ty_op.operand);
-        if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{
+        if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement airClz for {}", .{
             src_ty.fmt(pt),
         });
 
@@ -6186,8 +6186,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
         defer self.register_manager.unlockReg(dst_lock);
 
-        const abi_size: u31 = @intCast(src_ty.abiSize(pt));
-        const src_bits: u31 = @intCast(src_ty.bitSize(pt));
+        const abi_size: u31 = @intCast(src_ty.abiSize(zcu));
+        const src_bits: u31 = @intCast(src_ty.bitSize(zcu));
         const has_lzcnt = self.hasFeature(.lzcnt);
         if (src_bits > @as(u32, if (has_lzcnt) 128 else 64)) {
             const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable;
@@ -6297,7 +6297,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         }
 
         assert(src_bits <= 64);
-        const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2);
+        const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(zcu))), 2);
         if (math.isPowerOfTwo(src_bits)) {
             const imm_reg = try self.copyToTmpRegister(dst_ty, .{
                 .immediate = src_bits ^ (src_bits - 1),
@@ -6356,14 +6356,14 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result = result: {
         try self.spillEflagsIfOccupied();
 
         const dst_ty = self.typeOfIndex(inst);
         const src_ty = self.typeOf(ty_op.operand);
-        if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airCtz for {}", .{
+        if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement airCtz for {}", .{
             src_ty.fmt(pt),
         });
 
@@ -6383,8 +6383,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const abi_size: u31 = @intCast(src_ty.abiSize(pt));
-        const src_bits: u31 = @intCast(src_ty.bitSize(pt));
+        const abi_size: u31 = @intCast(src_ty.abiSize(zcu));
+        const src_bits: u31 = @intCast(src_ty.bitSize(zcu));
         const has_bmi = self.hasFeature(.bmi);
         if (src_bits > @as(u32, if (has_bmi) 128 else 64)) {
             const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable;
@@ -6505,7 +6505,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
             try self.genBinOpMir(.{ ._, .bsf }, wide_ty, dst_mcv, .{ .register = wide_reg });
         } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
 
-        const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2);
+        const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(zcu))), 2);
         try self.asmCmovccRegisterRegister(
             .z,
             registerAlias(dst_reg, cmov_abi_size),
@@ -6518,14 +6518,14 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const result: MCValue = result: {
         try self.spillEflagsIfOccupied();
 
         const src_ty = self.typeOf(ty_op.operand);
-        const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
-        if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 16)
+        const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
+        if (src_ty.zigTypeTag(zcu) == .Vector or src_abi_size > 16)
             return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(pt)});
         const src_mcv = try self.resolveInst(ty_op.operand);
 
@@ -6562,7 +6562,7 @@ fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
             mat_src_mcv
         else
             .{ .register = mat_src_mcv.register_pair[0] }, false);
-        const src_info = src_ty.intInfo(mod);
+        const src_info = src_ty.intInfo(zcu);
         const hi_ty = try pt.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1);
         try self.genPopCount(tmp_regs[1], hi_ty, if (mat_src_mcv.isMemory())
             mat_src_mcv.address().offset(8).deref()
@@ -6583,7 +6583,7 @@ fn genPopCount(
 ) !void {
     const pt = self.pt;
 
-    const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+    const src_abi_size: u32 = @intCast(src_ty.abiSize(pt.zcu));
     if (self.hasFeature(.popcnt)) return self.genBinOpMir(
         .{ ._, .popcnt },
         if (src_abi_size > 1) src_ty else Type.u32,
@@ -6674,11 +6674,11 @@ fn genByteSwap(
     mem_ok: bool,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const has_movbe = self.hasFeature(.movbe);
 
-    if (src_ty.zigTypeTag(mod) == .Vector) return self.fail(
+    if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail(
         "TODO implement genByteSwap for {}",
         .{src_ty.fmt(pt)},
     );
@@ -6689,7 +6689,7 @@ fn genByteSwap(
     };
     defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const abi_size: u32 = @intCast(src_ty.abiSize(pt));
+    const abi_size: u32 = @intCast(src_ty.abiSize(zcu));
     switch (abi_size) {
         0 => unreachable,
         1 => return if ((mem_ok or src_mcv.isRegister()) and
@@ -6838,35 +6838,35 @@ fn genByteSwap(
 
 fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const src_ty = self.typeOf(ty_op.operand);
-    const src_bits: u32 = @intCast(src_ty.bitSize(pt));
+    const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
     const src_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, true);
     try self.genShiftBinOpMir(
-        .{ ._r, switch (if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned) {
+        .{ ._r, switch (if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned) {
             .signed => .sa,
             .unsigned => .sh,
         } },
         src_ty,
         dst_mcv,
         if (src_bits > 256) Type.u16 else Type.u8,
-        .{ .immediate = src_ty.abiSize(pt) * 8 - src_bits },
+        .{ .immediate = src_ty.abiSize(zcu) * 8 - src_bits },
     );
     return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
 }
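
The shift after the swap corrects for integers narrower than their ABI size: swapping the full register moves the significant bytes too far up, and shifting right by abiSize * 8 - src_bits (arithmetic for signed, logical for unsigned, matching the .sa/.sh selection above) brings them back down. A worked u24 instance:

const std = @import("std");

test "byte swap of a u24 via a 32-bit bswap plus a corrective shift" {
    const x: u24 = 0x112233;
    const widened: u32 = x; // the value as it sits in its 4-byte ABI size
    const swapped = @byteSwap(widened); // 0x33221100: one byte too high
    const fixed: u24 = @intCast(swapped >> (4 * 8 - 24)); // shift right by 8
    try std.testing.expectEqual(@as(u24, 0x332211), fixed);
}
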
 
 fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const src_ty = self.typeOf(ty_op.operand);
-    const abi_size: u32 = @intCast(src_ty.abiSize(pt));
-    const bit_size: u32 = @intCast(src_ty.bitSize(pt));
+    const abi_size: u32 = @intCast(src_ty.abiSize(zcu));
+    const bit_size: u32 = @intCast(src_ty.bitSize(zcu));
     const src_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, false);
@@ -6973,7 +6973,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
 
     const extra_bits = abi_size * 8 - bit_size;
     const signedness: std.builtin.Signedness =
-        if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+        if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned;
     if (extra_bits > 0) try self.genShiftBinOpMir(switch (signedness) {
         .signed => .{ ._r, .sa },
         .unsigned => .{ ._r, .sh },
@@ -6984,13 +6984,13 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
 
 fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
 
     const result = result: {
-        const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);
+        const scalar_bits = ty.scalarType(zcu).floatBits(self.target.*);
         if (scalar_bits == 80) {
-            if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement floatSign for {}", .{
+            if (ty.zigTypeTag(zcu) != .Float) return self.fail("TODO implement floatSign for {}", .{
                 ty.fmt(pt),
             });
 
@@ -7011,7 +7011,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
             break :result dst_mcv;
         }
 
-        const abi_size: u32 = switch (ty.abiSize(pt)) {
+        const abi_size: u32 = switch (ty.abiSize(zcu)) {
             1...16 => 16,
             17...32 => 32,
             else => return self.fail("TODO implement floatSign for {}", .{
@@ -7161,23 +7161,23 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: RoundMode) !void {
 
 fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
     const pt = self.pt;
-    const mod = pt.zcu;
-    return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(zcu)) {
         .Float => switch (ty.floatBits(self.target.*)) {
             32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
             64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
             16, 80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-            .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
-                32 => switch (ty.vectorLen(mod)) {
+        .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+            .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+                32 => switch (ty.vectorLen(zcu)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
                     2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
                     5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
                     else => null,
                 },
-                64 => switch (ty.vectorLen(mod)) {
+                64 => switch (ty.vectorLen(zcu)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
                     2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
                     3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
@@ -7194,10 +7194,10 @@ fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
 
 fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (self.getRoundTag(ty)) |_| return .none;
 
-    if (ty.zigTypeTag(mod) != .Float)
+    if (ty.zigTypeTag(zcu) != .Float)
         return self.fail("TODO implement genRound for {}", .{ty.fmt(pt)});
 
     var callee_buf: ["__trunc?".len]u8 = undefined;
@@ -7223,7 +7223,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: RoundMode) !void {
         const result = try self.genRoundLibcall(ty, src_mcv, mode);
         return self.genSetReg(dst_reg, ty, result, .{});
     };
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
     const dst_alias = registerAlias(dst_reg, abi_size);
     switch (mir_tag[0]) {
         .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -7261,14 +7261,14 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: RoundMode) !void {
 
 fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const ty = self.typeOf(ty_op.operand);
 
     const result: MCValue = result: {
-        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
+        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
             else => null,
-            .Int => switch (ty.abiSize(pt)) {
+            .Int => switch (ty.abiSize(zcu)) {
                 0 => unreachable,
                 1...8 => {
                     try self.spillEflagsIfOccupied();
@@ -7277,7 +7277,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
 
                     try self.genUnOpMir(.{ ._, .neg }, ty, dst_mcv);
 
-                    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+                    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
                     switch (src_mcv) {
                         .register => |val_reg| try self.asmCmovccRegisterRegister(
                             .l,
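
The .l condition completing this cmov is what makes neg-plus-cmov a branchless abs: after neg, the flags describe -x, so "signed less than zero" means the original operand was positive and should be moved back in. The same dataflow in plain Zig; wrapping negation mirrors the hardware's wraparound at minInt:

const std = @import("std");

fn absViaNegCmov(x: i32) i32 {
    var dst = -%x; // neg dst
    if (dst < 0) dst = x; // cmovl dst, val_reg
    return dst;
}

test "branchless abs via neg + cmovl" {
    try std.testing.expectEqual(@as(i32, 7), absViaNegCmov(-7));
    try std.testing.expectEqual(@as(i32, 7), absViaNegCmov(7));
    try std.testing.expectEqual(@as(i32, 0), absViaNegCmov(0));
}
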
@@ -7336,7 +7336,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
                     break :result dst_mcv;
                 },
                 else => {
-                    const abi_size: u31 = @intCast(ty.abiSize(pt));
+                    const abi_size: u31 = @intCast(ty.abiSize(zcu));
                     const limb_len = math.divCeil(u31, abi_size, 8) catch unreachable;
 
                     const tmp_regs =
@@ -7397,11 +7397,11 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
                 },
             },
             .Float => return self.floatSign(inst, ty_op.operand, ty),
-            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                 else => null,
-                .Int => switch (ty.childType(mod).intInfo(mod).bits) {
+                .Int => switch (ty.childType(zcu).intInfo(zcu).bits) {
                     else => null,
-                    8 => switch (ty.vectorLen(mod)) {
+                    8 => switch (ty.vectorLen(zcu)) {
                         else => null,
                         1...16 => if (self.hasFeature(.avx))
                             .{ .vp_b, .abs }
@@ -7411,7 +7411,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
                             null,
                         17...32 => if (self.hasFeature(.avx2)) .{ .vp_b, .abs } else null,
                     },
-                    16 => switch (ty.vectorLen(mod)) {
+                    16 => switch (ty.vectorLen(zcu)) {
                         else => null,
                         1...8 => if (self.hasFeature(.avx))
                             .{ .vp_w, .abs }
@@ -7421,7 +7421,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
                             null,
                         9...16 => if (self.hasFeature(.avx2)) .{ .vp_w, .abs } else null,
                     },
-                    32 => switch (ty.vectorLen(mod)) {
+                    32 => switch (ty.vectorLen(zcu)) {
                         else => null,
                         1...4 => if (self.hasFeature(.avx))
                             .{ .vp_d, .abs }
@@ -7436,7 +7436,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
             },
         }) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
 
-        const abi_size: u32 = @intCast(ty.abiSize(pt));
+        const abi_size: u32 = @intCast(ty.abiSize(zcu));
         const src_mcv = try self.resolveInst(ty_op.operand);
         const dst_reg = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
             src_mcv.getReg().?
@@ -7462,13 +7462,13 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const ty = self.typeOf(un_op);
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
 
     const result: MCValue = result: {
-        switch (ty.zigTypeTag(mod)) {
+        switch (ty.zigTypeTag(zcu)) {
             .Float => {
                 const float_bits = ty.floatBits(self.target.*);
                 if (switch (float_bits) {
@@ -7500,7 +7500,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
+        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
             .Float => switch (ty.floatBits(self.target.*)) {
                 16 => {
                     assert(self.hasFeature(.f16c));
@@ -7522,9 +7522,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                 64 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                 else => unreachable,
             },
-            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-                .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
-                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) {
+            .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+                .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(zcu)) {
                         1 => {
                             try self.asmRegisterRegister(
                                 .{ .v_ps, .cvtph2 },
@@ -7575,13 +7575,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                         },
                         else => null,
                     } else null,
-                    32 => switch (ty.vectorLen(mod)) {
+                    32 => switch (ty.vectorLen(zcu)) {
                         1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
                         2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
                         5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
                         else => null,
                     },
-                    64 => switch (ty.vectorLen(mod)) {
+                    64 => switch (ty.vectorLen(zcu)) {
                         1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                         2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
                         3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
@@ -7708,14 +7708,14 @@ fn reuseOperandAdvanced(
 
 fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    const ptr_info = ptr_ty.ptrInfo(mod);
+    const ptr_info = ptr_ty.ptrInfo(zcu);
     const val_ty = Type.fromInterned(ptr_info.child);
-    if (!val_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
-    const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+    if (!val_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
+    const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
 
-    const val_bit_size: u32 = @intCast(val_ty.bitSize(pt));
+    const val_bit_size: u32 = @intCast(val_ty.bitSize(zcu));
     const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
         .runtime => unreachable,
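
The scalar shape of what packedLoad emits once ptr_bit_off is known: read the host integer containing the field, shift the bit offset away, and truncate to the field's width. The width and offset below are invented for illustration; the real code works limb by limb and also handles fields straddling limb boundaries:

const std = @import("std");

fn loadPackedField(comptime Field: type, host: u32, bit_off: u5) Field {
    return @truncate(host >> bit_off); // shift the bit offset away, keep the low bits
}

test "extract a u6 field at bit offset 7 of a u32 host" {
    const host: u32 = (53 << 7) | 0b1010101; // field bits surrounded by junk
    try std.testing.expectEqual(@as(u6, 53), loadPackedField(u6, host, 7));
}
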
@@ -7821,9 +7821,9 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
 
 fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const dst_ty = ptr_ty.childType(mod);
-    if (!dst_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+    const zcu = pt.zcu;
+    const dst_ty = ptr_ty.childType(zcu);
+    if (!dst_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -7864,18 +7864,18 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 
         try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
         const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
         defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
 
         const ptr_ty = self.typeOf(ty_op.operand);
-        const elem_size = elem_ty.abiSize(pt);
+        const elem_size = elem_ty.abiSize(zcu);
 
         const elem_rc = self.regClassForType(elem_ty);
         const ptr_rc = self.regClassForType(ptr_ty);
@@ -7888,14 +7888,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.allocRegOrMem(inst, true);
 
-        const ptr_info = ptr_ty.ptrInfo(mod);
+        const ptr_info = ptr_ty.ptrInfo(zcu);
         if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
             try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
         } else {
             try self.load(dst_mcv, ptr_ty, ptr_mcv);
         }
 
-        if (elem_ty.isAbiInt(mod) and elem_size * 8 > elem_ty.bitSize(pt)) {
+        if (elem_ty.isAbiInt(zcu) and elem_size * 8 > elem_ty.bitSize(zcu)) {
             const high_mcv: MCValue = switch (dst_mcv) {
                 .register => |dst_reg| .{ .register = dst_reg },
                 .register_pair => |dst_regs| .{ .register = dst_regs[1] },
@@ -7923,16 +7923,16 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ptr_info = ptr_ty.ptrInfo(mod);
+    const zcu = pt.zcu;
+    const ptr_info = ptr_ty.ptrInfo(zcu);
     const src_ty = Type.fromInterned(ptr_info.child);
-    if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+    if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
 
     const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
     const limb_ty = try pt.intType(.unsigned, limb_abi_bits);
 
-    const src_bit_size = src_ty.bitSize(pt);
+    const src_bit_size = src_ty.bitSize(zcu);
     const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
         .runtime => unreachable,
@@ -8029,9 +8029,9 @@ fn store(
     opts: CopyOptions,
 ) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const src_ty = ptr_ty.childType(mod);
-    if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+    const zcu = pt.zcu;
+    const src_ty = ptr_ty.childType(zcu);
+    if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -8072,7 +8072,7 @@ fn store(
 
 fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     result: {
@@ -8086,7 +8086,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
         const ptr_mcv = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
 
-        const ptr_info = ptr_ty.ptrInfo(mod);
+        const ptr_info = ptr_ty.ptrInfo(zcu);
         if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
             try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
         } else {
@@ -8111,16 +8111,16 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 
 fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_field_ty = self.typeOfIndex(inst);
     const ptr_container_ty = self.typeOf(operand);
-    const container_ty = ptr_container_ty.childType(mod);
+    const container_ty = ptr_container_ty.childType(zcu);
 
-    const field_off: i32 = switch (container_ty.containerLayout(mod)) {
-        .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, pt)),
-        .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(mod).packed_offset.bit_offset) +
-            (if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
-            ptr_field_ty.ptrInfo(mod).packed_offset.bit_offset, 8),
+    const field_off: i32 = switch (container_ty.containerLayout(zcu)) {
+        .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, zcu)),
+        .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+            (if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
+            ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
     };
 
     const src_mcv = try self.resolveInst(operand);
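
The packed branch of field_off above always lands on a whole byte: the container pointer's bit_offset, plus the field's bit offset within the packed struct, minus the remainder carried by the resulting field pointer type, is divisible by 8 by construction, which @divExact asserts. A worked instance with invented numbers:

const std = @import("std");

test "packed field pointer byte adjustment" {
    const container_ptr_bit_off = 2; // ptr_container_ty's packed bit offset
    const field_bit_off = 13; // structPackedFieldBitOffset result
    const field_ptr_bit_off = 7; // remainder kept in ptr_field_ty's type
    const byte_off = @divExact(container_ptr_bit_off + field_bit_off - field_ptr_bit_off, 8);
    try std.testing.expectEqual(1, byte_off);
}
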
@@ -8134,7 +8134,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
 
 fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
     const result: MCValue = result: {
@@ -8143,15 +8143,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 
         const container_ty = self.typeOf(operand);
         const container_rc = self.regClassForType(container_ty);
-        const field_ty = container_ty.structFieldType(index, mod);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+        const field_ty = container_ty.structFieldType(index, zcu);
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
         const field_rc = self.regClassForType(field_ty);
         const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp);
 
         const src_mcv = try self.resolveInst(operand);
-        const field_off: u32 = switch (container_ty.containerLayout(mod)) {
-            .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, pt) * 8),
-            .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
+        const field_off: u32 = switch (container_ty.containerLayout(zcu)) {
+            .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, zcu) * 8),
+            .@"packed" => if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
         };
 
         switch (src_mcv) {
@@ -8182,7 +8182,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                     );
                 }
                 if (abi.RegisterClass.gp.isSet(RegisterManager.indexOfRegIntoTracked(dst_reg).?) and
-                    container_ty.abiSize(pt) * 8 > field_ty.bitSize(pt))
+                    container_ty.abiSize(zcu) * 8 > field_ty.bitSize(zcu))
                     try self.truncateRegister(field_ty, dst_reg);
 
                 break :result if (field_off == 0 or field_rc.supersetOf(abi.RegisterClass.gp))
@@ -8194,7 +8194,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                 const src_regs_lock = self.register_manager.lockRegsAssumeUnused(2, src_regs);
                 defer for (src_regs_lock) |lock| self.register_manager.unlockReg(lock);
 
-                const field_bit_size: u32 = @intCast(field_ty.bitSize(pt));
+                const field_bit_size: u32 = @intCast(field_ty.bitSize(zcu));
                 const src_reg = if (field_off + field_bit_size <= 64)
                     src_regs[0]
                 else if (field_off >= 64)
@@ -8293,15 +8293,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                 }
             },
             .load_frame => |frame_addr| {
-                const field_abi_size: u32 = @intCast(field_ty.abiSize(pt));
+                const field_abi_size: u32 = @intCast(field_ty.abiSize(zcu));
                 if (field_off % 8 == 0) {
                     const field_byte_off = @divExact(field_off, 8);
                     const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref();
-                    const field_bit_size = field_ty.bitSize(pt);
+                    const field_bit_size = field_ty.bitSize(zcu);
 
                     if (field_abi_size <= 8) {
                         const int_ty = try pt.intType(
-                            if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned,
+                            if (field_ty.isAbiInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned,
                             @intCast(field_bit_size),
                         );
 
@@ -8321,7 +8321,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                             try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
                     }
 
-                    const container_abi_size: u32 = @intCast(container_ty.abiSize(pt));
+                    const container_abi_size: u32 = @intCast(container_ty.abiSize(zcu));
                     const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and
                         self.reuseOperand(inst, operand, 0, src_mcv))
                         off_mcv
@@ -8423,17 +8423,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const inst_ty = self.typeOfIndex(inst);
-    const parent_ty = inst_ty.childType(mod);
-    const field_off: i32 = switch (parent_ty.containerLayout(mod)) {
-        .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, pt)),
-        .@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(mod).packed_offset.bit_offset) +
-            (if (mod.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
-            self.typeOf(extra.field_ptr).ptrInfo(mod).packed_offset.bit_offset, 8),
+    const parent_ty = inst_ty.childType(zcu);
+    const field_off: i32 = switch (parent_ty.containerLayout(zcu)) {
+        .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, zcu)),
+        .@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+            (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
+            self.typeOf(extra.field_ptr).ptrInfo(zcu).packed_offset.bit_offset, 8),
     };
 
     const src_mcv = try self.resolveInst(extra.field_ptr);
@@ -8448,9 +8448,9 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const src_ty = self.typeOf(src_air);
-    if (src_ty.zigTypeTag(mod) == .Vector)
+    if (src_ty.zigTypeTag(zcu) == .Vector)
         return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(pt)});
 
     var src_mcv = try self.resolveInst(src_air);
@@ -8486,14 +8486,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
     };
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const abi_size: u16 = @intCast(src_ty.abiSize(pt));
+    const abi_size: u16 = @intCast(src_ty.abiSize(zcu));
     switch (tag) {
         .not => {
             const limb_abi_size: u16 = @min(abi_size, 8);
             const int_info = if (src_ty.ip_index == .bool_type)
                 std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
             else
-                src_ty.intInfo(mod);
+                src_ty.intInfo(zcu);
             var byte_off: i32 = 0;
             while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
                 const limb_bits: u16 = @intCast(@min(switch (int_info.signedness) {
@@ -8514,7 +8514,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
         },
         .neg => {
             try self.genUnOpMir(.{ ._, .neg }, src_ty, dst_mcv);
-            const bit_size = src_ty.intInfo(mod).bits;
+            const bit_size = src_ty.intInfo(zcu).bits;
             if (abi_size * 8 > bit_size) {
                 if (dst_mcv.isRegister()) {
                     try self.truncateRegister(src_ty, dst_mcv.getReg().?);
@@ -8537,7 +8537,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
 
 fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
     const pt = self.pt;
-    const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+    const abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
     if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(pt) });
     switch (dst_mcv) {
         .none,
@@ -8586,8 +8586,9 @@ fn genShiftBinOpMir(
     rhs_mcv: MCValue,
 ) !void {
     const pt = self.pt;
-    const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
-    const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(pt));
+    const zcu = pt.zcu;
+    const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
+    const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(zcu));
     try self.spillEflagsIfOccupied();
 
     if (abi_size > 16) {
@@ -9243,8 +9244,8 @@ fn genShiftBinOp(
     rhs_ty: Type,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    if (lhs_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
+    const zcu = pt.zcu;
+    if (lhs_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
         lhs_ty.fmt(pt),
     });
 
@@ -9274,7 +9275,7 @@ fn genShiftBinOp(
         break :dst dst_mcv;
     };
 
-    const signedness = lhs_ty.intInfo(mod).signedness;
+    const signedness = lhs_ty.intInfo(zcu).signedness;
     try self.genShiftBinOpMir(switch (air_tag) {
         .shl, .shl_exact => switch (signedness) {
             .signed => .{ ._l, .sa },
@@ -9302,13 +9303,13 @@ fn genMulDivBinOp(
     rhs_mcv: MCValue,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) return self.fail(
+    const zcu = pt.zcu;
+    if (dst_ty.zigTypeTag(zcu) == .Vector or dst_ty.zigTypeTag(zcu) == .Float) return self.fail(
         "TODO implement genMulDivBinOp for {s} from {} to {}",
         .{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
     );
-    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
-    const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
+    const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
 
     assert(self.register_manager.isRegFree(.rax));
     assert(self.register_manager.isRegFree(.rcx));
@@ -9384,7 +9385,7 @@ fn genMulDivBinOp(
         .mul, .mul_wrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
         .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_abi_size != src_abi_size,
     } or src_abi_size > 8) {
-        const src_info = src_ty.intInfo(mod);
+        const src_info = src_ty.intInfo(zcu);
         switch (tag) {
             .mul, .mul_wrap => {
                 const slow_inc = self.hasFeature(.slow_incdec);
@@ -9555,7 +9556,7 @@ fn genMulDivBinOp(
     const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
     defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const signedness = ty.intInfo(mod).signedness;
+    const signedness = ty.intInfo(zcu).signedness;
     switch (tag) {
         .mul,
         .mul_wrap,
@@ -9714,10 +9715,10 @@ fn genBinOp(
     rhs_air: Air.Inst.Ref,
 ) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = self.typeOf(lhs_air);
     const rhs_ty = self.typeOf(rhs_air);
-    const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+    const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
 
     if (lhs_ty.isRuntimeFloat()) libcall: {
         const float_bits = lhs_ty.floatBits(self.target.*);
@@ -9889,23 +9890,23 @@ fn genBinOp(
         };
     }
 
-    const sse_op = switch (lhs_ty.zigTypeTag(mod)) {
+    const sse_op = switch (lhs_ty.zigTypeTag(zcu)) {
         else => false,
         .Float => true,
-        .Vector => switch (lhs_ty.childType(mod).toIntern()) {
+        .Vector => switch (lhs_ty.childType(zcu).toIntern()) {
             .bool_type, .u1_type => false,
             else => true,
         },
     };
-    if (sse_op and ((lhs_ty.scalarType(mod).isRuntimeFloat() and
-        lhs_ty.scalarType(mod).floatBits(self.target.*) == 80) or
-        lhs_ty.abiSize(pt) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)))
+    if (sse_op and ((lhs_ty.scalarType(zcu).isRuntimeFloat() and
+        lhs_ty.scalarType(zcu).floatBits(self.target.*) == 80) or
+        lhs_ty.abiSize(zcu) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)))
         return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(pt) });
 
     const maybe_mask_reg = switch (air_tag) {
         else => null,
         .rem, .mod => unreachable,
-        .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias(
+        .max, .min => if (lhs_ty.scalarType(zcu).isRuntimeFloat()) registerAlias(
             if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
                 try self.register_manager.getKnownReg(.xmm0, null);
                 break :mask .xmm0;
@@ -9917,8 +9918,8 @@ fn genBinOp(
         if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null;
     defer if (mask_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const ordered_air: [2]Air.Inst.Ref = if (lhs_ty.isVector(mod) and
-        switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+    const ordered_air: [2]Air.Inst.Ref = if (lhs_ty.isVector(zcu) and
+        switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
         .Bool => false,
         .Int => switch (air_tag) {
             .cmp_lt, .cmp_gte => true,
@@ -9931,7 +9932,7 @@ fn genBinOp(
         else => unreachable,
     }) .{ rhs_air, lhs_air } else .{ lhs_air, rhs_air };
 
-    if (lhs_ty.isAbiInt(mod)) for (ordered_air) |op_air| {
+    if (lhs_ty.isAbiInt(zcu)) for (ordered_air) |op_air| {
         switch (try self.resolveInst(op_air)) {
             .register => |op_reg| switch (op_reg.class()) {
                 .sse => try self.register_manager.getReg(op_reg, null),
@@ -10056,7 +10057,7 @@ fn genBinOp(
                 const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                 defer self.register_manager.unlockReg(tmp_lock);
 
-                const elem_size = lhs_ty.elemType2(mod).abiSize(pt);
+                const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
                 try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
                 try self.genBinOpMir(
                     switch (air_tag) {
@@ -10112,7 +10113,7 @@ fn genBinOp(
                     const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                     defer self.register_manager.unlockReg(tmp_lock);
 
-                    const signed = lhs_ty.isSignedInt(mod);
+                    const signed = lhs_ty.isSignedInt(zcu);
                     const cc: Condition = switch (air_tag) {
                         .min => if (signed) .nl else .nb,
                         .max => if (signed) .nge else .nae,
@@ -10188,7 +10189,7 @@ fn genBinOp(
 
                     try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv);
 
-                    const int_info = lhs_ty.intInfo(mod);
+                    const int_info = lhs_ty.intInfo(zcu);
                     const cc: Condition = switch (int_info.signedness) {
                         .unsigned => switch (air_tag) {
                             .min => .a,
@@ -10202,7 +10203,7 @@ fn genBinOp(
                         },
                     };
 
-                    const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(pt))), 2);
+                    const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(zcu))), 2);
                     const tmp_reg = switch (dst_mcv) {
                         .register => |reg| reg,
                         else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
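Note: the `@max(..., 2)` in the hunk above is load-bearing: x86 `cmovcc` has no 8-bit
encoding, only 16-, 32- and 64-bit forms, so a byte-sized min/max result is conditionally
moved through a widened register alias. Condensed sketch:

    // cmovcc cannot take byte operands, so clamp the width to at least 2
    // and emit the conditional move on the matching sub-register (ax, not al).
    const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(zcu))), 2);
    const tmp_alias = registerAlias(tmp_reg, cmov_abi_size);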
@@ -10271,7 +10272,7 @@ fn genBinOp(
             },
 
             .cmp_eq, .cmp_neq => {
-                assert(lhs_ty.isVector(mod) and lhs_ty.childType(mod).toIntern() == .bool_type);
+                assert(lhs_ty.isVector(zcu) and lhs_ty.childType(zcu).toIntern() == .bool_type);
                 try self.genBinOpMir(.{ ._, .xor }, lhs_ty, dst_mcv, src_mcv);
                 switch (air_tag) {
                     .cmp_eq => try self.genUnOpMir(.{ ._, .not }, lhs_ty, dst_mcv),
@@ -10288,7 +10289,7 @@ fn genBinOp(
     }
 
     const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
-    const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+    const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
         else => unreachable,
         .Float => switch (lhs_ty.floatBits(self.target.*)) {
             16 => {
@@ -10383,10 +10384,10 @@ fn genBinOp(
             80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+        .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
             else => null,
-            .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) {
-                8 => switch (lhs_ty.vectorLen(mod)) {
+            .Int => switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
+                8 => switch (lhs_ty.vectorLen(zcu)) {
                     1...16 => switch (air_tag) {
                         .add,
                         .add_wrap,
@@ -10400,7 +10401,7 @@ fn genBinOp(
                             .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .mins }
                             else if (self.hasFeature(.sse4_1))
@@ -10414,7 +10415,7 @@ fn genBinOp(
                             else
                                 null,
                         },
-                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .maxs }
                             else if (self.hasFeature(.sse4_1))
@@ -10432,7 +10433,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .cmpgt }
                             else
@@ -10454,11 +10455,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
                         },
-                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
                         },
@@ -10466,7 +10467,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx)) .{ .vp_b, .cmpgt } else null,
                             .unsigned => null,
                         },
@@ -10477,7 +10478,7 @@ fn genBinOp(
                     },
                     else => null,
                 },
-                16 => switch (lhs_ty.vectorLen(mod)) {
+                16 => switch (lhs_ty.vectorLen(zcu)) {
                     1...8 => switch (air_tag) {
                         .add,
                         .add_wrap,
@@ -10494,7 +10495,7 @@ fn genBinOp(
                             .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_w, .mins }
                             else
@@ -10504,7 +10505,7 @@ fn genBinOp(
                             else
                                 .{ .p_w, .minu },
                         },
-                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_w, .maxs }
                             else
@@ -10518,7 +10519,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_w, .cmpgt }
                             else
@@ -10543,11 +10544,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
                         },
-                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
                         },
@@ -10555,7 +10556,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx)) .{ .vp_w, .cmpgt } else null,
                             .unsigned => null,
                         },
@@ -10566,7 +10567,7 @@ fn genBinOp(
                     },
                     else => null,
                 },
-                32 => switch (lhs_ty.vectorLen(mod)) {
+                32 => switch (lhs_ty.vectorLen(zcu)) {
                     1...4 => switch (air_tag) {
                         .add,
                         .add_wrap,
@@ -10588,7 +10589,7 @@ fn genBinOp(
                             .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .mins }
                             else if (self.hasFeature(.sse4_1))
@@ -10602,7 +10603,7 @@ fn genBinOp(
                             else
                                 null,
                         },
-                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .maxs }
                             else if (self.hasFeature(.sse4_1))
@@ -10620,7 +10621,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .cmpgt }
                             else
@@ -10645,11 +10646,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
                         },
-                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
                         },
@@ -10657,7 +10658,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
                             .unsigned => null,
                         },
@@ -10668,7 +10669,7 @@ fn genBinOp(
                     },
                     else => null,
                 },
-                64 => switch (lhs_ty.vectorLen(mod)) {
+                64 => switch (lhs_ty.vectorLen(zcu)) {
                     1...2 => switch (air_tag) {
                         .add,
                         .add_wrap,
@@ -10686,7 +10687,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gte,
                         .cmp_gt,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_q, .cmpgt }
                             else if (self.hasFeature(.sse4_2))
@@ -10722,7 +10723,7 @@ fn genBinOp(
                         .cmp_lte,
                         .cmp_gt,
                         .cmp_gte,
-                        => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+                        => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
                             .signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
                             .unsigned => null,
                         },
@@ -10732,10 +10733,10 @@ fn genBinOp(
                 },
                 else => null,
             },
-            .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+            .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
                 16 => tag: {
                     assert(self.hasFeature(.f16c));
-                    switch (lhs_ty.vectorLen(mod)) {
+                    switch (lhs_ty.vectorLen(zcu)) {
                         1 => {
                             const tmp_reg = (try self.register_manager.allocReg(
                                 null,
@@ -10923,7 +10924,7 @@ fn genBinOp(
                         else => break :tag null,
                     }
                 },
-                32 => switch (lhs_ty.vectorLen(mod)) {
+                32 => switch (lhs_ty.vectorLen(zcu)) {
                     1 => switch (air_tag) {
                         .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
                         .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
@@ -10976,7 +10977,7 @@ fn genBinOp(
                     } else null,
                     else => null,
                 },
-                64 => switch (lhs_ty.vectorLen(mod)) {
+                64 => switch (lhs_ty.vectorLen(zcu)) {
                     1 => switch (air_tag) {
                         .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
                         .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
@@ -11052,7 +11053,7 @@ fn genBinOp(
                 mir_tag,
                 dst_reg,
                 lhs_reg,
-                try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+                try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
                     else => Memory.Size.fromSize(abi_size),
                     .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
                 }),
@@ -11070,7 +11071,7 @@ fn genBinOp(
             if (src_mcv.isMemory()) try self.asmRegisterMemory(
                 mir_tag,
                 dst_reg,
-                try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+                try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
                     else => Memory.Size.fromSize(abi_size),
                     .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
                 }),
@@ -11098,7 +11099,7 @@ fn genBinOp(
                     mir_tag,
                     dst_reg,
                     lhs_reg,
-                    try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+                    try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
                         else => Memory.Size.fromSize(abi_size),
                         .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
                     }),
@@ -11118,7 +11119,7 @@ fn genBinOp(
                 if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate(
                     mir_tag,
                     dst_reg,
-                    try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+                    try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
                         else => Memory.Size.fromSize(abi_size),
                         .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
                     }),
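Note: the hunks above repeat the same change at several call sites; the switch they touch
sizes a memory operand two ways: scalars use the type's ABI size, vectors use the full
bit width of the destination SSE/AVX register. Restated with comments:

    if (src_mcv.isMemory()) try self.asmRegisterMemory(
        mir_tag,
        dst_reg,
        try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
            // scalar: the operand is exactly as wide as the type
            else => Memory.Size.fromSize(abi_size),
            // vector: the operand covers the whole xmm/ymm register
            .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
        }),
    );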
@@ -11151,21 +11152,21 @@ fn genBinOp(
             const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size);
 
             try self.asmRegisterRegisterRegisterImmediate(
-                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ .v_ss, .cmp },
                         64 => .{ .v_sd, .cmp },
                         16, 80, 128 => null,
                         else => unreachable,
                     },
-                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
-                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen(mod)) {
+                    .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+                        .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(zcu)) {
                                 1 => .{ .v_ss, .cmp },
                                 2...8 => .{ .v_ps, .cmp },
                                 else => null,
                             },
-                            64 => switch (lhs_ty.vectorLen(mod)) {
+                            64 => switch (lhs_ty.vectorLen(zcu)) {
                                 1 => .{ .v_sd, .cmp },
                                 2...4 => .{ .v_pd, .cmp },
                                 else => null,
@@ -11185,20 +11186,20 @@ fn genBinOp(
                 Immediate.u(3), // unord
             );
             try self.asmRegisterRegisterRegisterRegister(
-                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ .v_ps, .blendv },
                         64 => .{ .v_pd, .blendv },
                         16, 80, 128 => null,
                         else => unreachable,
                     },
-                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
-                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen(mod)) {
+                    .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+                        .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(zcu)) {
                                 1...8 => .{ .v_ps, .blendv },
                                 else => null,
                             },
-                            64 => switch (lhs_ty.vectorLen(mod)) {
+                            64 => switch (lhs_ty.vectorLen(zcu)) {
                                 1...4 => .{ .v_pd, .blendv },
                                 else => null,
                             },
@@ -11219,21 +11220,21 @@ fn genBinOp(
         } else {
             const has_blend = self.hasFeature(.sse4_1);
             try self.asmRegisterRegisterImmediate(
-                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ ._ss, .cmp },
                         64 => .{ ._sd, .cmp },
                         16, 80, 128 => null,
                         else => unreachable,
                     },
-                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
-                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen(mod)) {
+                    .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+                        .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(zcu)) {
                                 1 => .{ ._ss, .cmp },
                                 2...4 => .{ ._ps, .cmp },
                                 else => null,
                             },
-                            64 => switch (lhs_ty.vectorLen(mod)) {
+                            64 => switch (lhs_ty.vectorLen(zcu)) {
                                 1 => .{ ._sd, .cmp },
                                 2 => .{ ._pd, .cmp },
                                 else => null,
@@ -11252,20 +11253,20 @@ fn genBinOp(
                 Immediate.u(if (has_blend) 3 else 7), // unord, ord
             );
             if (has_blend) try self.asmRegisterRegisterRegister(
-                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+                @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ ._ps, .blendv },
                         64 => .{ ._pd, .blendv },
                         16, 80, 128 => null,
                         else => unreachable,
                     },
-                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
-                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen(mod)) {
+                    .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+                        .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(zcu)) {
                                 1...4 => .{ ._ps, .blendv },
                                 else => null,
                             },
-                            64 => switch (lhs_ty.vectorLen(mod)) {
+                            64 => switch (lhs_ty.vectorLen(zcu)) {
                                 1...2 => .{ ._pd, .blendv },
                                 else => null,
                             },
@@ -11282,20 +11283,20 @@ fn genBinOp(
                 lhs_copy_reg.?,
                 mask_reg,
             ) else {
-                const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(mod)) {
+                const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(zcu)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => ._ps,
                         64 => ._pd,
                         16, 80, 128 => null,
                         else => unreachable,
                     },
-                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
-                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen(mod)) {
+                    .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+                        .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(zcu)) {
                                 1...4 => ._ps,
                                 else => null,
                             },
-                            64 => switch (lhs_ty.vectorLen(mod)) {
+                            64 => switch (lhs_ty.vectorLen(zcu)) {
                                 1...2 => ._pd,
                                 else => null,
                             },
@@ -11314,7 +11315,7 @@ fn genBinOp(
             }
         },
         .cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => {
-            switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+            switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
                 .Int => switch (air_tag) {
                     .cmp_lt,
                     .cmp_eq,
@@ -11395,8 +11396,8 @@ fn genBinOpMir(
     src_mcv: MCValue,
 ) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const zcu = pt.zcu;
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
     try self.spillEflagsIfOccupied();
     switch (dst_mcv) {
         .none,
@@ -11643,7 +11644,7 @@ fn genBinOpMir(
             defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
 
             const ty_signedness =
-                if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned;
+                if (ty.isAbiInt(zcu)) ty.intInfo(zcu).signedness else .unsigned;
             const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) {
                 .signed => Type.usize,
                 .unsigned => Type.isize,
@@ -11820,7 +11821,7 @@ fn genBinOpMir(
 /// Does not support byte-size operands.
 fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
     const pt = self.pt;
-    const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+    const abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
     try self.spillEflagsIfOccupied();
     switch (dst_mcv) {
         .none,
@@ -12009,7 +12010,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genInlineMemset(
                     dst_mcv.address().offset(@intFromBool(regs_frame_addr.regs > 0)),
                     .{ .immediate = 0 },
-                    .{ .immediate = arg_ty.abiSize(pt) - @intFromBool(regs_frame_addr.regs > 0) },
+                    .{ .immediate = arg_ty.abiSize(zcu) - @intFromBool(regs_frame_addr.regs > 0) },
                     .{},
                 );
 
@@ -12296,7 +12297,7 @@ fn genCall(self: *Self, info: union(enum) {
                 try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs));
             },
             .indirect => |reg_off| {
-                frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, pt));
+                frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu));
                 try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg, .{});
                 try self.register_manager.getReg(reg_off.reg, null);
                 try reg_locks.append(self.register_manager.lockReg(reg_off.reg));
@@ -12368,7 +12369,7 @@ fn genCall(self: *Self, info: union(enum) {
         .none, .unreach => {},
         .indirect => |reg_off| {
             const ret_ty = Type.fromInterned(fn_info.return_type);
-            const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt));
+            const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu));
             try self.genSetReg(reg_off.reg, Type.usize, .{
                 .lea_frame = .{ .index = frame_index, .off = -reg_off.off },
             }, .{});
@@ -12383,14 +12384,14 @@ fn genCall(self: *Self, info: union(enum) {
             .none, .load_frame => {},
             .register => |dst_reg| switch (fn_info.cc) {
                 else => try self.genSetReg(
-                    registerAlias(dst_reg, @intCast(arg_ty.abiSize(pt))),
+                    registerAlias(dst_reg, @intCast(arg_ty.abiSize(zcu))),
                     arg_ty,
                     src_arg,
                     .{},
                 ),
                 .C, .SysV, .Win64 => {
                     const promoted_ty = self.promoteInt(arg_ty);
-                    const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(pt));
+                    const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(zcu));
                     const dst_alias = registerAlias(dst_reg, promoted_abi_size);
                     try self.genSetReg(dst_alias, promoted_ty, src_arg, .{});
                     if (promoted_ty.toIntern() != arg_ty.toIntern())
@@ -12514,10 +12515,10 @@ fn genCall(self: *Self, info: union(enum) {
 
 fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
 
-    const ret_ty = self.fn_type.fnReturnType(mod);
+    const ret_ty = self.fn_type.fnReturnType(zcu);
     switch (self.ret_mcv.short) {
         .none => {},
         .register,
@@ -12570,7 +12571,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     var ty = self.typeOf(bin_op.lhs);
     var null_compare: ?Mir.Inst.Index = null;
@@ -12602,7 +12603,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
         };
         defer for (rhs_locks) |rhs_lock| if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
-        switch (ty.zigTypeTag(mod)) {
+        switch (ty.zigTypeTag(zcu)) {
             .Float => {
                 const float_bits = ty.floatBits(self.target.*);
                 if (switch (float_bits) {
@@ -12638,11 +12639,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
                     };
                 }
             },
-            .Optional => if (!ty.optionalReprIsPayload(mod)) {
+            .Optional => if (!ty.optionalReprIsPayload(zcu)) {
                 const opt_ty = ty;
-                const opt_abi_size: u31 = @intCast(opt_ty.abiSize(pt));
-                ty = opt_ty.optionalChild(mod);
-                const payload_abi_size: u31 = @intCast(ty.abiSize(pt));
+                const opt_abi_size: u31 = @intCast(opt_ty.abiSize(zcu));
+                ty = opt_ty.optionalChild(zcu);
+                const payload_abi_size: u31 = @intCast(ty.abiSize(zcu));
 
                 const temp_lhs_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
                 const temp_lhs_lock = self.register_manager.lockRegAssumeUnused(temp_lhs_reg);
@@ -12699,9 +12700,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
             else => {},
         }
 
-        switch (ty.zigTypeTag(mod)) {
+        switch (ty.zigTypeTag(zcu)) {
             else => {
-                const abi_size: u16 = @intCast(ty.abiSize(pt));
+                const abi_size: u16 = @intCast(ty.abiSize(zcu));
                 const may_flip: enum {
                     may_flip,
                     must_flip,
@@ -12734,7 +12735,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
                 defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
                 break :result Condition.fromCompareOperator(
-                    if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned,
+                    if (ty.isAbiInt(zcu)) ty.intInfo(zcu).signedness else .unsigned,
                     result_op: {
                         const flipped_op = if (flipped) op.reverse() else op;
                         if (abi_size > 8) switch (flipped_op) {
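Note: `Condition.fromCompareOperator` above takes the operand signedness because x86
encodes signed and unsigned orderings as different condition codes: carry-based `b`/`a`
for unsigned, sign/overflow-based `l`/`g` for signed; non-integer types fall back to
unsigned, as in the hunk above. A hedged sketch of the mapping; the tag names are assumed
from their other uses in this file:

    fn ccFor(signedness: std.builtin.Signedness, op: math.CompareOperator) Condition {
        return switch (op) {
            .lt => if (signedness == .unsigned) .b else .l, // below vs. less
            .lte => if (signedness == .unsigned) .be else .le,
            .eq => .e,
            .neq => .ne,
            .gte => if (signedness == .unsigned) .ae else .ge,
            .gt => if (signedness == .unsigned) .a else .g, // above vs. greater
        };
    }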
@@ -13029,6 +13030,7 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
 
     const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
@@ -13040,7 +13042,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
     try self.spillEflagsIfOccupied();
 
     const op_ty = self.typeOf(un_op);
-    const op_abi_size: u32 = @intCast(op_ty.abiSize(pt));
+    const op_abi_size: u32 = @intCast(op_ty.abiSize(zcu));
     const op_mcv = try self.resolveInst(un_op);
     const dst_reg = switch (op_mcv) {
         .register => |reg| reg,
@@ -13164,7 +13166,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
 
 fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !Mir.Inst.Index {
     const pt = self.pt;
-    const abi_size = ty.abiSize(pt);
+    const abi_size = ty.abiSize(pt.zcu);
     switch (mcv) {
         .eflags => |cc| {
             // Here we map the opposites since the jump is to the false branch.
@@ -13237,7 +13239,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     switch (opt_mcv) {
         .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
         else => {},
@@ -13245,12 +13247,12 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 
     try self.spillEflagsIfOccupied();
 
-    const pl_ty = opt_ty.optionalChild(mod);
+    const pl_ty = opt_ty.optionalChild(zcu);
 
-    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
-        .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
+    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
+        .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
     else
-        .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
+        .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
 
     self.eflags_inst = inst;
     switch (opt_mcv) {
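Note: `some_info` in the hunk above captures the two optional layouts: pointer-like
optionals encode null in the payload itself (slices via their pointer field), while all
others keep a `bool` "some" flag immediately after the payload. A sketch under those
assumptions; `nullFlagOffset` is a hypothetical helper, not part of this diff:

    fn nullFlagOffset(zcu: *Zcu, opt_ty: Type) ?i32 {
        // Pointer-like: null lives in the payload itself, test at offset 0.
        if (opt_ty.optionalReprIsPayload(zcu)) return null;
        // Otherwise a bool flag sits directly after the payload.
        return @as(i32, @intCast(opt_ty.optionalChild(zcu).abiSize(zcu)));
    }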
@@ -13279,14 +13281,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 
         .register => |opt_reg| {
             if (some_info.off == 0) {
-                const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+                const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
                 const alias_reg = registerAlias(opt_reg, some_abi_size);
                 assert(some_abi_size * 8 == alias_reg.bitSize());
                 try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
                 return .{ .eflags = .z };
             }
             assert(some_info.ty.ip_index == .bool_type);
-            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
+            const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu));
             try self.asmRegisterImmediate(
                 .{ ._, .bt },
                 registerAlias(opt_reg, opt_abi_size),
@@ -13306,7 +13308,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
             defer self.register_manager.unlockReg(addr_reg_lock);
 
             try self.genSetReg(addr_reg, Type.usize, opt_mcv.address(), .{});
-            const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+            const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
             try self.asmMemoryImmediate(
                 .{ ._, .cmp },
                 .{
@@ -13322,7 +13324,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
         },
 
         .indirect, .load_frame => {
-            const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+            const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
             try self.asmMemoryImmediate(
                 .{ ._, .cmp },
                 switch (opt_mcv) {
@@ -13351,16 +13353,16 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 
 fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const opt_ty = ptr_ty.childType(mod);
-    const pl_ty = opt_ty.optionalChild(mod);
+    const zcu = pt.zcu;
+    const opt_ty = ptr_ty.childType(zcu);
+    const pl_ty = opt_ty.optionalChild(zcu);
 
     try self.spillEflagsIfOccupied();
 
-    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
-        .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
+    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
+        .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
     else
-        .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
+        .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
 
     const ptr_reg = switch (ptr_mcv) {
         .register => |reg| reg,
@@ -13369,7 +13371,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
     const ptr_lock = self.register_manager.lockReg(ptr_reg);
     defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+    const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
     try self.asmMemoryImmediate(
         .{ ._, .cmp },
         .{
@@ -13388,13 +13390,13 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
 
 fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const err_ty = eu_ty.errorUnionSet(mod);
-    if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false
+    const zcu = pt.zcu;
+    const err_ty = eu_ty.errorUnionSet(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false
 
     try self.spillEflagsIfOccupied();
 
-    const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt));
+    const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
     switch (eu_mcv) {
         .register => |reg| {
             const eu_lock = self.register_manager.lockReg(reg);
@@ -13437,10 +13439,10 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue)
 
 fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const eu_ty = ptr_ty.childType(mod);
-    const err_ty = eu_ty.errorUnionSet(mod);
-    if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false
+    const zcu = pt.zcu;
+    const eu_ty = ptr_ty.childType(zcu);
+    const err_ty = eu_ty.errorUnionSet(zcu);
+    if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false
 
     try self.spillEflagsIfOccupied();
 
@@ -13451,7 +13453,7 @@ fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCV
     const ptr_lock = self.register_manager.lockReg(ptr_reg);
     defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt));
+    const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
     try self.asmMemoryImmediate(
         .{ ._, .cmp },
         .{
@@ -13724,12 +13726,12 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) void {
 }
 
 fn airBr(self: *Self, inst: Air.Inst.Index) !void {
-    const pt = self.pt;
+    const zcu = self.pt.zcu;
     const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
 
     const block_ty = self.typeOfIndex(br.block_inst);
     const block_unused =
-        !block_ty.hasRuntimeBitsIgnoreComptime(pt) or self.liveness.isUnused(br.block_inst);
+        !block_ty.hasRuntimeBitsIgnoreComptime(zcu) or self.liveness.isUnused(br.block_inst);
     const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
     const block_data = self.blocks.getPtr(br.block_inst).?;
     const first_br = block_data.relocs.items.len == 0;
@@ -13786,7 +13788,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
     const clobbers_len: u31 = @truncate(extra.data.flags);
@@ -13825,7 +13827,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
         };
         const ty = switch (output) {
             .none => self.typeOfIndex(inst),
-            else => self.typeOf(output).childType(mod),
+            else => self.typeOf(output).childType(zcu),
         };
         const is_read = switch (constraint[0]) {
             '=' => false,
@@ -13850,7 +13852,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
                         'x' => abi.RegisterClass.sse,
                         else => unreachable,
                     }) orelse return self.fail("ran out of registers lowering inline asm", .{}),
-                    @intCast(ty.abiSize(pt)),
+                    @intCast(ty.abiSize(zcu)),
                 )
             else if (mem.eql(u8, rest, "m"))
                 if (output != .none) null else return self.fail(
@@ -13920,7 +13922,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
                 break :arg input_mcv;
             const reg = try self.register_manager.allocReg(null, rc);
             try self.genSetReg(reg, ty, input_mcv, .{});
-            break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(pt))) };
+            break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(zcu))) };
         } else if (mem.eql(u8, constraint, "i") or mem.eql(u8, constraint, "n"))
             switch (input_mcv) {
                 .immediate => |imm| .{ .immediate = imm },
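Note: both constraint paths above size the chosen register with `registerAlias`, which
picks the sub-register matching the operand width (al/ax/eax/rax for the gp class). The
"r"-style input path, condensed:

    // "r"-style input: allocate from the requested class, load the value,
    // then refer to it by the alias whose width matches the operand type.
    const in_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
    try self.genSetReg(in_reg, ty, input_mcv, .{});
    const in_alias = registerAlias(in_reg, @intCast(ty.abiSize(zcu)));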
@@ -14497,18 +14499,18 @@ const MoveStrategy = union(enum) {
 };
 fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !MoveStrategy {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     switch (class) {
         .general_purpose, .segment => return .{ .move = .{ ._, .mov } },
         .x87 => return .x87_load_store,
         .mmx => {},
-        .sse => switch (ty.zigTypeTag(mod)) {
+        .sse => switch (ty.zigTypeTag(zcu)) {
             else => {
-                const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none);
+                const classes = mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
+                const classes = mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
                 assert(std.mem.indexOfNone(abi.Class, classes, &.{
                     .integer, .sse, .sseup, .memory, .float, .float_combine,
                 }) == null);
-                const abi_size = ty.abiSize(pt);
+                const abi_size = ty.abiSize(zcu);
                 if (abi_size < 4 or
                     std.mem.indexOfScalar(abi.Class, classes, .integer) != null) switch (abi_size) {
                     1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
@@ -14579,16 +14581,16 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                 else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
                 else => {},
             },
-            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-                .Bool => switch (ty.vectorLen(mod)) {
+            .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+                .Bool => switch (ty.vectorLen(zcu)) {
                     33...64 => return .{ .move = if (self.hasFeature(.avx))
                         .{ .v_q, .mov }
                     else
                         .{ ._q, .mov } },
                     else => {},
                 },
-                .Int => switch (ty.childType(mod).intInfo(mod).bits) {
-                    1...8 => switch (ty.vectorLen(mod)) {
+                .Int => switch (ty.childType(zcu).intInfo(zcu).bits) {
+                    1...8 => switch (ty.vectorLen(zcu)) {
                         1...16 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14599,7 +14601,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_, .movdqu } },
                         else => {},
                     },
-                    9...16 => switch (ty.vectorLen(mod)) {
+                    9...16 => switch (ty.vectorLen(zcu)) {
                         1...8 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14610,7 +14612,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_, .movdqu } },
                         else => {},
                     },
-                    17...32 => switch (ty.vectorLen(mod)) {
+                    17...32 => switch (ty.vectorLen(zcu)) {
                         1...4 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14621,7 +14623,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_, .movdqu } },
                         else => {},
                     },
-                    33...64 => switch (ty.vectorLen(mod)) {
+                    33...64 => switch (ty.vectorLen(zcu)) {
                         1...2 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14632,7 +14634,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_, .movdqu } },
                         else => {},
                     },
-                    65...128 => switch (ty.vectorLen(mod)) {
+                    65...128 => switch (ty.vectorLen(zcu)) {
                         1 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14643,7 +14645,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_, .movdqu } },
                         else => {},
                     },
-                    129...256 => switch (ty.vectorLen(mod)) {
+                    129...256 => switch (ty.vectorLen(zcu)) {
                         1 => if (self.hasFeature(.avx))
                             return .{ .move = if (aligned)
                                 .{ .v_, .movdqa }
@@ -14653,8 +14655,8 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                     },
                     else => {},
                 },
-                .Pointer, .Optional => if (ty.childType(mod).isPtrAtRuntime(mod))
-                    switch (ty.vectorLen(mod)) {
+                .Pointer, .Optional => if (ty.childType(zcu).isPtrAtRuntime(zcu))
+                    switch (ty.vectorLen(zcu)) {
                         1...2 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14667,8 +14669,8 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                     }
                 else
                     unreachable,
-                .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
-                    16 => switch (ty.vectorLen(mod)) {
+                .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+                    16 => switch (ty.vectorLen(zcu)) {
                         1...8 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14679,7 +14681,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_, .movdqu } },
                         else => {},
                     },
-                    32 => switch (ty.vectorLen(mod)) {
+                    32 => switch (ty.vectorLen(zcu)) {
                         1...4 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }
                         else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } },
@@ -14690,7 +14692,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_ps, .movu } },
                         else => {},
                     },
-                    64 => switch (ty.vectorLen(mod)) {
+                    64 => switch (ty.vectorLen(zcu)) {
                         1...2 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu }
                         else if (aligned) .{ ._pd, .mova } else .{ ._pd, .movu } },
@@ -14701,7 +14703,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
                                 .{ .v_pd, .movu } },
                         else => {},
                     },
-                    128 => switch (ty.vectorLen(mod)) {
+                    128 => switch (ty.vectorLen(zcu)) {
                         1 => return .{ .move = if (self.hasFeature(.avx))
                             if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
                         else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
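Note: every vector case in `moveStrategy` answers the same three questions: is AVX
available (the `v_`-prefixed encodings, ymm support), does the payload fit in 16 bytes
(xmm) or need 32 (ymm, AVX only), and is the address known aligned (`movdqa`/`mova`)
or possibly unaligned (`movdqu`/`movu`). The recurring integer-vector arm, restated:

    // movdqa faults on an unaligned address; movdqu accepts any address.
    return .{ .move = if (self.hasFeature(.avx))
        if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
    else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } };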
@@ -14804,7 +14806,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: Copy
                     } },
                     else => unreachable,
                 }, opts);
-                part_disp += @intCast(dst_ty.abiSize(pt));
+                part_disp += @intCast(dst_ty.abiSize(pt.zcu));
             }
         },
         .indirect => |reg_off| try self.genSetMem(
@@ -14846,9 +14848,9 @@ fn genSetReg(
     opts: CopyOptions,
 ) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
-    if (ty.bitSize(pt) > dst_reg.bitSize())
+    const zcu = pt.zcu;
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
+    if (ty.bitSize(zcu) > dst_reg.bitSize())
         return self.fail("genSetReg called with a value larger than dst_reg", .{});
     switch (src_mcv) {
         .none,
@@ -14965,13 +14967,13 @@ fn genSetReg(
                 ),
                 .x87, .mmx, .ip => unreachable,
                 .sse => try self.asmRegisterRegister(
-                    @as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) {
+                    @as(?Mir.Inst.FixedTag, switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
                         else => switch (abi_size) {
                             1...16 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa },
                             17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null,
                             else => null,
                         },
-                        .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) {
+                        .Float => switch (ty.scalarType(zcu).floatBits(self.target.*)) {
                             16, 128 => switch (abi_size) {
                                 2...16 => if (self.hasFeature(.avx))
                                     .{ .v_, .movdqa }
@@ -15035,7 +15037,7 @@ fn genSetReg(
                     return (try self.moveStrategy(
                         ty,
                         dst_reg.class(),
-                        ty.abiAlignment(pt).check(@as(u32, @bitCast(small_addr))),
+                        ty.abiAlignment(zcu).check(@as(u32, @bitCast(small_addr))),
                     )).read(self, registerAlias(dst_reg, abi_size), .{
                         .base = .{ .reg = .ds },
                         .mod = .{ .rm = .{
@@ -15136,8 +15138,8 @@ fn genSetMem(
     opts: CopyOptions,
 ) InnerError!void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const zcu = pt.zcu;
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
     const dst_ptr_mcv: MCValue = switch (base) {
         .none => .{ .immediate = @bitCast(@as(i64, disp)) },
         .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
@@ -15159,8 +15161,8 @@ fn genSetMem(
         ),
         .immediate => |imm| switch (abi_size) {
             1, 2, 4 => {
-                const immediate = switch (if (ty.isAbiInt(mod))
-                    ty.intInfo(mod).signedness
+                const immediate = switch (if (ty.isAbiInt(zcu))
+                    ty.intInfo(zcu).signedness
                 else
                     .unsigned) {
                     .signed => Immediate.s(@truncate(@as(i64, @bitCast(imm)))),
@@ -15193,7 +15195,7 @@ fn genSetMem(
                         .size = .dword,
                         .disp = disp + offset,
                     } } },
-                    if (ty.isSignedInt(mod)) Immediate.s(
+                    if (ty.isSignedInt(zcu)) Immediate.s(
                         @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)),
                     ) else Immediate.u(
                         @as(u32, @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0)),
@@ -15263,33 +15265,33 @@ fn genSetMem(
             var part_disp: i32 = disp;
             for (try self.splitType(ty), src_regs) |src_ty, src_reg| {
                 try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }, opts);
-                part_disp += @intCast(src_ty.abiSize(pt));
+                part_disp += @intCast(src_ty.abiSize(zcu));
             }
         },
-        .register_overflow => |ro| switch (ty.zigTypeTag(mod)) {
+        .register_overflow => |ro| switch (ty.zigTypeTag(zcu)) {
             .Struct => {
                 try self.genSetMem(
                     base,
-                    disp + @as(i32, @intCast(ty.structFieldOffset(0, pt))),
-                    ty.structFieldType(0, mod),
+                    disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))),
+                    ty.structFieldType(0, zcu),
                     .{ .register = ro.reg },
                     opts,
                 );
                 try self.genSetMem(
                     base,
-                    disp + @as(i32, @intCast(ty.structFieldOffset(1, pt))),
-                    ty.structFieldType(1, mod),
+                    disp + @as(i32, @intCast(ty.structFieldOffset(1, zcu))),
+                    ty.structFieldType(1, zcu),
                     .{ .eflags = ro.eflags },
                     opts,
                 );
             },
             .Optional => {
-                assert(!ty.optionalReprIsPayload(mod));
-                const child_ty = ty.optionalChild(mod);
+                assert(!ty.optionalReprIsPayload(zcu));
+                const child_ty = ty.optionalChild(zcu);
                 try self.genSetMem(base, disp, child_ty, .{ .register = ro.reg }, opts);
                 try self.genSetMem(
                     base,
-                    disp + @as(i32, @intCast(child_ty.abiSize(pt))),
+                    disp + @as(i32, @intCast(child_ty.abiSize(zcu))),
                     Type.bool,
                     .{ .eflags = ro.eflags },
                     opts,
@@ -15521,14 +15523,14 @@ fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const dst_ty = self.typeOfIndex(inst);
     const src_ty = self.typeOf(ty_op.operand);
 
     const result = result: {
         const src_mcv = try self.resolveInst(ty_op.operand);
-        if (dst_ty.isPtrAtRuntime(mod) and src_ty.isPtrAtRuntime(mod)) switch (src_mcv) {
+        if (dst_ty.isPtrAtRuntime(zcu) and src_ty.isPtrAtRuntime(zcu)) switch (src_mcv) {
             .lea_frame => break :result src_mcv,
             else => if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv,
         };
@@ -15539,10 +15541,10 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
         defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and
+        const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and
             self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
             const dst_mcv = try self.allocRegOrMem(inst, true);
-            try self.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) {
+            try self.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) {
                 .lt => dst_ty,
                 .eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty,
                 .gt => src_ty,
@@ -15552,12 +15554,12 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 
         if (dst_ty.isRuntimeFloat()) break :result dst_mcv;
 
-        if (dst_ty.isAbiInt(mod) and src_ty.isAbiInt(mod) and
-            dst_ty.intInfo(mod).signedness == src_ty.intInfo(mod).signedness) break :result dst_mcv;
+        if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and
+            dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv;
 
-        const abi_size = dst_ty.abiSize(pt);
-        const bit_size = dst_ty.bitSize(pt);
-        if (abi_size * 8 <= bit_size or dst_ty.isVector(mod)) break :result dst_mcv;
+        const abi_size = dst_ty.abiSize(zcu);
+        const bit_size = dst_ty.bitSize(zcu);
+        if (abi_size * 8 <= bit_size or dst_ty.isVector(zcu)) break :result dst_mcv;
 
         const dst_limbs_len = math.divCeil(i32, @intCast(bit_size), 64) catch unreachable;
         const high_mcv: MCValue = switch (dst_mcv) {
@@ -15586,20 +15588,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const slice_ty = self.typeOfIndex(inst);
     const ptr_ty = self.typeOf(ty_op.operand);
     const ptr = try self.resolveInst(ty_op.operand);
-    const array_ty = ptr_ty.childType(mod);
-    const array_len = array_ty.arrayLen(mod);
+    const array_ty = ptr_ty.childType(zcu);
+    const array_len = array_ty.arrayLen(zcu);
 
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
+    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
     try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr, .{});
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(ptr_ty.abiSize(pt)),
+        @intCast(ptr_ty.abiSize(zcu)),
         Type.usize,
         .{ .immediate = array_len },
         .{},
@@ -15611,16 +15613,16 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const dst_ty = self.typeOfIndex(inst);
     const dst_bits = dst_ty.floatBits(self.target.*);
 
     const src_ty = self.typeOf(ty_op.operand);
-    const src_bits: u32 = @intCast(src_ty.bitSize(pt));
+    const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
     const src_signedness =
-        if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+        if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned;
     const src_size = math.divCeil(u32, @max(switch (src_signedness) {
         .signed => src_bits,
         .unsigned => src_bits + 1,
@@ -15666,7 +15668,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
         defer self.register_manager.unlockReg(dst_lock);
 
-        const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) {
+        const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(zcu)) {
             .Float => switch (dst_ty.floatBits(self.target.*)) {
                 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
                 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
@@ -15691,13 +15693,13 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const dst_ty = self.typeOfIndex(inst);
-    const dst_bits: u32 = @intCast(dst_ty.bitSize(pt));
+    const dst_bits: u32 = @intCast(dst_ty.bitSize(zcu));
     const dst_signedness =
-        if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
+        if (dst_ty.isAbiInt(zcu)) dst_ty.intInfo(zcu).signedness else .unsigned;
     const dst_size = math.divCeil(u32, @max(switch (dst_signedness) {
         .signed => dst_bits,
         .unsigned => dst_bits + 1,
@@ -15768,7 +15770,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
 
     const ptr_ty = self.typeOf(extra.ptr);
     const val_ty = self.typeOf(extra.expected_value);
-    const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+    const val_abi_size: u32 = @intCast(val_ty.abiSize(pt.zcu));
 
     try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
     const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
@@ -15859,7 +15861,7 @@ fn atomicOp(
     order: std.builtin.AtomicOrder,
 ) InnerError!MCValue {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_lock = switch (ptr_mcv) {
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
@@ -15872,7 +15874,7 @@ fn atomicOp(
     };
     defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+    const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
     const mem_size = Memory.Size.fromSize(val_abi_size);
     const ptr_mem: Memory = switch (ptr_mcv) {
         .immediate, .register, .register_offset, .lea_frame => try ptr_mcv.deref().mem(self, mem_size),
@@ -16031,8 +16033,8 @@ fn atomicOp(
                 .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv),
                 .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv),
                 .Min, .Max => {
-                    const cc: Condition = switch (if (val_ty.isAbiInt(mod))
-                        val_ty.intInfo(mod).signedness
+                    const cc: Condition = switch (if (val_ty.isAbiInt(zcu))
+                        val_ty.intInfo(zcu).signedness
                     else
                         .unsigned) {
                         .unsigned => switch (op) {
@@ -16156,8 +16158,8 @@ fn atomicOp(
                     try self.asmRegisterMemory(.{ ._, .xor }, .rcx, val_hi_mem);
                 },
                 .Min, .Max => {
-                    const cc: Condition = switch (if (val_ty.isAbiInt(mod))
-                        val_ty.intInfo(mod).signedness
+                    const cc: Condition = switch (if (val_ty.isAbiInt(zcu))
+                        val_ty.intInfo(zcu).signedness
                     else
                         .unsigned) {
                         .unsigned => switch (op) {
@@ -16264,7 +16266,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
 
 fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     result: {
@@ -16290,19 +16292,19 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
         };
         defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt));
+        const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu));
 
         if (elem_abi_size == 1) {
-            const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
+            const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
                 // TODO: this only handles slices stored in the stack
                 .Slice => dst_ptr,
                 .One => dst_ptr,
                 .C, .Many => unreachable,
             };
-            const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
+            const len: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
                 // TODO: this only handles slices stored in the stack
                 .Slice => dst_ptr.address().offset(8).deref(),
-                .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
+                .One => .{ .immediate = dst_ptr_ty.childType(zcu).arrayLen(zcu) },
                 .C, .Many => unreachable,
             };
             const len_lock: ?RegisterLock = switch (len) {
@@ -16318,9 +16320,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
         // Store the first element, and then rely on memcpy copying forwards.
         // Length zero requires a runtime check - so we handle arrays specially
         // here to elide it.
-        switch (dst_ptr_ty.ptrSize(mod)) {
+        switch (dst_ptr_ty.ptrSize(zcu)) {
             .Slice => {
-                const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(mod);
+                const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(zcu);
 
                 // TODO: this only handles slices stored in the stack
                 const ptr = dst_ptr;
@@ -16365,7 +16367,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
             .One => {
                 const elem_ptr_ty = try pt.singleMutPtrType(elem_ty);
 
-                const len = dst_ptr_ty.childType(mod).arrayLen(mod);
+                const len = dst_ptr_ty.childType(zcu).arrayLen(zcu);
 
                 assert(len != 0); // prevented by Sema
                 try self.store(elem_ptr_ty, dst_ptr, src_val, .{ .safety = safety });
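Note: the branch above relies on a copy-forward trick described by the comment in the previous hunk: only the first element is stored explicitly, and an overlapping forward copy replicates it across the rest of the array. A standalone sketch of the idea, as a hypothetical helper (not part of this commit):

const std = @import("std");

/// Hypothetical model of the memset lowering above: write one element,
/// then copy forwards so bytes written earlier seed the bytes that follow.
fn memsetBySelfCopy(dst: []u8, elem: []const u8) void {
    std.debug.assert(elem.len != 0 and dst.len % elem.len == 0);
    @memcpy(dst[0..elem.len], elem); // store the first element
    var i: usize = elem.len;
    while (i < dst.len) : (i += 1) dst[i] = dst[i - elem.len]; // overlapping forward copy
}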
@@ -16393,7 +16395,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 
 fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
@@ -16415,7 +16417,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
+    const len: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
         .Slice => len: {
             const len_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
             const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
@@ -16425,13 +16427,13 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .i_, .mul },
                 len_reg,
                 try dst_ptr.address().offset(8).deref().mem(self, .qword),
-                Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(pt))),
+                Immediate.s(@intCast(dst_ptr_ty.childType(zcu).abiSize(zcu))),
             );
             break :len .{ .register = len_reg };
         },
         .One => len: {
-            const array_ty = dst_ptr_ty.childType(mod);
-            break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(pt) };
+            const array_ty = dst_ptr_ty.childType(zcu);
+            break :len .{ .immediate = array_ty.arrayLen(zcu) * array_ty.childType(zcu).abiSize(zcu) };
         },
         .C, .Many => unreachable,
     };
@@ -16449,6 +16451,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
+    const zcu = pt.zcu;
     const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
     const inst_ty = self.typeOfIndex(inst);
     const enum_ty = self.typeOf(un_op);
@@ -16457,8 +16460,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
     // We need a properly aligned and sized call frame to be able to call this function.
     {
         const needed_call_frame = FrameAlloc.init(.{
-            .size = inst_ty.abiSize(pt),
-            .alignment = inst_ty.abiAlignment(pt),
+            .size = inst_ty.abiSize(zcu),
+            .alignment = inst_ty.abiAlignment(zcu),
         });
         const frame_allocs_slice = self.frame_allocs.slice();
         const stack_frame_size =
@@ -16590,15 +16593,15 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const vector_ty = self.typeOfIndex(inst);
-    const vector_len = vector_ty.vectorLen(mod);
+    const vector_len = vector_ty.vectorLen(zcu);
     const dst_rc = self.regClassForType(vector_ty);
     const scalar_ty = self.typeOf(ty_op.operand);
 
     const result: MCValue = result: {
-        switch (scalar_ty.zigTypeTag(mod)) {
+        switch (scalar_ty.zigTypeTag(zcu)) {
             else => {},
             .Bool => {
                 const regs =
@@ -16641,7 +16644,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
                 break :result .{ .register = regs[0] };
             },
             .Int => if (self.hasFeature(.avx2)) avx2: {
-                const mir_tag = @as(?Mir.Inst.FixedTag, switch (scalar_ty.intInfo(mod).bits) {
+                const mir_tag = @as(?Mir.Inst.FixedTag, switch (scalar_ty.intInfo(zcu).bits) {
                     else => null,
                     1...8 => switch (vector_len) {
                         else => null,
@@ -16672,15 +16675,15 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
                 const src_mcv = try self.resolveInst(ty_op.operand);
                 if (src_mcv.isMemory()) try self.asmRegisterMemory(
                     mir_tag,
-                    registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))),
+                    registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu))),
                     try src_mcv.mem(self, self.memSize(scalar_ty)),
                 ) else {
                     if (mir_tag[0] == .v_i128) break :avx2;
                     try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
                     try self.asmRegisterRegister(
                         mir_tag,
-                        registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))),
-                        registerAlias(dst_reg, @intCast(scalar_ty.abiSize(pt))),
+                        registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu))),
+                        registerAlias(dst_reg, @intCast(scalar_ty.abiSize(zcu))),
                     );
                 }
                 break :result .{ .register = dst_reg };
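Note: the AVX2 branch above selects a vpbroadcast variant by the scalar's bit width and the vector length; at the language level the operation is a plain vector splat, e.g.:

// What the broadcast selected above computes (32-bit lanes shown):
fn splat4(x: u32) @Vector(4, u32) {
    return @splat(x);
}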
@@ -16692,8 +16695,8 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genSetReg(dst_reg, scalar_ty, .{ .air_ref = ty_op.operand }, .{});
                 if (vector_len == 1) break :result .{ .register = dst_reg };
 
-                const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt)));
-                const scalar_bits = scalar_ty.intInfo(mod).bits;
+                const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu)));
+                const scalar_bits = scalar_ty.intInfo(zcu).bits;
                 if (switch (scalar_bits) {
                     1...8 => true,
                     9...128 => false,
@@ -16929,14 +16932,14 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
     const ty = self.typeOfIndex(inst);
-    const vec_len = ty.vectorLen(mod);
-    const elem_ty = ty.childType(mod);
-    const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
-    const abi_size: u32 = @intCast(ty.abiSize(pt));
+    const vec_len = ty.vectorLen(zcu);
+    const elem_ty = ty.childType(zcu);
+    const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
+    const abi_size: u32 = @intCast(ty.abiSize(zcu));
     const pred_ty = self.typeOf(pl_op.operand);
 
     const result = result: {
@@ -17160,7 +17163,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(mod).zigTypeTag(mod)) {
+        const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(zcu).zigTypeTag(zcu)) {
             else => null,
             .Int => switch (abi_size) {
                 0 => unreachable,
@@ -17176,7 +17179,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
                     null,
                 else => null,
             },
-            .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+            .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
                 else => unreachable,
                 16, 80, 128 => null,
                 32 => switch (vec_len) {
@@ -17230,7 +17233,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
                 try self.copyToTmpRegister(ty, lhs_mcv), abi_size),
             mask_alias,
         ) else {
-            const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(mod)) {
+            const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(zcu)) {
                 else => null,
                 .Int => .p_,
                 .Float => switch (elem_ty.floatBits(self.target.*)) {
@@ -17262,18 +17265,18 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
 
     const dst_ty = self.typeOfIndex(inst);
-    const elem_ty = dst_ty.childType(mod);
-    const elem_abi_size: u16 = @intCast(elem_ty.abiSize(pt));
-    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+    const elem_ty = dst_ty.childType(zcu);
+    const elem_abi_size: u16 = @intCast(elem_ty.abiSize(zcu));
+    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
     const lhs_ty = self.typeOf(extra.a);
-    const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+    const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
     const rhs_ty = self.typeOf(extra.b);
-    const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(pt));
+    const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(zcu));
     const max_abi_size = @max(dst_abi_size, lhs_abi_size, rhs_abi_size);
 
     const ExpectedContents = [32]?i32;
@@ -17286,10 +17289,10 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
     for (mask_elems, 0..) |*mask_elem, elem_index| {
         const mask_elem_val =
             Value.fromInterned(extra.mask).elemValue(pt, elem_index) catch unreachable;
-        mask_elem.* = if (mask_elem_val.isUndef(mod))
+        mask_elem.* = if (mask_elem_val.isUndef(zcu))
             null
         else
-            @intCast(mask_elem_val.toSignedInt(pt));
+            @intCast(mask_elem_val.toSignedInt(zcu));
     }
 
     const has_avx = self.hasFeature(.avx);
@@ -18028,7 +18031,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
             );
 
             if (has_avx) try self.asmRegisterRegisterRegister(
-                .{ switch (elem_ty.zigTypeTag(mod)) {
+                .{ switch (elem_ty.zigTypeTag(zcu)) {
                     else => break :result null,
                     .Int => .vp_,
                     .Float => switch (elem_ty.floatBits(self.target.*)) {
@@ -18042,7 +18045,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
                 lhs_temp_alias,
                 rhs_temp_alias,
             ) else try self.asmRegisterRegister(
-                .{ switch (elem_ty.zigTypeTag(mod)) {
+                .{ switch (elem_ty.zigTypeTag(zcu)) {
                     else => break :result null,
                     .Int => .p_,
                     .Float => switch (elem_ty.floatBits(self.target.*)) {
@@ -18068,19 +18071,19 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
 
     const result: MCValue = result: {
         const operand_ty = self.typeOf(reduce.operand);
-        if (operand_ty.isVector(mod) and operand_ty.childType(mod).toIntern() == .bool_type) {
+        if (operand_ty.isVector(zcu) and operand_ty.childType(zcu).toIntern() == .bool_type) {
             try self.spillEflagsIfOccupied();
 
             const operand_mcv = try self.resolveInst(reduce.operand);
-            const mask_len = (math.cast(u6, operand_ty.vectorLen(mod)) orelse
+            const mask_len = (math.cast(u6, operand_ty.vectorLen(zcu)) orelse
                 return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}));
             const mask = (@as(u64, 1) << mask_len) - 1;
-            const abi_size: u32 = @intCast(operand_ty.abiSize(pt));
+            const abi_size: u32 = @intCast(operand_ty.abiSize(zcu));
             switch (reduce.operation) {
                 .Or => {
                     if (operand_mcv.isMemory()) try self.asmMemoryImmediate(
@@ -18126,36 +18129,36 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result_ty = self.typeOfIndex(inst);
-    const len: usize = @intCast(result_ty.arrayLen(mod));
+    const len: usize = @intCast(result_ty.arrayLen(zcu));
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = result: {
-        switch (result_ty.zigTypeTag(mod)) {
+        switch (result_ty.zigTypeTag(zcu)) {
             .Struct => {
-                const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
-                if (result_ty.containerLayout(mod) == .@"packed") {
-                    const struct_obj = mod.typeToStruct(result_ty).?;
+                const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
+                if (result_ty.containerLayout(zcu) == .@"packed") {
+                    const struct_obj = zcu.typeToStruct(result_ty).?;
                     try self.genInlineMemset(
                         .{ .lea_frame = .{ .index = frame_index } },
                         .{ .immediate = 0 },
-                        .{ .immediate = result_ty.abiSize(pt) },
+                        .{ .immediate = result_ty.abiSize(zcu) },
                         .{},
                     );
                     for (elements, 0..) |elem, elem_i_usize| {
                         const elem_i: u32 = @intCast(elem_i_usize);
                         if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
 
-                        const elem_ty = result_ty.structFieldType(elem_i, mod);
-                        const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt));
+                        const elem_ty = result_ty.structFieldType(elem_i, zcu);
+                        const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
                         if (elem_bit_size > 64) {
                             return self.fail(
                                 "TODO airAggregateInit implement packed structs with large fields",
                                 .{},
                             );
                         }
-                        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
+                        const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
                         const elem_abi_bits = elem_abi_size * 8;
                         const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
                         const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
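Note: a worked instance of the byte-offset math above, using hypothetical values:

// For a u16 field (elem_abi_size = 2, elem_abi_bits = 16) stored at packed
// bit offset elem_off = 21:
//   elem_byte_off = 21 / 16 * 2 = 2, so the store targets byte 2,
//   and the remaining 21 % 16 = 5 bits become an in-element shift.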
@@ -18229,8 +18232,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                 } else for (elements, 0..) |elem, elem_i| {
                     if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
 
-                    const elem_ty = result_ty.structFieldType(elem_i, mod);
-                    const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt));
+                    const elem_ty = result_ty.structFieldType(elem_i, zcu);
+                    const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
                     const elem_mcv = try self.resolveInst(elem);
                     const mat_elem_mcv = switch (elem_mcv) {
                         .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
@@ -18241,9 +18244,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                 break :result .{ .load_frame = .{ .index = frame_index } };
             },
             .Array, .Vector => {
-                const elem_ty = result_ty.childType(mod);
-                if (result_ty.isVector(mod) and elem_ty.toIntern() == .bool_type) {
-                    const result_size: u32 = @intCast(result_ty.abiSize(pt));
+                const elem_ty = result_ty.childType(zcu);
+                if (result_ty.isVector(zcu) and elem_ty.toIntern() == .bool_type) {
+                    const result_size: u32 = @intCast(result_ty.abiSize(zcu));
                     const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
                     try self.asmRegisterRegister(
                         .{ ._, .xor },
@@ -18274,8 +18277,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                     }
                     break :result .{ .register = dst_reg };
                 } else {
-                    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
-                    const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
+                    const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
+                    const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
 
                     for (elements, 0..) |elem, elem_i| {
                         const elem_mcv = try self.resolveInst(elem);
@@ -18292,7 +18295,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                             .{},
                         );
                     }
-                    if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem(
+                    if (result_ty.sentinel(zcu)) |sentinel| try self.genSetMem(
                         .{ .frame = frame_index },
                         @intCast(elem_size * elements.len),
                         elem_ty,
@@ -18318,18 +18321,18 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
     const result: MCValue = result: {
         const union_ty = self.typeOfIndex(inst);
-        const layout = union_ty.unionGetLayout(pt);
+        const layout = union_ty.unionGetLayout(zcu);
 
         const src_ty = self.typeOf(extra.init);
         const src_mcv = try self.resolveInst(extra.init);
         if (layout.tag_size == 0) {
-            if (layout.abi_size <= src_ty.abiSize(pt) and
+            if (layout.abi_size <= src_ty.abiSize(zcu) and
                 self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv;
 
             const dst_mcv = try self.allocRegOrMem(inst, true);
@@ -18339,13 +18342,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
 
         const dst_mcv = try self.allocRegOrMem(inst, false);
 
-        const union_obj = mod.typeToUnion(union_ty).?;
+        const union_obj = zcu.typeToUnion(union_ty).?;
         const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
         const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
-        const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+        const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
         const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
         const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
-        const tag_int = tag_int_val.toUnsignedInt(pt);
+        const tag_int = tag_int_val.toUnsignedInt(zcu);
         const tag_off: i32 = @intCast(layout.tagOffset());
         try self.genCopy(
             tag_ty,
@@ -18369,19 +18372,19 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
     const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
     const ty = self.typeOfIndex(inst);
 
     const ops = [3]Air.Inst.Ref{ extra.lhs, extra.rhs, pl_op.operand };
     const result = result: {
-        if (switch (ty.scalarType(mod).floatBits(self.target.*)) {
+        if (switch (ty.scalarType(zcu).floatBits(self.target.*)) {
             16, 80, 128 => true,
             32, 64 => !self.hasFeature(.fma),
             else => unreachable,
         }) {
-            if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement airMulAdd for {}", .{
+            if (ty.zigTypeTag(zcu) != .Float) return self.fail("TODO implement airMulAdd for {}", .{
                 ty.fmt(pt),
             });
 
@@ -18430,21 +18433,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 
         const mir_tag = @as(?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or
             mem.eql(u2, &order, &.{ 3, 1, 2 }))
-            switch (ty.zigTypeTag(mod)) {
+            switch (ty.zigTypeTag(zcu)) {
                 .Float => switch (ty.floatBits(self.target.*)) {
                     32 => .{ .v_ss, .fmadd132 },
                     64 => .{ .v_sd, .fmadd132 },
                     16, 80, 128 => null,
                     else => unreachable,
                 },
-                .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-                    .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
-                        32 => switch (ty.vectorLen(mod)) {
+                .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+                    .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+                        32 => switch (ty.vectorLen(zcu)) {
                             1 => .{ .v_ss, .fmadd132 },
                             2...8 => .{ .v_ps, .fmadd132 },
                             else => null,
                         },
-                        64 => switch (ty.vectorLen(mod)) {
+                        64 => switch (ty.vectorLen(zcu)) {
                             1 => .{ .v_sd, .fmadd132 },
                             2...4 => .{ .v_pd, .fmadd132 },
                             else => null,
@@ -18457,21 +18460,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
                 else => unreachable,
             }
         else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 }))
-            switch (ty.zigTypeTag(mod)) {
+            switch (ty.zigTypeTag(zcu)) {
                 .Float => switch (ty.floatBits(self.target.*)) {
                     32 => .{ .v_ss, .fmadd213 },
                     64 => .{ .v_sd, .fmadd213 },
                     16, 80, 128 => null,
                     else => unreachable,
                 },
-                .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-                    .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
-                        32 => switch (ty.vectorLen(mod)) {
+                .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+                    .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+                        32 => switch (ty.vectorLen(zcu)) {
                             1 => .{ .v_ss, .fmadd213 },
                             2...8 => .{ .v_ps, .fmadd213 },
                             else => null,
                         },
-                        64 => switch (ty.vectorLen(mod)) {
+                        64 => switch (ty.vectorLen(zcu)) {
                             1 => .{ .v_sd, .fmadd213 },
                             2...4 => .{ .v_pd, .fmadd213 },
                             else => null,
@@ -18484,21 +18487,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
                 else => unreachable,
             }
         else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 }))
-            switch (ty.zigTypeTag(mod)) {
+            switch (ty.zigTypeTag(zcu)) {
                 .Float => switch (ty.floatBits(self.target.*)) {
                     32 => .{ .v_ss, .fmadd231 },
                     64 => .{ .v_sd, .fmadd231 },
                     16, 80, 128 => null,
                     else => unreachable,
                 },
-                .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
-                    .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
-                        32 => switch (ty.vectorLen(mod)) {
+                .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+                    .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+                        32 => switch (ty.vectorLen(zcu)) {
                             1 => .{ .v_ss, .fmadd231 },
                             2...8 => .{ .v_ps, .fmadd231 },
                             else => null,
                         },
-                        64 => switch (ty.vectorLen(mod)) {
+                        64 => switch (ty.vectorLen(zcu)) {
                             1 => .{ .v_sd, .fmadd231 },
                             2...4 => .{ .v_pd, .fmadd231 },
                             else => null,
@@ -18516,7 +18519,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
         var mops: [3]MCValue = undefined;
         for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;
 
-        const abi_size: u32 = @intCast(ty.abiSize(pt));
+        const abi_size: u32 = @intCast(ty.abiSize(zcu));
         const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
         const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
         if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
@@ -18537,17 +18540,17 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const va_list_ty = self.air.instructions.items(.data)[@intFromEnum(inst)].ty;
     const ptr_anyopaque_ty = try pt.singleMutPtrType(Type.anyopaque);
 
     const result: MCValue = switch (abi.resolveCallingConvention(
-        self.fn_type.fnCallingConvention(mod),
+        self.fn_type.fnCallingConvention(zcu),
         self.target.*,
     )) {
         .SysV => result: {
             const info = self.va_info.sysv;
-            const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, pt));
+            const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, zcu));
             var field_off: u31 = 0;
             // gp_offset: c_uint,
             try self.genSetMem(
@@ -18557,7 +18560,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .immediate = info.gp_count * 8 },
                 .{},
             );
-            field_off += @intCast(Type.c_uint.abiSize(pt));
+            field_off += @intCast(Type.c_uint.abiSize(zcu));
             // fp_offset: c_uint,
             try self.genSetMem(
                 .{ .frame = dst_fi },
@@ -18566,7 +18569,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .immediate = abi.SysV.c_abi_int_param_regs.len * 8 + info.fp_count * 16 },
                 .{},
             );
-            field_off += @intCast(Type.c_uint.abiSize(pt));
+            field_off += @intCast(Type.c_uint.abiSize(zcu));
             // overflow_arg_area: *anyopaque,
             try self.genSetMem(
                 .{ .frame = dst_fi },
@@ -18575,7 +18578,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .lea_frame = info.overflow_arg_area },
                 .{},
             );
-            field_off += @intCast(ptr_anyopaque_ty.abiSize(pt));
+            field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
             // reg_save_area: *anyopaque,
             try self.genSetMem(
                 .{ .frame = dst_fi },
@@ -18584,7 +18587,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .lea_frame = info.reg_save_area },
                 .{},
             );
-            field_off += @intCast(ptr_anyopaque_ty.abiSize(pt));
+            field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
             break :result .{ .load_frame = .{ .index = dst_fi } };
         },
         .Win64 => return self.fail("TODO implement c_va_start for Win64", .{}),
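Note: for reference, the four genSetMem calls in the .SysV branch above populate the SysV AMD64 va_list record; the field_off increments (4, 4, 8, 8) match this layout:

// SysV AMD64 va_list, as materialized by the .SysV branch above
// (field offsets 0, 4, 8, 16; size 24, alignment 8):
const VaListSysV = extern struct {
    gp_offset: c_uint, // byte offset into reg_save_area for the next GP arg
    fp_offset: c_uint, // byte offset into reg_save_area for the next FP arg
    overflow_arg_area: *anyopaque, // varargs passed on the stack
    reg_save_area: *anyopaque, // spilled register arguments
};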
@@ -18595,7 +18598,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
     const ty = self.typeOfIndex(inst);
     const promote_ty = self.promoteVarArg(ty);
@@ -18603,7 +18606,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
     const unused = self.liveness.isUnused(inst);
 
     const result: MCValue = switch (abi.resolveCallingConvention(
-        self.fn_type.fnCallingConvention(mod),
+        self.fn_type.fnCallingConvention(zcu),
         self.target.*,
     )) {
         .SysV => result: {
@@ -18633,7 +18636,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
             const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } };
             const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } };
 
-            const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, pt, self.target.*, .arg), .none);
+            const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, zcu, self.target.*, .arg), .none);
             switch (classes[0]) {
                 .integer => {
                     assert(classes.len == 1);
@@ -18668,7 +18671,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
                         .base = .{ .reg = addr_reg },
                         .mod = .{ .rm = .{
                             .size = .qword,
-                            .disp = @intCast(@max(promote_ty.abiSize(pt), 8)),
+                            .disp = @intCast(@max(promote_ty.abiSize(zcu), 8)),
                         } },
                     });
                     try self.genCopy(
@@ -18716,7 +18719,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
                         .base = .{ .reg = addr_reg },
                         .mod = .{ .rm = .{
                             .size = .qword,
-                            .disp = @intCast(@max(promote_ty.abiSize(pt), 8)),
+                            .disp = @intCast(@max(promote_ty.abiSize(zcu), 8)),
                         } },
                     });
                     try self.genCopy(
@@ -18806,11 +18809,11 @@ fn airVaEnd(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
-    const pt = self.pt;
+    const zcu = self.pt.zcu;
     const ty = self.typeOf(ref);
 
     // If the type has no codegen bits, no need to store it.
-    if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
 
     const mcv = if (ref.toIndex()) |inst| mcv: {
         break :mcv self.inst_tracking.getPtr(inst).?.short;
@@ -18927,8 +18930,8 @@ fn resolveCallingConventionValues(
     stack_frame_base: FrameIndex,
 ) !CallMCValues {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const cc = fn_info.cc;
     const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
     defer self.gpa.free(param_types);
@@ -18970,15 +18973,15 @@ fn resolveCallingConventionValues(
                 .SysV => {},
                 .Win64 => {
                     // Align the stack to 16 bytes before allocating shadow stack space (if any).
-                    result.stack_byte_count += @intCast(4 * Type.usize.abiSize(pt));
+                    result.stack_byte_count += @intCast(4 * Type.usize.abiSize(zcu));
                 },
                 else => unreachable,
             }
 
             // Return values
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = InstTracking.init(.unreach);
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 // TODO: is this even possible for C calling convention?
                 result.return_value = InstTracking.init(.none);
             } else {
@@ -18986,15 +18989,15 @@ fn resolveCallingConventionValues(
                 var ret_tracking_i: usize = 0;
 
                 const classes = switch (resolved_cc) {
-                    .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, pt, self.target.*, .ret), .none),
-                    .Win64 => &.{abi.classifyWindows(ret_ty, pt)},
+                    .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
+                    .Win64 => &.{abi.classifyWindows(ret_ty, zcu)},
                     else => unreachable,
                 };
                 for (classes) |class| switch (class) {
                     .integer => {
                         const ret_int_reg = registerAlias(
                             abi.getCAbiIntReturnRegs(resolved_cc)[ret_int_reg_i],
-                            @intCast(@min(ret_ty.abiSize(pt), 8)),
+                            @intCast(@min(ret_ty.abiSize(zcu), 8)),
                         );
                         ret_int_reg_i += 1;
 
@@ -19004,7 +19007,7 @@ fn resolveCallingConventionValues(
                     .sse, .float, .float_combine, .win_i128 => {
                         const ret_sse_reg = registerAlias(
                             abi.getCAbiSseReturnRegs(resolved_cc)[ret_sse_reg_i],
-                            @intCast(ret_ty.abiSize(pt)),
+                            @intCast(ret_ty.abiSize(zcu)),
                         );
                         ret_sse_reg_i += 1;
 
@@ -19047,7 +19050,7 @@ fn resolveCallingConventionValues(
 
             // Input params
             for (param_types, result.args) |ty, *arg| {
-                assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+                assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
                 switch (resolved_cc) {
                     .SysV => {},
                     .Win64 => {
@@ -19061,8 +19064,8 @@ fn resolveCallingConventionValues(
                 var arg_mcv_i: usize = 0;
 
                 const classes = switch (resolved_cc) {
-                    .SysV => mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .arg), .none),
-                    .Win64 => &.{abi.classifyWindows(ty, pt)},
+                    .SysV => mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
+                    .Win64 => &.{abi.classifyWindows(ty, zcu)},
                     else => unreachable,
                 };
                 for (classes) |class| switch (class) {
@@ -19072,7 +19075,7 @@ fn resolveCallingConventionValues(
 
                         const param_int_reg = registerAlias(
                             abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i],
-                            @intCast(@min(ty.abiSize(pt), 8)),
+                            @intCast(@min(ty.abiSize(zcu), 8)),
                         );
                         param_int_reg_i += 1;
 
@@ -19085,7 +19088,7 @@ fn resolveCallingConventionValues(
 
                         const param_sse_reg = registerAlias(
                             abi.getCAbiSseParamRegs(resolved_cc)[param_sse_reg_i],
-                            @intCast(ty.abiSize(pt)),
+                            @intCast(ty.abiSize(zcu)),
                         );
                         param_sse_reg_i += 1;
 
@@ -19098,7 +19101,7 @@ fn resolveCallingConventionValues(
                             .x87, .x87up, .complex_x87, .memory => break,
                             else => unreachable,
                         },
-                        .Win64 => if (ty.abiSize(pt) > 8) {
+                        .Win64 => if (ty.abiSize(zcu) > 8) {
                             const param_int_reg =
                                 abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i].to64();
                             param_int_reg_i += 1;
@@ -19117,10 +19120,10 @@ fn resolveCallingConventionValues(
                         param_int_reg_i = param_int_regs_len;
 
                         const frame_elem_align = 8;
-                        const frame_elems_len = ty.vectorLen(mod) - remaining_param_int_regs;
+                        const frame_elems_len = ty.vectorLen(zcu) - remaining_param_int_regs;
                         const frame_elem_size = mem.alignForward(
                             u64,
-                            ty.childType(mod).abiSize(pt),
+                            ty.childType(zcu).abiSize(zcu),
                             frame_elem_align,
                         );
                         const frame_size: u31 = @intCast(frame_elems_len * frame_elem_size);
@@ -19144,9 +19147,9 @@ fn resolveCallingConventionValues(
                     continue;
                 }
 
-                const param_size: u31 = @intCast(ty.abiSize(pt));
+                const param_size: u31 = @intCast(ty.abiSize(zcu));
                 const param_align: u31 =
-                    @intCast(@max(ty.abiAlignment(pt).toByteUnits().?, 8));
+                    @intCast(@max(ty.abiAlignment(zcu).toByteUnits().?, 8));
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -19164,13 +19167,13 @@ fn resolveCallingConventionValues(
             result.stack_align = .@"16";
 
             // Return values
-            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+            if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
                 result.return_value = InstTracking.init(.unreach);
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 result.return_value = InstTracking.init(.none);
             } else {
                 const ret_reg = abi.getCAbiIntReturnRegs(resolved_cc)[0];
-                const ret_ty_size: u31 = @intCast(ret_ty.abiSize(pt));
+                const ret_ty_size: u31 = @intCast(ret_ty.abiSize(zcu));
                 if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
                     const aliased_reg = registerAlias(ret_reg, ret_ty_size);
                     result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none };
@@ -19185,12 +19188,12 @@ fn resolveCallingConventionValues(
 
             // Input params
             for (param_types, result.args) |ty, *arg| {
-                if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     arg.* = .none;
                     continue;
                 }
-                const param_size: u31 = @intCast(ty.abiSize(pt));
-                const param_align: u31 = @intCast(ty.abiAlignment(pt).toByteUnits().?);
+                const param_size: u31 = @intCast(ty.abiSize(zcu));
+                const param_align: u31 = @intCast(ty.abiAlignment(zcu).toByteUnits().?);
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -19276,25 +19279,26 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
 
 fn memSize(self: *Self, ty: Type) Memory.Size {
     const pt = self.pt;
-    const mod = pt.zcu;
-    return switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    return switch (ty.zigTypeTag(zcu)) {
         .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
-        else => Memory.Size.fromSize(@intCast(ty.abiSize(pt))),
+        else => Memory.Size.fromSize(@intCast(ty.abiSize(zcu))),
     };
 }
 
 fn splitType(self: *Self, ty: Type) ![2]Type {
     const pt = self.pt;
-    const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none);
+    const zcu = pt.zcu;
+    const classes = mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
     var parts: [2]Type = undefined;
     if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
         part.* = switch (class) {
             .integer => switch (part_i) {
                 0 => Type.u64,
                 1 => part: {
-                    const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?;
+                    const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?;
                     const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8));
-                    break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) {
+                    break :part switch (@divExact(ty.abiSize(zcu) - 8, elem_size)) {
                         1 => elem_ty,
                         else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
                     };
@@ -19306,7 +19310,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
             .sse => Type.f64,
             else => break,
         };
-    } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts;
+    } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
     return self.fail("TODO implement splitType for {}", .{ty.fmt(pt)});
 }
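Note: splitType performs the SysV two-eightbyte split: part 0 is u64 (integer class) or f64 (sse class), and part 1 covers the tail in units of min(abi alignment, 8). Illustrative results, assuming the usual extern layouts:

// Hypothetical splits produced by the logic above:
//   extern struct { a: u64, b: f64 }          => { u64, f64 }  (integer + sse)
//   extern struct { a: u32, b: u32, c: u16 }  => { u64, u32 }  (12 bytes, align 4)
// The final size check rejects any split that does not add up to abiSize(ty).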
 
@@ -19314,10 +19318,10 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
 /// Clobbers any remaining bits.
 fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
+    const zcu = pt.zcu;
+    const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
         .signedness = .unsigned,
-        .bits = @intCast(ty.bitSize(pt)),
+        .bits = @intCast(ty.bitSize(zcu)),
     };
     const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return;
     try self.spillEflagsIfOccupied();
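Note: the shift computed above feeds a shift-left/shift-right pair over the 64-bit register (when the bit count is a multiple of 64, the u6 cast fails and the function returns early with nothing to clear). An unsigned scalar model of the same masking, as a hypothetical helper:

const std = @import("std");

/// Keep the low `bits` of a 64-bit value and zero the rest.
fn truncateU64(value: u64, bits: u7) u64 {
    std.debug.assert(bits >= 1 and bits < 64); // bits % 64 == 0 returns early above
    const shift: u6 = @intCast(64 - @as(u8, bits));
    return (value << shift) >> shift; // logical shifts clear the high bits
}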
@@ -19362,9 +19366,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
 
 fn regBitSize(self: *Self, ty: Type) u64 {
     const pt = self.pt;
-    const mod = pt.zcu;
-    const abi_size = ty.abiSize(pt);
-    return switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const abi_size = ty.abiSize(zcu);
+    return switch (ty.zigTypeTag(zcu)) {
         else => switch (abi_size) {
             1 => 8,
             2 => 16,
@@ -19381,7 +19385,7 @@ fn regBitSize(self: *Self, ty: Type) u64 {
 }
 
 fn regExtraBits(self: *Self, ty: Type) u64 {
-    return self.regBitSize(ty) - ty.bitSize(self.pt);
+    return self.regBitSize(ty) - ty.bitSize(self.pt.zcu);
 }
 
 fn hasFeature(self: *Self, feature: Target.x86.Feature) bool {
@@ -19396,14 +19400,14 @@ fn hasAllFeatures(self: *Self, features: anytype) bool {
 
 fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
     const pt = self.pt;
-    const mod = pt.zcu;
-    return self.air.typeOf(inst, &mod.intern_pool);
+    const zcu = pt.zcu;
+    return self.air.typeOf(inst, &zcu.intern_pool);
 }
 
 fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const pt = self.pt;
-    const mod = pt.zcu;
-    return self.air.typeOfIndex(inst, &mod.intern_pool);
+    const zcu = pt.zcu;
+    return self.air.typeOfIndex(inst, &zcu.intern_pool);
 }
 
 fn intCompilerRtAbiName(int_bits: u32) u8 {
@@ -19455,17 +19459,17 @@ fn floatLibcAbiSuffix(ty: Type) []const u8 {
 
 fn promoteInt(self: *Self, ty: Type) Type {
     const pt = self.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const int_info: InternPool.Key.IntType = switch (ty.toIntern()) {
         .bool_type => .{ .signedness = .unsigned, .bits = 1 },
-        else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty,
+        else => if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else return ty,
     };
     for ([_]Type{
         Type.c_int,      Type.c_uint,
         Type.c_long,     Type.c_ulong,
         Type.c_longlong, Type.c_ulonglong,
     }) |promote_ty| {
-        const promote_info = promote_ty.intInfo(mod);
+        const promote_info = promote_ty.intInfo(zcu);
         if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue;
         if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and
             promote_info.signedness == .signed) <= promote_info.bits) return promote_ty;
src/codegen/c/Type.zig
@@ -1344,6 +1344,7 @@ pub const Pool = struct {
         kind: Kind,
     ) !CType {
         const ip = &pt.zcu.intern_pool;
+        const zcu = pt.zcu;
         switch (ty.toIntern()) {
             .u0_type,
             .i0_type,
@@ -1476,7 +1477,7 @@ pub const Pool = struct {
                                 ),
                                 .alignas = AlignAs.fromAlignment(.{
                                     .@"align" = ptr_info.flags.alignment,
-                                    .abi = Type.fromInterned(ptr_info.child).abiAlignment(pt),
+                                    .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
                                 }),
                             };
                             break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
@@ -1552,7 +1553,7 @@ pub const Pool = struct {
                         .{
                             .name = .{ .index = .array },
                             .ctype = array_ctype,
-                            .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
+                            .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
                         },
                     };
                     return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1578,7 +1579,7 @@ pub const Pool = struct {
                         .{
                             .name = .{ .index = .array },
                             .ctype = vector_ctype,
-                            .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
+                            .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
                         },
                     };
                     return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1613,7 +1614,7 @@ pub const Pool = struct {
                             .name = .{ .index = .payload },
                             .ctype = payload_ctype,
                             .alignas = AlignAs.fromAbiAlignment(
-                                Type.fromInterned(payload_type).abiAlignment(pt),
+                                Type.fromInterned(payload_type).abiAlignment(zcu),
                             ),
                         },
                     };
@@ -1649,7 +1650,7 @@ pub const Pool = struct {
                         .{
                             .name = .{ .index = .payload },
                             .ctype = payload_ctype,
-                            .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)),
+                            .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
                         },
                     };
                     return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1663,7 +1664,7 @@ pub const Pool = struct {
                                 .tag = .@"struct",
                                 .name = .{ .index = ip_index },
                             });
-                            if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
+                            if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
                                 fwd_decl
                             else
                                 CType.void;
@@ -1696,7 +1697,7 @@ pub const Pool = struct {
                                     String.fromUnnamed(@intCast(field_index));
                                 const field_alignas = AlignAs.fromAlignment(.{
                                     .@"align" = loaded_struct.fieldAlign(ip, field_index),
-                                    .abi = field_type.abiAlignment(pt),
+                                    .abi = field_type.abiAlignment(zcu),
                                 });
                                 pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
                                     .name = field_name.index,
@@ -1758,7 +1759,7 @@ pub const Pool = struct {
                             .name = field_name.index,
                             .ctype = field_ctype.index,
                             .flags = .{ .alignas = AlignAs.fromAbiAlignment(
-                                field_type.abiAlignment(pt),
+                                field_type.abiAlignment(zcu),
                             ) },
                         });
                     }
@@ -1802,7 +1803,7 @@ pub const Pool = struct {
                                 .tag = if (has_tag) .@"struct" else .@"union",
                                 .name = .{ .index = ip_index },
                             });
-                            if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
+                            if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
                                 fwd_decl
                             else
                                 CType.void;
@@ -1836,7 +1837,7 @@ pub const Pool = struct {
                                 );
                                 const field_alignas = AlignAs.fromAlignment(.{
                                     .@"align" = loaded_union.fieldAlign(ip, field_index),
-                                    .abi = field_type.abiAlignment(pt),
+                                    .abi = field_type.abiAlignment(zcu),
                                 });
                                 pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
                                     .name = field_name.index,
@@ -1881,7 +1882,7 @@ pub const Pool = struct {
                                     struct_fields[struct_fields_len] = .{
                                         .name = .{ .index = .tag },
                                         .ctype = tag_ctype,
-                                        .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)),
+                                        .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
                                     };
                                     struct_fields_len += 1;
                                 }
@@ -1929,7 +1930,7 @@ pub const Pool = struct {
                         },
                         .@"packed" => return pool.fromIntInfo(allocator, .{
                             .signedness = .unsigned,
-                            .bits = @intCast(ty.bitSize(pt)),
+                            .bits = @intCast(ty.bitSize(zcu)),
                         }, mod, kind),
                     }
                 },
src/codegen/c.zig
@@ -334,7 +334,7 @@ pub const Function = struct {
             const writer = f.object.codeHeaderWriter();
             const decl_c_value = try f.allocLocalValue(.{
                 .ctype = try f.ctypeFromType(ty, .complete),
-                .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)),
+                .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
             });
             const gpa = f.object.dg.gpa;
             try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -372,7 +372,7 @@ pub const Function = struct {
     fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
         return f.allocAlignedLocal(inst, .{
             .ctype = try f.ctypeFromType(ty, .complete),
-            .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)),
+            .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt.zcu)),
         });
     }
 
@@ -648,7 +648,7 @@ pub const DeclGen = struct {
 
         // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
         const ptr_ty = Type.fromInterned(uav.orig_ty);
-        if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) {
+        if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) {
             return dg.writeCValue(writer, .{ .undef = ptr_ty });
         }
 
@@ -688,7 +688,7 @@ pub const DeclGen = struct {
         // alignment. If there is already an entry, keep the greater alignment.
         const explicit_alignment = ptr_type.flags.alignment;
         if (explicit_alignment != .none) {
-            const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt);
+            const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
             if (explicit_alignment.order(abi_alignment).compare(.gt)) {
                 const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val);
                 aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
@@ -722,7 +722,7 @@ pub const DeclGen = struct {
         // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
         const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip));
         const ptr_ty = try pt.navPtrType(owner_nav);
-        if (!nav_ty.isFnOrHasRuntimeBits(pt)) {
+        if (!nav_ty.isFnOrHasRuntimeBits(zcu)) {
             return dg.writeCValue(writer, .{ .undef = ptr_ty });
         }
 
@@ -805,7 +805,7 @@ pub const DeclGen = struct {
                 }
             },
 
-            .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) {
+            .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(zcu)) {
                 // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
                 const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
                 try writer.writeByte('(');
@@ -923,7 +923,7 @@ pub const DeclGen = struct {
                     try writer.writeAll("((");
                     try dg.renderCType(writer, ctype);
                     try writer.print("){x})", .{try dg.fmtIntLiteral(
-                        try pt.intValue(Type.usize, val.toUnsignedInt(pt)),
+                        try pt.intValue(Type.usize, val.toUnsignedInt(zcu)),
                         .Other,
                     )});
                 },
@@ -970,7 +970,7 @@ pub const DeclGen = struct {
             .enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
             .float => {
                 const bits = ty.floatBits(target.*);
-                const f128_val = val.toFloat(f128, pt);
+                const f128_val = val.toFloat(f128, zcu);
 
                 // All unsigned ints matching float types are pre-allocated.
                 const repr_ty = pt.intType(.unsigned, bits) catch unreachable;
@@ -984,10 +984,10 @@ pub const DeclGen = struct {
                 };
 
                 switch (bits) {
-                    16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))),
-                    32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))),
-                    64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))),
-                    80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))),
+                    16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))),
+                    32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))),
+                    64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))),
+                    80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))),
                     128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
                     else => unreachable,
                 }
@@ -998,10 +998,10 @@ pub const DeclGen = struct {
                     try dg.renderTypeForBuiltinFnName(writer, ty);
                     try writer.writeByte('(');
                     switch (bits) {
-                        16 => try writer.print("{x}", .{val.toFloat(f16, pt)}),
-                        32 => try writer.print("{x}", .{val.toFloat(f32, pt)}),
-                        64 => try writer.print("{x}", .{val.toFloat(f64, pt)}),
-                        80 => try writer.print("{x}", .{val.toFloat(f80, pt)}),
+                        16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}),
+                        32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}),
+                        64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}),
+                        80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}),
                         128 => try writer.print("{x}", .{f128_val}),
                         else => unreachable,
                     }
@@ -1041,10 +1041,10 @@ pub const DeclGen = struct {
                     if (std.math.isNan(f128_val)) switch (bits) {
                         // We only actually need to pass the significand, but it will get
                         // properly masked anyway, so just pass the whole value.
-                        16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}),
-                        32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}),
-                        64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}),
-                        80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}),
+                        16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}),
+                        32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}),
+                        64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}),
+                        80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}),
                         128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
                         else => unreachable,
                     };
@@ -1167,11 +1167,11 @@ pub const DeclGen = struct {
                             const elem_val_u8: u8 = if (elem_val.isUndef(zcu))
                                 undefPattern(u8)
                             else
-                                @intCast(elem_val.toUnsignedInt(pt));
+                                @intCast(elem_val.toUnsignedInt(zcu));
                             try literal.writeChar(elem_val_u8);
                         }
                         if (ai.sentinel) |s| {
-                            const s_u8: u8 = @intCast(s.toUnsignedInt(pt));
+                            const s_u8: u8 = @intCast(s.toUnsignedInt(zcu));
                             if (s_u8 != 0) try literal.writeChar(s_u8);
                         }
                         try literal.end();
@@ -1203,7 +1203,7 @@ pub const DeclGen = struct {
                         const comptime_val = tuple.values.get(ip)[field_index];
                         if (comptime_val != .none) continue;
                         const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         if (!empty) try writer.writeByte(',');
 
@@ -1238,7 +1238,7 @@ pub const DeclGen = struct {
                             var need_comma = false;
                             while (field_it.next()) |field_index| {
                                 const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                                if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                                 if (need_comma) try writer.writeByte(',');
                                 need_comma = true;
@@ -1265,7 +1265,7 @@ pub const DeclGen = struct {
 
                             for (0..loaded_struct.field_types.len) |field_index| {
                                 const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                                if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                                 eff_num_fields += 1;
                             }
 
@@ -1273,7 +1273,7 @@ pub const DeclGen = struct {
                                 try writer.writeByte('(');
                                 try dg.renderUndefValue(writer, ty, location);
                                 try writer.writeByte(')');
-                            } else if (ty.bitSize(pt) > 64) {
+                            } else if (ty.bitSize(zcu) > 64) {
                                 // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
                                 var num_or = eff_num_fields - 1;
                                 while (num_or > 0) : (num_or -= 1) {
@@ -1286,7 +1286,7 @@ pub const DeclGen = struct {
                                 var needs_closing_paren = false;
                                 for (0..loaded_struct.field_types.len) |field_index| {
                                     const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                                     const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
                                         .bytes => |bytes| try pt.intern(.{ .int = .{
@@ -1312,7 +1312,7 @@ pub const DeclGen = struct {
                                     if (needs_closing_paren) try writer.writeByte(')');
                                     if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
 
-                                    bit_offset += field_ty.bitSize(pt);
+                                    bit_offset += field_ty.bitSize(zcu);
                                     needs_closing_paren = true;
                                     eff_index += 1;
                                 }
@@ -1322,7 +1322,7 @@ pub const DeclGen = struct {
                                 var empty = true;
                                 for (0..loaded_struct.field_types.len) |field_index| {
                                     const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                                     if (!empty) try writer.writeAll(" | ");
                                     try writer.writeByte('(');
@@ -1346,7 +1346,7 @@ pub const DeclGen = struct {
                                         try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
                                     }
 
-                                    bit_offset += field_ty.bitSize(pt);
+                                    bit_offset += field_ty.bitSize(zcu);
                                     empty = false;
                                 }
                                 try writer.writeByte(')');
@@ -1396,7 +1396,7 @@ pub const DeclGen = struct {
                     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
                     const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
                     if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
-                        if (field_ty.hasRuntimeBits(pt)) {
+                        if (field_ty.hasRuntimeBits(zcu)) {
                             if (field_ty.isPtrAtRuntime(zcu)) {
                                 try writer.writeByte('(');
                                 try dg.renderCType(writer, ctype);
@@ -1427,7 +1427,7 @@ pub const DeclGen = struct {
                             ),
                             .payload => {
                                 try writer.writeByte('{');
-                                if (field_ty.hasRuntimeBits(pt)) {
+                                if (field_ty.hasRuntimeBits(zcu)) {
                                     try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))});
                                     try dg.renderValue(
                                         writer,
@@ -1439,7 +1439,7 @@ pub const DeclGen = struct {
                                     const inner_field_ty = Type.fromInterned(
                                         loaded_union.field_types.get(ip)[inner_field_index],
                                     );
-                                    if (!inner_field_ty.hasRuntimeBits(pt)) continue;
+                                    if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
                                     try dg.renderUndefValue(writer, inner_field_ty, initializer_type);
                                     break;
                                 }
@@ -1588,7 +1588,7 @@ pub const DeclGen = struct {
                             var need_comma = false;
                             while (field_it.next()) |field_index| {
                                 const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                                if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                                 if (need_comma) try writer.writeByte(',');
                                 need_comma = true;
@@ -1613,7 +1613,7 @@ pub const DeclGen = struct {
                     for (0..anon_struct_info.types.len) |field_index| {
                         if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
                         const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         if (need_comma) try writer.writeByte(',');
                         need_comma = true;
@@ -1651,7 +1651,7 @@ pub const DeclGen = struct {
                                             const inner_field_ty = Type.fromInterned(
                                                 loaded_union.field_types.get(ip)[inner_field_index],
                                             );
-                                            if (!inner_field_ty.hasRuntimeBits(pt)) continue;
+                                            if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
                                             try dg.renderUndefValue(
                                                 writer,
                                                 inner_field_ty,
@@ -1902,7 +1902,8 @@ pub const DeclGen = struct {
     };
     fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool {
         const pt = dg.pt;
-        const dest_bits = dest_ty.bitSize(pt);
+        const zcu = pt.zcu;
+        const dest_bits = dest_ty.bitSize(zcu);
         const dest_int_info = dest_ty.intInfo(pt.zcu);
 
         const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu);
@@ -1911,7 +1912,7 @@ pub const DeclGen = struct {
             .signed => Type.isize,
         } else src_ty;
 
-        const src_bits = src_eff_ty.bitSize(pt);
+        const src_bits = src_eff_ty.bitSize(zcu);
         const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null;
         if (dest_bits <= 64 and src_bits <= 64) {
             const needs_cast = src_int_info == null or
@@ -1943,7 +1944,7 @@ pub const DeclGen = struct {
     ) !void {
         const pt = dg.pt;
         const zcu = pt.zcu;
-        const dest_bits = dest_ty.bitSize(pt);
+        const dest_bits = dest_ty.bitSize(zcu);
         const dest_int_info = dest_ty.intInfo(zcu);
 
         const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
@@ -1952,7 +1953,7 @@ pub const DeclGen = struct {
             .signed => Type.isize,
         } else src_ty;
 
-        const src_bits = src_eff_ty.bitSize(pt);
+        const src_bits = src_eff_ty.bitSize(zcu);
         const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
         if (dest_bits <= 64 and src_bits <= 64) {
             const needs_cast = src_int_info == null or
@@ -2033,7 +2034,7 @@ pub const DeclGen = struct {
             qualifiers,
             CType.AlignAs.fromAlignment(.{
                 .@"align" = alignment,
-                .abi = ty.abiAlignment(dg.pt),
+                .abi = ty.abiAlignment(dg.pt.zcu),
             }),
         );
     }
@@ -2239,9 +2240,10 @@ pub const DeclGen = struct {
         }
 
         const pt = dg.pt;
-        const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{
+        const zcu = pt.zcu;
+        const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
             .signedness = .unsigned,
-            .bits = @as(u16, @intCast(ty.bitSize(pt))),
+            .bits = @as(u16, @intCast(ty.bitSize(zcu))),
         };
 
         if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -2891,7 +2893,7 @@ pub fn genDecl(o: *Object) !void {
     const nav = ip.getNav(o.dg.pass.nav);
     const nav_ty = Type.fromInterned(nav.typeOf(ip));
 
-    if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return;
+    if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
     switch (ip.indexToKey(nav.status.resolved.val)) {
         .@"extern" => |@"extern"| {
             if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{
@@ -3420,10 +3422,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
 }
 
 fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
-    const pt = f.object.dg.pt;
+    const zcu = f.object.dg.pt.zcu;
     const inst_ty = f.typeOfIndex(inst);
     const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
         return .none;
     }
@@ -3453,7 +3455,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
 
     const inst_ty = f.typeOfIndex(inst);
     const ptr_ty = f.typeOf(bin_op.lhs);
-    const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt);
+    const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
 
     const ptr = try f.resolveInst(bin_op.lhs);
     const index = try f.resolveInst(bin_op.rhs);
@@ -3482,10 +3484,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
-    const pt = f.object.dg.pt;
+    const zcu = f.object.dg.pt.zcu;
     const inst_ty = f.typeOfIndex(inst);
     const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
         return .none;
     }
@@ -3516,7 +3518,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     const inst_ty = f.typeOfIndex(inst);
     const slice_ty = f.typeOf(bin_op.lhs);
     const elem_ty = slice_ty.elemType2(zcu);
-    const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt);
+    const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
 
     const slice = try f.resolveInst(bin_op.lhs);
     const index = try f.resolveInst(bin_op.rhs);
@@ -3539,10 +3541,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
-    const pt = f.object.dg.pt;
+    const zcu = f.object.dg.pt.zcu;
     const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const inst_ty = f.typeOfIndex(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
         return .none;
     }
@@ -3569,13 +3571,13 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
     const zcu = pt.zcu;
     const inst_ty = f.typeOfIndex(inst);
     const elem_ty = inst_ty.childType(zcu);
-    if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
+    if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
 
     const local = try f.allocLocalValue(.{
         .ctype = try f.ctypeFromType(elem_ty, .complete),
         .alignas = CType.AlignAs.fromAlignment(.{
             .@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
-            .abi = elem_ty.abiAlignment(pt),
+            .abi = elem_ty.abiAlignment(zcu),
         }),
     });
     log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@@ -3588,13 +3590,13 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     const zcu = pt.zcu;
     const inst_ty = f.typeOfIndex(inst);
     const elem_ty = inst_ty.childType(zcu);
-    if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
+    if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
 
     const local = try f.allocLocalValue(.{
         .ctype = try f.ctypeFromType(elem_ty, .complete),
         .alignas = CType.AlignAs.fromAlignment(.{
             .@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
-            .abi = elem_ty.abiAlignment(pt),
+            .abi = elem_ty.abiAlignment(zcu),
         }),
     });
     log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@@ -3636,7 +3638,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
     const src_ty = Type.fromInterned(ptr_info.child);
 
-    if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try reap(f, inst, &.{ty_op.operand});
         return .none;
     }
@@ -3646,7 +3648,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ty_op.operand});
 
     const is_aligned = if (ptr_info.flags.alignment != .none)
-        ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
+        ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
     else
         true;
     const is_array = lowersToArray(src_ty, pt);
@@ -3674,7 +3676,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
         const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
         const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
 
-        const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt))));
+        const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
 
         try f.writeCValue(writer, local, .Other);
         try v.elem(f, writer);
@@ -3685,9 +3687,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
         try writer.writeAll("((");
         try f.renderType(writer, field_ty);
         try writer.writeByte(')');
-        const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
+        const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
         if (cant_cast) {
-            if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+            if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
             try writer.writeAll("zig_lo_");
             try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
             try writer.writeByte('(');
@@ -3735,7 +3737,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
         const ret_val = if (is_array) ret_val: {
             const array_local = try f.allocAlignedLocal(inst, .{
                 .ctype = ret_ctype,
-                .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
+                .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
             });
             try writer.writeAll("memcpy(");
             try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@@ -3926,7 +3928,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     }
 
     const is_aligned = if (ptr_info.flags.alignment != .none)
-        ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
+        ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
     else
         true;
     const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt);
@@ -3976,7 +3978,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
         const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
 
-        const src_bits = src_ty.bitSize(pt);
+        const src_bits = src_ty.bitSize(zcu);
 
         const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
         var stack align(@alignOf(ExpectedContents)) =
@@ -4006,9 +4008,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)});
         try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
         try writer.writeByte('(');
-        const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
+        const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
         if (cant_cast) {
-            if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+            if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
             try writer.writeAll("zig_make_");
             try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
             try writer.writeAll("(0, ");
@@ -4130,7 +4132,7 @@ fn airBinOp(
     const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
     const operand_ty = f.typeOf(bin_op.lhs);
     const scalar_ty = operand_ty.scalarType(zcu);
-    if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat())
+    if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat())
         return try airBinBuiltinCall(f, inst, operation, info);
 
     const lhs = try f.resolveInst(bin_op.lhs);
@@ -4169,7 +4171,7 @@ fn airCmpOp(
     const lhs_ty = f.typeOf(data.lhs);
     const scalar_ty = lhs_ty.scalarType(zcu);
 
-    const scalar_bits = scalar_ty.bitSize(pt);
+    const scalar_bits = scalar_ty.bitSize(zcu);
     if (scalar_ty.isInt(zcu) and scalar_bits > 64)
         return airCmpBuiltinCall(
             f,
@@ -4219,7 +4221,7 @@ fn airEquality(
     const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
     const operand_ty = f.typeOf(bin_op.lhs);
-    const operand_bits = operand_ty.bitSize(pt);
+    const operand_bits = operand_ty.bitSize(zcu);
     if (operand_ty.isAbiInt(zcu) and operand_bits > 64)
         return airCmpBuiltinCall(
             f,
@@ -4312,7 +4314,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
     const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(zcu);
     const elem_ty = inst_scalar_ty.elemType2(zcu);
-    if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs);
+    if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
     const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
 
     const local = try f.allocLocal(inst, inst_ty);
@@ -4351,7 +4353,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
     const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(zcu);
 
-    if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat())
+    if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat())
         return try airBinBuiltinCall(f, inst, operation, .none);
 
     const lhs = try f.resolveInst(bin_op.lhs);
@@ -4446,7 +4448,7 @@ fn airCall(
         if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) {
             const array_local = try f.allocAlignedLocal(inst, .{
                 .ctype = arg_ctype,
-                .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)),
+                .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)),
             });
             try writer.writeAll("memcpy(");
             try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@@ -4493,7 +4495,7 @@ fn airCall(
         } else {
             const local = try f.allocAlignedLocal(inst, .{
                 .ctype = ret_ctype,
-                .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
+                .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
             });
             try f.writeCValue(writer, local, .Other);
             try writer.writeAll(" = ");
@@ -4618,7 +4620,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
     const writer = f.object.writer();
 
     const inst_ty = f.typeOfIndex(inst);
-    const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst))
+    const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst))
         try f.allocLocal(inst, inst_ty)
     else
         .none;
@@ -4681,7 +4683,7 @@ fn lowerTry(
     const liveness_condbr = f.liveness.getCondBr(inst);
     const writer = f.object.writer();
     const payload_ty = err_union_ty.errorUnionPayload(zcu);
-    const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+    const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
 
     if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
         try writer.writeAll("if (");
@@ -4820,7 +4822,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
     try writer.writeAll(", sizeof(");
     try f.renderType(
         writer,
-        if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty,
+        if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty,
     );
     try writer.writeAll("));\n");
 
@@ -5030,7 +5032,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
             try f.object.indent_writer.insertNewline();
             try writer.writeAll("case ");
             const item_value = try f.air.value(item, pt);
-            if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{
+            if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{
                 try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)),
             }) else {
                 if (condition_ty.isPtrAtRuntime(zcu)) {
@@ -5112,10 +5114,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
     const result = result: {
         const writer = f.object.writer();
         const inst_ty = f.typeOfIndex(inst);
-        const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: {
+        const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: {
             const inst_local = try f.allocLocalValue(.{
                 .ctype = try f.ctypeFromType(inst_ty, .complete),
-                .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)),
+                .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)),
             });
             if (f.wantSafety()) {
                 try f.writeCValue(writer, inst_local, .Other);
@@ -5148,7 +5150,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
                 try writer.writeAll("register ");
                 const output_local = try f.allocLocalValue(.{
                     .ctype = try f.ctypeFromType(output_ty, .complete),
-                    .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)),
+                    .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)),
                 });
                 try f.allocs.put(gpa, output_local.new_local, false);
                 try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete);
@@ -5183,7 +5185,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
                 if (is_reg) try writer.writeAll("register ");
                 const input_local = try f.allocLocalValue(.{
                     .ctype = try f.ctypeFromType(input_ty, .complete),
-                    .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)),
+                    .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)),
                 });
                 try f.allocs.put(gpa, input_local.new_local, false);
                 try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete);
@@ -5526,9 +5528,9 @@ fn fieldLocation(
         .struct_type => {
             const loaded_struct = ip.loadStructType(container_ty.toIntern());
             return switch (loaded_struct.layout) {
-                .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
+                .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
                     .begin
-                else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
+                else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
                     .{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
                 else
                     .{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@@ -5542,10 +5544,10 @@ fn fieldLocation(
                     .begin,
             };
         },
-        .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
+        .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
             .begin
-        else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
-            .{ .byte_offset = container_ty.structFieldOffset(field_index, pt) }
+        else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
+            .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
         else
             .{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
                 .{ .identifier = field_name.toSlice(ip) }
@@ -5556,8 +5558,8 @@ fn fieldLocation(
             switch (loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
-                        return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt))
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu))
+                        return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu))
                             .{ .field = .{ .identifier = "payload" } }
                         else
                             .begin;
@@ -5706,7 +5708,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
     const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
 
     const inst_ty = f.typeOfIndex(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try reap(f, inst, &.{extra.struct_operand});
         return .none;
     }
@@ -5738,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
                         inst_ty.intInfo(zcu).signedness
                     else
                         .unsigned;
-                    const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt))));
+                    const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
 
                     const temp_local = try f.allocLocal(inst, field_int_ty);
                     try f.writeCValue(writer, temp_local, .Other);
@@ -5749,7 +5751,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
                     try writer.writeByte(')');
                     const cant_cast = int_info.bits > 64;
                     if (cant_cast) {
-                        if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+                        if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
                         try writer.writeAll("zig_lo_");
                         try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
                         try writer.writeByte('(');
@@ -5857,7 +5859,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     const payload_ty = error_union_ty.errorUnionPayload(zcu);
     const local = try f.allocLocal(inst, inst_ty);
 
-    if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) {
+    if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) {
         // The store will be 'x = x'; elide it.
         return local;
     }
@@ -5866,7 +5868,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = ");
 
-    if (!payload_ty.hasRuntimeBits(pt))
+    if (!payload_ty.hasRuntimeBits(zcu))
         try f.writeCValue(writer, operand, .Other)
     else if (error_ty.errorSetIsEmpty(zcu))
         try writer.print("{}", .{
@@ -5892,7 +5894,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
     const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
 
     const writer = f.object.writer();
-    if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) {
+    if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) {
         if (!is_ptr) return .none;
 
         const local = try f.allocLocal(inst, inst_ty);
@@ -5963,7 +5965,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
 
     const inst_ty = f.typeOfIndex(inst);
     const payload_ty = inst_ty.errorUnionPayload(zcu);
-    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
     const err_ty = inst_ty.errorUnionSet(zcu);
     const err = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -6012,7 +6014,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ty_op.operand});
 
     // First, set the non-error value.
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete));
         try f.writeCValueDeref(writer, operand);
         try a.assign(f, writer);
@@ -6064,7 +6066,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
     const inst_ty = f.typeOfIndex(inst);
     const payload_ty = inst_ty.errorUnionPayload(zcu);
     const payload = try f.resolveInst(ty_op.operand);
-    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
     const err_ty = inst_ty.errorUnionSet(zcu);
     try reap(f, inst, &.{ty_op.operand});
 
@@ -6109,7 +6111,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
     try a.assign(f, writer);
     const err_int_ty = try pt.errorIntType();
     if (!error_ty.errorSetIsEmpty(zcu))
-        if (payload_ty.hasRuntimeBits(pt))
+        if (payload_ty.hasRuntimeBits(zcu))
             if (is_ptr)
                 try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
             else
@@ -6430,7 +6432,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
     try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
 
     const repr_ty = if (ty.isRuntimeFloat())
-        pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
+        pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
     else
         ty;
 
@@ -6534,7 +6536,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand_mat = try Materialize.start(f, inst, ty, operand);
     try reap(f, inst, &.{ pl_op.operand, extra.operand });
 
-    const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8));
+    const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
     const is_float = ty.isRuntimeFloat();
     const is_128 = repr_bits == 128;
     const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty;
@@ -6585,7 +6587,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty = ptr_ty.childType(zcu);
 
     const repr_ty = if (ty.isRuntimeFloat())
-        pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
+        pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
     else
         ty;
 
@@ -6626,7 +6628,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
     const repr_ty = if (ty.isRuntimeFloat())
-        pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
+        pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
     else
         ty;
 
@@ -6666,7 +6668,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const dest_slice = try f.resolveInst(bin_op.lhs);
     const value = try f.resolveInst(bin_op.rhs);
     const elem_ty = f.typeOf(bin_op.rhs);
-    const elem_abi_size = elem_ty.abiSize(pt);
+    const elem_abi_size = elem_ty.abiSize(zcu);
     const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
     const writer = f.object.writer();
 
@@ -6831,7 +6833,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
     const union_ty = f.typeOf(bin_op.lhs).childType(zcu);
-    const layout = union_ty.unionGetLayout(pt);
+    const layout = union_ty.unionGetLayout(zcu);
     if (layout.tag_size == 0) return .none;
     const tag_ty = union_ty.unionTagTypeSafety(zcu).?;
 
@@ -6846,13 +6848,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
 
 fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
     const pt = f.object.dg.pt;
+    const zcu = pt.zcu;
     const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
     const union_ty = f.typeOf(ty_op.operand);
-    const layout = union_ty.unionGetLayout(pt);
+    const layout = union_ty.unionGetLayout(zcu);
     if (layout.tag_size == 0) return .none;
 
     const inst_ty = f.typeOfIndex(inst);
@@ -6960,6 +6963,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
 
 fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
     const pt = f.object.dg.pt;
+    const zcu = pt.zcu;
     const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
     const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
 
@@ -6978,7 +6982,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
         try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other);
         try writer.writeAll("] = ");
 
-        const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
+        const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
         const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
 
         try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
@@ -7001,7 +7005,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand_ty = f.typeOf(reduce.operand);
     const writer = f.object.writer();
 
-    const use_operator = scalar_ty.bitSize(pt) <= 64;
+    const use_operator = scalar_ty.bitSize(zcu) <= 64;
     const op: union(enum) {
         const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
         builtin: Func,
@@ -7178,7 +7182,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                     var field_it = loaded_struct.iterateRuntimeOrder(ip);
                     while (field_it.next()) |field_index| {
                         const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
                         try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@@ -7203,7 +7207,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                     for (0..elements.len) |field_index| {
                         if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
                         const field_ty = inst_ty.structFieldType(field_index, zcu);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         if (!empty) {
                             try writer.writeAll("zig_or_");
@@ -7216,7 +7220,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                     for (resolved_elements, 0..) |element, field_index| {
                         if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
                         const field_ty = inst_ty.structFieldType(field_index, zcu);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         if (!empty) try writer.writeAll(", ");
                         // TODO: Skip this entire shift if val is 0?
@@ -7248,7 +7252,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                         try writer.writeByte(')');
                         if (!empty) try writer.writeByte(')');
 
-                        bit_offset += field_ty.bitSize(pt);
+                        bit_offset += field_ty.bitSize(zcu);
                         empty = false;
                     }
                     try writer.writeAll(";\n");
@@ -7258,7 +7262,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
         .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
             if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
             const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
-            if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+            if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
             const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
             try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
@@ -7294,7 +7298,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);
 
     const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
-        const layout = union_ty.unionGetLayout(pt);
+        const layout = union_ty.unionGetLayout(zcu);
         if (layout.tag_size != 0) {
             const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
             const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
@@ -7818,7 +7822,7 @@ fn formatIntLiteral(
         };
         undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
         break :blk undef_int.toConst();
-    } else data.val.toBigInt(&int_buf, pt);
+    } else data.val.toBigInt(&int_buf, zcu);
     assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
 
     const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);
@@ -8062,9 +8066,10 @@ const Vectorize = struct {
 };
 
 fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
-    return switch (ty.zigTypeTag(pt.zcu)) {
+    const zcu = pt.zcu;
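+    // Arrays and vectors always lower to C arrays; so do ABI integers too wide for any C integer type (toCIntBits == null).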
+    return switch (ty.zigTypeTag(zcu)) {
         .Array, .Vector => return true,
-        else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null,
+        else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
     };
 }
 
src/codegen/llvm.zig
@@ -1001,12 +1001,12 @@ pub const Object = struct {
         if (o.error_name_table == .none) return;
 
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
 
         const error_name_list = ip.global_error_set.getNamesFromMainThread();
-        const llvm_errors = try mod.gpa.alloc(Builder.Constant, 1 + error_name_list.len);
-        defer mod.gpa.free(llvm_errors);
+        const llvm_errors = try zcu.gpa.alloc(Builder.Constant, 1 + error_name_list.len);
+        defer zcu.gpa.free(llvm_errors);
 
         // TODO: Address space
         const slice_ty = Type.slice_const_u8_sentinel_0;
@@ -1041,7 +1041,7 @@ pub const Object = struct {
         table_variable_index.setMutability(.constant, &o.builder);
         table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
         table_variable_index.setAlignment(
-            slice_ty.abiAlignment(pt).toLlvm(),
+            slice_ty.abiAlignment(zcu).toLlvm(),
             &o.builder,
         );
 
@@ -1428,7 +1428,7 @@ pub const Object = struct {
         var llvm_arg_i: u32 = 0;
 
         // This gets the LLVM values from the function and stores them in `ng.args`.
-        const sret = firstParamSRet(fn_info, pt, target);
+        const sret = firstParamSRet(fn_info, zcu, target);
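+        // sret: the result is returned through a caller-provided pointer passed as the first LLVM parameter.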
         const ret_ptr: Builder.Value = if (sret) param: {
             const param = wip.arg(llvm_arg_i);
             llvm_arg_i += 1;
@@ -1469,8 +1469,8 @@ pub const Object = struct {
                         const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
                         const param = wip.arg(llvm_arg_i);
 
-                        if (isByRef(param_ty, pt)) {
-                            const alignment = param_ty.abiAlignment(pt).toLlvm();
+                        if (isByRef(param_ty, zcu)) {
+                            const alignment = param_ty.abiAlignment(zcu).toLlvm();
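+                            // The Zig type is by-ref in the backend but the LLVM argument arrived by value; spill it to a stack slot and use the pointer.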
                             const param_llvm_ty = param.typeOfWip(&wip);
                             const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
                             _ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1486,12 +1486,12 @@ pub const Object = struct {
                         const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
                         const param_llvm_ty = try o.lowerType(param_ty);
                         const param = wip.arg(llvm_arg_i);
-                        const alignment = param_ty.abiAlignment(pt).toLlvm();
+                        const alignment = param_ty.abiAlignment(zcu).toLlvm();
 
                         try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
                         llvm_arg_i += 1;
 
-                        if (isByRef(param_ty, pt)) {
+                        if (isByRef(param_ty, zcu)) {
                             args.appendAssumeCapacity(param);
                         } else {
                             args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
@@ -1501,12 +1501,12 @@ pub const Object = struct {
                         const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
                         const param_llvm_ty = try o.lowerType(param_ty);
                         const param = wip.arg(llvm_arg_i);
-                        const alignment = param_ty.abiAlignment(pt).toLlvm();
+                        const alignment = param_ty.abiAlignment(zcu).toLlvm();
 
                         try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder);
                         llvm_arg_i += 1;
 
-                        if (isByRef(param_ty, pt)) {
+                        if (isByRef(param_ty, zcu)) {
                             args.appendAssumeCapacity(param);
                         } else {
                             args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
@@ -1519,11 +1519,11 @@ pub const Object = struct {
                         llvm_arg_i += 1;
 
                         const param_llvm_ty = try o.lowerType(param_ty);
-                        const alignment = param_ty.abiAlignment(pt).toLlvm();
+                        const alignment = param_ty.abiAlignment(zcu).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
                         _ = try wip.store(.normal, param, arg_ptr, alignment);
 
-                        args.appendAssumeCapacity(if (isByRef(param_ty, pt))
+                        args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
                             arg_ptr
                         else
                             try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1547,7 +1547,7 @@ pub const Object = struct {
                         const elem_align = (if (ptr_info.flags.alignment != .none)
                             @as(InternPool.Alignment, ptr_info.flags.alignment)
                         else
-                            Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm();
+                            Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm();
                         try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
                         const ptr_param = wip.arg(llvm_arg_i);
                         llvm_arg_i += 1;
@@ -1564,7 +1564,7 @@ pub const Object = struct {
                         const field_types = it.types_buffer[0..it.types_len];
                         const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
                         const param_llvm_ty = try o.lowerType(param_ty);
-                        const param_alignment = param_ty.abiAlignment(pt).toLlvm();
+                        const param_alignment = param_ty.abiAlignment(zcu).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target);
                         const llvm_ty = try o.builder.structType(.normal, field_types);
                         for (0..field_types.len) |field_i| {
@@ -1576,7 +1576,7 @@ pub const Object = struct {
                             _ = try wip.store(.normal, param, field_ptr, alignment);
                         }
 
-                        const is_by_ref = isByRef(param_ty, pt);
+                        const is_by_ref = isByRef(param_ty, zcu);
                         args.appendAssumeCapacity(if (is_by_ref)
                             arg_ptr
                         else
@@ -1594,11 +1594,11 @@ pub const Object = struct {
                         const param = wip.arg(llvm_arg_i);
                         llvm_arg_i += 1;
 
-                        const alignment = param_ty.abiAlignment(pt).toLlvm();
+                        const alignment = param_ty.abiAlignment(zcu).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
                         _ = try wip.store(.normal, param, arg_ptr, alignment);
 
-                        args.appendAssumeCapacity(if (isByRef(param_ty, pt))
+                        args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
                             arg_ptr
                         else
                             try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1609,11 +1609,11 @@ pub const Object = struct {
                         const param = wip.arg(llvm_arg_i);
                         llvm_arg_i += 1;
 
-                        const alignment = param_ty.abiAlignment(pt).toLlvm();
+                        const alignment = param_ty.abiAlignment(zcu).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, param.typeOfWip(&wip), alignment, target);
                         _ = try wip.store(.normal, param, arg_ptr, alignment);
 
-                        args.appendAssumeCapacity(if (isByRef(param_ty, pt))
+                        args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
                             arg_ptr
                         else
                             try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1738,13 +1738,13 @@ pub const Object = struct {
 
     fn updateExportedValue(
         o: *Object,
-        mod: *Zcu,
+        zcu: *Zcu,
         exported_value: InternPool.Index,
         export_indices: []const u32,
     ) link.File.UpdateExportsError!void {
-        const gpa = mod.gpa;
-        const ip = &mod.intern_pool;
-        const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
+        const gpa = zcu.gpa;
+        const ip = &zcu.intern_pool;
+        const main_exp_name = try o.builder.strtabString(zcu.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
         const global_index = i: {
             const gop = try o.uav_map.getOrPut(gpa, exported_value);
             if (gop.found_existing) {
@@ -1768,18 +1768,18 @@ pub const Object = struct {
             try variable_index.setInitializer(init_val, &o.builder);
             break :i global_index;
         };
-        return updateExportedGlobal(o, mod, global_index, export_indices);
+        return updateExportedGlobal(o, zcu, global_index, export_indices);
     }
 
     fn updateExportedGlobal(
         o: *Object,
-        mod: *Zcu,
+        zcu: *Zcu,
         global_index: Builder.Global.Index,
         export_indices: []const u32,
     ) link.File.UpdateExportsError!void {
-        const comp = mod.comp;
-        const ip = &mod.intern_pool;
-        const first_export = mod.all_exports.items[export_indices[0]];
+        const comp = zcu.comp;
+        const ip = &zcu.intern_pool;
+        const first_export = zcu.all_exports.items[export_indices[0]];
 
         // We will rename this global to have a name matching `first_export`.
         // Successive exports become aliases.
@@ -1836,7 +1836,7 @@ pub const Object = struct {
         // Until then we iterate over existing aliases and make them point
         // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
         for (export_indices[1..]) |export_idx| {
-            const exp = mod.all_exports.items[export_idx];
+            const exp = zcu.all_exports.items[export_idx];
             const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip));
             if (o.builder.getGlobal(exp_name)) |global| {
                 switch (global.ptrConst(&o.builder).kind) {
@@ -1923,7 +1923,7 @@ pub const Object = struct {
                 const name = try o.allocTypeName(ty);
                 defer gpa.free(name);
                 const builder_name = try o.builder.metadataString(name);
-                const debug_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types
+                const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types
                 const debug_int_type = switch (info.signedness) {
                     .signed => try o.builder.debugSignedType(builder_name, debug_bits),
                     .unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits),
@@ -1932,7 +1932,7 @@ pub const Object = struct {
                 return debug_int_type;
             },
             .Enum => {
-                if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty);
                     try o.debug_type_map.put(gpa, ty, debug_enum_type);
                     return debug_enum_type;
@@ -1949,7 +1949,7 @@ pub const Object = struct {
                 for (enum_type.names.get(ip), 0..) |field_name_ip, i| {
                     var bigint_space: Value.BigIntSpace = undefined;
                     const bigint = if (enum_type.values.len != 0)
-                        Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, pt)
+                        Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu)
                     else
                         std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
 
@@ -1976,8 +1976,8 @@ pub const Object = struct {
                     scope,
                     ty.typeDeclSrcLine(zcu).? + 1, // Line
                     try o.lowerDebugType(int_ty),
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(enumerators),
                 );
 
@@ -2017,10 +2017,10 @@ pub const Object = struct {
                     ptr_info.flags.is_const or
                     ptr_info.flags.is_volatile or
                     ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
-                    !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt))
+                    !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu))
                 {
                     const bland_ptr_ty = try pt.ptrType(.{
-                        .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt))
+                        .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu))
                             .anyopaque_type
                         else
                             ptr_info.child,
@@ -2050,10 +2050,10 @@ pub const Object = struct {
                     defer gpa.free(name);
                     const line = 0;
 
-                    const ptr_size = ptr_ty.abiSize(pt);
-                    const ptr_align = ptr_ty.abiAlignment(pt);
-                    const len_size = len_ty.abiSize(pt);
-                    const len_align = len_ty.abiAlignment(pt);
+                    const ptr_size = ptr_ty.abiSize(zcu);
+                    const ptr_align = ptr_ty.abiAlignment(zcu);
+                    const len_size = len_ty.abiSize(zcu);
+                    const len_align = len_ty.abiAlignment(zcu);
 
                     const len_offset = len_align.forward(ptr_size);
 
@@ -2085,8 +2085,8 @@ pub const Object = struct {
                         o.debug_compile_unit, // Scope
                         line,
                         .none, // Underlying type
-                        ty.abiSize(pt) * 8,
-                        (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                        ty.abiSize(zcu) * 8,
+                        (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                         try o.builder.debugTuple(&.{
                             debug_ptr_type,
                             debug_len_type,
@@ -2114,7 +2114,7 @@ pub const Object = struct {
                     0, // Line
                     debug_elem_ty,
                     target.ptrBitWidth(),
-                    (ty.ptrAlignment(pt).toByteUnits() orelse 0) * 8,
+                    (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8,
                     0, // Offset
                 );
 
@@ -2165,8 +2165,8 @@ pub const Object = struct {
                     .none, // Scope
                     0, // Line
                     try o.lowerDebugType(ty.childType(zcu)),
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(&.{
                         try o.builder.debugSubrange(
                             try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2208,8 +2208,8 @@ pub const Object = struct {
                     .none, // Scope
                     0, // Line
                     debug_elem_type,
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(&.{
                         try o.builder.debugSubrange(
                             try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2225,7 +2225,7 @@ pub const Object = struct {
                 const name = try o.allocTypeName(ty);
                 defer gpa.free(name);
                 const child_ty = ty.optionalChild(zcu);
-                if (!child_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     const debug_bool_type = try o.builder.debugBoolType(
                         try o.builder.metadataString(name),
                         8,
@@ -2252,10 +2252,10 @@ pub const Object = struct {
                 }
 
                 const non_null_ty = Type.u8;
-                const payload_size = child_ty.abiSize(pt);
-                const payload_align = child_ty.abiAlignment(pt);
-                const non_null_size = non_null_ty.abiSize(pt);
-                const non_null_align = non_null_ty.abiAlignment(pt);
+                const payload_size = child_ty.abiSize(zcu);
+                const payload_align = child_ty.abiAlignment(zcu);
+                const non_null_size = non_null_ty.abiSize(zcu);
+                const non_null_align = non_null_ty.abiAlignment(zcu);
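+                // The u8 non-null flag is placed after the payload, at payload_size rounded up to the flag's alignment.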
                 const non_null_offset = non_null_align.forward(payload_size);
 
                 const debug_data_type = try o.builder.debugMemberType(
@@ -2286,8 +2286,8 @@ pub const Object = struct {
                     o.debug_compile_unit, // Scope
                     0, // Line
                     .none, // Underlying type
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(&.{
                         debug_data_type,
                         debug_some_type,
@@ -2304,7 +2304,7 @@ pub const Object = struct {
             },
             .ErrorUnion => {
                 const payload_ty = ty.errorUnionPayload(zcu);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     // TODO: Maybe remove?
                     const debug_error_union_type = try o.lowerDebugType(Type.anyerror);
                     try o.debug_type_map.put(gpa, ty, debug_error_union_type);
@@ -2314,10 +2314,10 @@ pub const Object = struct {
                 const name = try o.allocTypeName(ty);
                 defer gpa.free(name);
 
-                const error_size = Type.anyerror.abiSize(pt);
-                const error_align = Type.anyerror.abiAlignment(pt);
-                const payload_size = payload_ty.abiSize(pt);
-                const payload_align = payload_ty.abiAlignment(pt);
+                const error_size = Type.anyerror.abiSize(zcu);
+                const error_align = Type.anyerror.abiAlignment(zcu);
+                const payload_size = payload_ty.abiSize(zcu);
+                const payload_align = payload_ty.abiAlignment(zcu);
 
                 var error_index: u32 = undefined;
                 var payload_index: u32 = undefined;
@@ -2365,8 +2365,8 @@ pub const Object = struct {
                     o.debug_compile_unit, // Scope
                     0, // Line
                     .none, // Underlying type
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(&fields),
                 );
 
@@ -2393,8 +2393,8 @@ pub const Object = struct {
                         const info = Type.fromInterned(backing_int_ty).intInfo(zcu);
                         const builder_name = try o.builder.metadataString(name);
                         const debug_int_type = switch (info.signedness) {
-                            .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(pt) * 8),
-                            .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(pt) * 8),
+                            .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8),
+                            .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8),
                         };
                         try o.debug_type_map.put(gpa, ty, debug_int_type);
                         return debug_int_type;
@@ -2414,10 +2414,10 @@ pub const Object = struct {
                         const debug_fwd_ref = try o.builder.debugForwardReference();
 
                         for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
-                            if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+                            if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
 
-                            const field_size = Type.fromInterned(field_ty).abiSize(pt);
-                            const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+                            const field_size = Type.fromInterned(field_ty).abiSize(zcu);
+                            const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
                             const field_offset = field_align.forward(offset);
                             offset = field_offset + field_size;
 
@@ -2445,8 +2445,8 @@ pub const Object = struct {
                             o.debug_compile_unit, // Scope
                             0, // Line
                             .none, // Underlying type
-                            ty.abiSize(pt) * 8,
-                            (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                            ty.abiSize(zcu) * 8,
+                            (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                             try o.builder.debugTuple(fields.items),
                         );
 
@@ -2472,7 +2472,7 @@ pub const Object = struct {
                     else => {},
                 }
 
-                if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty);
                     try o.debug_type_map.put(gpa, ty, debug_struct_type);
                     return debug_struct_type;
@@ -2494,14 +2494,14 @@ pub const Object = struct {
                 var it = struct_type.iterateRuntimeOrder(ip);
                 while (it.next()) |field_index| {
                     const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
-                    const field_size = field_ty.abiSize(pt);
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+                    const field_size = field_ty.abiSize(zcu);
                     const field_align = pt.structFieldAlignment(
                         struct_type.fieldAlign(ip, field_index),
                         field_ty,
                         struct_type.layout,
                     );
-                    const field_offset = ty.structFieldOffset(field_index, pt);
+                    const field_offset = ty.structFieldOffset(field_index, zcu);
 
                     const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
                         try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
@@ -2524,8 +2524,8 @@ pub const Object = struct {
                     o.debug_compile_unit, // Scope
                     0, // Line
                     .none, // Underlying type
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(fields.items),
                 );
 
@@ -2543,7 +2543,7 @@ pub const Object = struct {
 
                 const union_type = ip.loadUnionType(ty.toIntern());
                 if (!union_type.haveFieldTypes(ip) or
-                    !ty.hasRuntimeBitsIgnoreComptime(pt) or
+                    !ty.hasRuntimeBitsIgnoreComptime(zcu) or
                     !union_type.haveLayout(ip))
                 {
                     const debug_union_type = try o.makeEmptyNamespaceDebugType(ty);
@@ -2551,7 +2551,7 @@ pub const Object = struct {
                     return debug_union_type;
                 }
 
-                const layout = pt.getUnionLayout(union_type);
+                const layout = Type.getUnionLayout(union_type, zcu);
 
                 const debug_fwd_ref = try o.builder.debugForwardReference();
 
@@ -2565,8 +2565,8 @@ pub const Object = struct {
                         o.debug_compile_unit, // Scope
                         0, // Line
                         .none, // Underlying type
-                        ty.abiSize(pt) * 8,
-                        (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                        ty.abiSize(zcu) * 8,
+                        (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                         try o.builder.debugTuple(
                             &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
                         ),
@@ -2593,12 +2593,12 @@ pub const Object = struct {
 
                 for (0..tag_type.names.len) |field_index| {
                     const field_ty = union_type.field_types.get(ip)[field_index];
-                    if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+                    if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-                    const field_size = Type.fromInterned(field_ty).abiSize(pt);
+                    const field_size = Type.fromInterned(field_ty).abiSize(zcu);
                     const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
                         .@"packed" => .none,
-                        .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)),
+                        .auto, .@"extern" => Type.unionFieldNormalAlignment(union_type, @intCast(field_index), zcu),
                     };
 
                     const field_name = tag_type.names.get(ip)[field_index];
@@ -2627,8 +2627,8 @@ pub const Object = struct {
                     o.debug_compile_unit, // Scope
                     0, // Line
                     .none, // Underlying type
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(fields.items),
                 );
 
@@ -2686,8 +2686,8 @@ pub const Object = struct {
                     o.debug_compile_unit, // Scope
                     0, // Line
                     .none, // Underlying type
-                    ty.abiSize(pt) * 8,
-                    (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+                    ty.abiSize(zcu) * 8,
+                    (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
                     try o.builder.debugTuple(&full_fields),
                 );
 
@@ -2708,8 +2708,8 @@ pub const Object = struct {
                 try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len);
 
                 // Return type goes first.
-                if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(pt)) {
-                    const sret = firstParamSRet(fn_info, pt, target);
+                if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) {
+                    const sret = firstParamSRet(fn_info, zcu, target);
                     const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
                     debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty));
 
@@ -2730,9 +2730,9 @@ pub const Object = struct {
 
                 for (0..fn_info.param_types.len) |i| {
                     const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]);
-                    if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                    if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-                    if (isByRef(param_ty, pt)) {
+                    if (isByRef(param_ty, zcu)) {
                         const ptr_ty = try pt.singleMutPtrType(param_ty);
                         debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
                     } else {
@@ -2842,7 +2842,7 @@ pub const Object = struct {
 
         const fn_info = zcu.typeToFunc(ty).?;
         const target = owner_mod.resolved_target.result;
-        const sret = firstParamSRet(fn_info, pt, target);
+        const sret = firstParamSRet(fn_info, zcu, target);
 
         const is_extern, const lib_name = switch (ip.indexToKey(val.toIntern())) {
             .variable => |variable| .{ false, variable.lib_name },
@@ -2934,14 +2934,14 @@ pub const Object = struct {
                 .byval => {
                     const param_index = it.zig_index - 1;
                     const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
-                    if (!isByRef(param_ty, pt)) {
+                    if (!isByRef(param_ty, zcu)) {
                         try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
                     }
                 },
                 .byref => {
                     const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
                     const param_llvm_ty = try o.lowerType(param_ty);
-                    const alignment = param_ty.abiAlignment(pt);
+                    const alignment = param_ty.abiAlignment(zcu);
                     try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty);
                 },
                 .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -3042,8 +3042,8 @@ pub const Object = struct {
         }
         errdefer assert(o.uav_map.remove(uav));
 
-        const mod = o.pt.zcu;
-        const decl_ty = mod.intern_pool.typeOf(uav);
+        const zcu = o.pt.zcu;
+        const decl_ty = zcu.intern_pool.typeOf(uav);
 
         const variable_index = try o.builder.addVariable(
             try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(uav)}),
@@ -3106,9 +3106,9 @@ pub const Object = struct {
 
     fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const target = mod.getTarget();
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const target = zcu.getTarget();
+        const ip = &zcu.intern_pool;
         return switch (t.toIntern()) {
             .u0_type, .i0_type => unreachable,
             inline .u1_type,
@@ -3230,16 +3230,16 @@ pub const Object = struct {
                 ),
                 .opt_type => |child_ty| {
                     // Must stay in sync with `opt_payload` logic in `lowerPtr`.
-                    if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(pt)) return .i8;
+                    if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(zcu)) return .i8;
 
                     const payload_ty = try o.lowerType(Type.fromInterned(child_ty));
-                    if (t.optionalReprIsPayload(mod)) return payload_ty;
+                    if (t.optionalReprIsPayload(zcu)) return payload_ty;
 
                     comptime assert(optional_layout_version == 3);
                     var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined };
                     var fields_len: usize = 2;
-                    const offset = Type.fromInterned(child_ty).abiSize(pt) + 1;
-                    const abi_size = t.abiSize(pt);
+                    const offset = Type.fromInterned(child_ty).abiSize(zcu) + 1;
+                    const abi_size = t.abiSize(zcu);
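+                    // Pad out the gap between payload + non-null flag and the type's full ABI size with i8s.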
                     const padding_len = abi_size - offset;
                     if (padding_len > 0) {
                         fields[2] = try o.builder.arrayType(padding_len, .i8);
@@ -3252,16 +3252,16 @@ pub const Object = struct {
                     // Must stay in sync with `codegen.errUnionPayloadOffset`.
                     // See logic in `lowerPtr`.
                     const error_type = try o.errorIntType();
-                    if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(pt))
+                    if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(zcu))
                         return error_type;
                     const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type));
                     const err_int_ty = try o.pt.errorIntType();
 
-                    const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(pt);
-                    const error_align = err_int_ty.abiAlignment(pt);
+                    const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(zcu);
+                    const error_align = err_int_ty.abiAlignment(zcu);
 
-                    const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(pt);
-                    const error_size = err_int_ty.abiSize(pt);
+                    const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(zcu);
+                    const error_size = err_int_ty.abiSize(zcu);
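+                    // Whichever of the error value and the payload has stricter alignment is laid out first (see codegen.errUnionPayloadOffset).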
 
                     var fields: [3]Builder.Type = undefined;
                     var fields_len: usize = 2;
@@ -3320,7 +3320,7 @@ pub const Object = struct {
                             field_ty,
                             struct_type.layout,
                         );
-                        const field_ty_align = field_ty.abiAlignment(pt);
+                        const field_ty_align = field_ty.abiAlignment(zcu);
                         if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
                         big_align = big_align.max(field_align);
                         const prev_offset = offset;
@@ -3332,7 +3332,7 @@ pub const Object = struct {
                             try o.builder.arrayType(padding_len, .i8),
                         );
 
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                             // This is a zero-bit field. If there are runtime bits after this field,
                             // map to the next LLVM field (which we know exists): otherwise, don't
                             // map the field, indicating it's at the end of the struct.
@@ -3351,7 +3351,7 @@ pub const Object = struct {
                         }, @intCast(llvm_field_types.items.len));
                         try llvm_field_types.append(o.gpa, try o.lowerType(field_ty));
 
-                        offset += field_ty.abiSize(pt);
+                        offset += field_ty.abiSize(zcu);
                     }
                     {
                         const prev_offset = offset;
@@ -3384,7 +3384,7 @@ pub const Object = struct {
                     var offset: u64 = 0;
                     var big_align: InternPool.Alignment = .none;
 
-                    const struct_size = t.abiSize(pt);
+                    const struct_size = t.abiSize(zcu);
 
                     for (
                         anon_struct_type.types.get(ip),
@@ -3393,7 +3393,7 @@ pub const Object = struct {
                     ) |field_ty, field_val, field_index| {
                         if (field_val != .none) continue;
 
-                        const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+                        const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
                         big_align = big_align.max(field_align);
                         const prev_offset = offset;
                         offset = field_align.forward(offset);
@@ -3403,7 +3403,7 @@ pub const Object = struct {
                             o.gpa,
                             try o.builder.arrayType(padding_len, .i8),
                         );
-                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) {
+                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) {
                             // This is a zero-bit field. If there are runtime bits after this field,
                             // map to the next LLVM field (which we know exists): otherwise, don't
                             // map the field, indicating it's at the end of the struct.
@@ -3421,7 +3421,7 @@ pub const Object = struct {
                         }, @intCast(llvm_field_types.items.len));
                         try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty)));
 
-                        offset += Type.fromInterned(field_ty).abiSize(pt);
+                        offset += Type.fromInterned(field_ty).abiSize(zcu);
                     }
                     {
                         const prev_offset = offset;
@@ -3438,10 +3438,10 @@ pub const Object = struct {
                     if (o.type_map.get(t.toIntern())) |value| return value;
 
                     const union_obj = ip.loadUnionType(t.toIntern());
-                    const layout = pt.getUnionLayout(union_obj);
+                    const layout = Type.getUnionLayout(union_obj, zcu);
 
                     if (union_obj.flagsUnordered(ip).layout == .@"packed") {
-                        const int_ty = try o.builder.intType(@intCast(t.bitSize(pt)));
+                        const int_ty = try o.builder.intType(@intCast(t.bitSize(zcu)));
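+                        // A packed union is represented as a single integer spanning its full bit width.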
                         try o.type_map.put(o.gpa, t.toIntern(), int_ty);
                         return int_ty;
                     }
@@ -3547,32 +3547,32 @@ pub const Object = struct {
     /// There are other similar cases handled here as well.
     fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
+        const zcu = pt.zcu;
+        const lower_elem_ty = switch (elem_ty.zigTypeTag(zcu)) {
             .Opaque => true,
-            .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
-            .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(pt),
-            else => elem_ty.hasRuntimeBitsIgnoreComptime(pt),
+            .Fn => !zcu.typeToFunc(elem_ty).?.is_generic,
+            .Array => elem_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu),
+            else => elem_ty.hasRuntimeBitsIgnoreComptime(zcu),
         };
         return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8;
     }
 
     fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
+        const target = zcu.getTarget();
         const ret_ty = try lowerFnRetTy(o, fn_info);
 
         var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
         defer llvm_params.deinit(o.gpa);
 
-        if (firstParamSRet(fn_info, pt, target)) {
+        if (firstParamSRet(fn_info, zcu, target)) {
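+            // Reserve the leading parameter for the sret result pointer.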
             try llvm_params.append(o.gpa, .ptr);
         }
 
-        if (Type.fromInterned(fn_info.return_type).isError(mod) and
-            mod.comp.config.any_error_tracing)
+        if (Type.fromInterned(fn_info.return_type).isError(zcu) and
+            zcu.comp.config.any_error_tracing)
         {
             const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType());
             try llvm_params.append(o.gpa, try o.lowerType(ptr_ty));
@@ -3591,13 +3591,13 @@ pub const Object = struct {
             .abi_sized_int => {
                 const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
                 try llvm_params.append(o.gpa, try o.builder.intType(
-                    @intCast(param_ty.abiSize(pt) * 8),
+                    @intCast(param_ty.abiSize(zcu) * 8),
                 ));
             },
             .slice => {
                 const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
                 try llvm_params.appendSlice(o.gpa, &.{
-                    try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(mod), target)),
+                    try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(zcu), target)),
                     try o.lowerType(Type.usize),
                 });
             },
@@ -3609,7 +3609,7 @@ pub const Object = struct {
             },
             .float_array => |count| {
                 const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
-                const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
+                const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, zcu).?);
                 try llvm_params.append(o.gpa, try o.builder.arrayType(count, float_ty));
             },
             .i32_array, .i64_array => |arr_len| {
@@ -3630,14 +3630,14 @@ pub const Object = struct {
 
     fn lowerValueToInt(o: *Object, llvm_int_ty: Builder.Type, arg_val: InternPool.Index) Error!Builder.Constant {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
+        const target = zcu.getTarget();
 
         const val = Value.fromInterned(arg_val);
         const val_key = ip.indexToKey(val.toIntern());
 
-        if (val.isUndefDeep(mod)) return o.builder.undefConst(llvm_int_ty);
+        if (val.isUndefDeep(zcu)) return o.builder.undefConst(llvm_int_ty);
 
         const ty = Type.fromInterned(val_key.typeOf());
         switch (val_key) {
@@ -3661,7 +3661,7 @@ pub const Object = struct {
                     var running_int = try o.builder.intConst(llvm_int_ty, 0);
                     var running_bits: u16 = 0;
                     for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
-                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits);
                         const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern());
@@ -3669,7 +3669,7 @@ pub const Object = struct {
 
                         running_int = try o.builder.binConst(.xor, running_int, shifted);
 
-                        const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt));
+                        const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(zcu));
                         running_bits += ty_bit_size;
                     }
                     return running_int;
@@ -3678,10 +3678,10 @@ pub const Object = struct {
                 else => unreachable,
             },
             .un => |un| {
-                const layout = ty.unionGetLayout(pt);
+                const layout = ty.unionGetLayout(zcu);
                 if (layout.payload_size == 0) return o.lowerValue(un.tag);
 
-                const union_obj = mod.typeToUnion(ty).?;
+                const union_obj = zcu.typeToUnion(ty).?;
                 const container_layout = union_obj.flagsUnordered(ip).layout;
 
                 assert(container_layout == .@"packed");
@@ -3694,9 +3694,9 @@ pub const Object = struct {
                     need_unnamed = true;
                     return union_val;
                 }
-                const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+                const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-                if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(llvm_int_ty, 0);
+                if (!field_ty.hasRuntimeBits(zcu)) return o.builder.intConst(llvm_int_ty, 0);
                 return o.lowerValueToInt(llvm_int_ty, un.val);
             },
             .simple_value => |simple_value| switch (simple_value) {
@@ -3710,7 +3710,7 @@ pub const Object = struct {
             .opt => {}, // pointer like optional expected
             else => unreachable,
         }
-        const bits = ty.bitSize(pt);
+        const bits = ty.bitSize(zcu);
         const bytes: usize = @intCast(std.mem.alignForward(u64, bits, 8) / 8);
 
         var stack = std.heap.stackFallback(32, o.gpa);
@@ -3743,14 +3743,14 @@ pub const Object = struct {
 
     fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
+        const target = zcu.getTarget();
 
         const val = Value.fromInterned(arg_val);
         const val_key = ip.indexToKey(val.toIntern());
 
-        if (val.isUndefDeep(mod)) {
+        if (val.isUndefDeep(zcu)) {
             return o.builder.undefConst(try o.lowerType(Type.fromInterned(val_key.typeOf())));
         }
 
@@ -3800,7 +3800,7 @@ pub const Object = struct {
             },
             .int => {
                 var bigint_space: Value.BigIntSpace = undefined;
-                const bigint = val.toBigInt(&bigint_space, pt);
+                const bigint = val.toBigInt(&bigint_space, zcu);
                 return lowerBigInt(o, ty, bigint);
             },
             .err => |err| {
@@ -3811,20 +3811,20 @@ pub const Object = struct {
             .error_union => |error_union| {
                 const err_val = switch (error_union.val) {
                     .err_name => |err_name| try pt.intern(.{ .err = .{
-                        .ty = ty.errorUnionSet(mod).toIntern(),
+                        .ty = ty.errorUnionSet(zcu).toIntern(),
                         .name = err_name,
                     } }),
                     .payload => (try pt.intValue(try pt.errorIntType(), 0)).toIntern(),
                 };
                 const err_int_ty = try pt.errorIntType();
-                const payload_type = ty.errorUnionPayload(mod);
-                if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
+                const payload_type = ty.errorUnionPayload(zcu);
+                if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
                     // We use the error type directly as the type.
                     return o.lowerValue(err_val);
                 }
 
-                const payload_align = payload_type.abiAlignment(pt);
-                const error_align = err_int_ty.abiAlignment(pt);
+                const payload_align = payload_type.abiAlignment(zcu);
+                const error_align = err_int_ty.abiAlignment(zcu);
                 const llvm_error_value = try o.lowerValue(err_val);
                 const llvm_payload_value = try o.lowerValue(switch (error_union.val) {
                     .err_name => try pt.intern(.{ .undef = payload_type.toIntern() }),
@@ -3858,16 +3858,16 @@ pub const Object = struct {
             .enum_tag => |enum_tag| o.lowerValue(enum_tag.int),
             .float => switch (ty.floatBits(target)) {
                 16 => if (backendSupportsF16(target))
-                    try o.builder.halfConst(val.toFloat(f16, pt))
+                    try o.builder.halfConst(val.toFloat(f16, zcu))
                 else
-                    try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, pt)))),
-                32 => try o.builder.floatConst(val.toFloat(f32, pt)),
-                64 => try o.builder.doubleConst(val.toFloat(f64, pt)),
+                    try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, zcu)))),
+                32 => try o.builder.floatConst(val.toFloat(f32, zcu)),
+                64 => try o.builder.doubleConst(val.toFloat(f64, zcu)),
                 80 => if (backendSupportsF80(target))
-                    try o.builder.x86_fp80Const(val.toFloat(f80, pt))
+                    try o.builder.x86_fp80Const(val.toFloat(f80, zcu))
                 else
-                    try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, pt)))),
-                128 => try o.builder.fp128Const(val.toFloat(f128, pt)),
+                    try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, zcu)))),
+                128 => try o.builder.fp128Const(val.toFloat(f128, zcu)),
                 else => unreachable,
             },
             .ptr => try o.lowerPtr(arg_val, 0),
@@ -3877,14 +3877,14 @@ pub const Object = struct {
             }),
             .opt => |opt| {
                 comptime assert(optional_layout_version == 3);
-                const payload_ty = ty.optionalChild(mod);
+                const payload_ty = ty.optionalChild(zcu);
 
                 const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none));
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     return non_null_bit;
                 }
                 const llvm_ty = try o.lowerType(ty);
-                if (ty.optionalReprIsPayload(mod)) return switch (opt.val) {
+                if (ty.optionalReprIsPayload(zcu)) return switch (opt.val) {
                     .none => switch (llvm_ty.tag(&o.builder)) {
                         .integer => try o.builder.intConst(llvm_ty, 0),
                         .pointer => try o.builder.nullConst(llvm_ty),
@@ -3893,7 +3893,7 @@ pub const Object = struct {
                     },
                     else => |payload| try o.lowerValue(payload),
                 };
-                assert(payload_ty.zigTypeTag(mod) != .Fn);
+                assert(payload_ty.zigTypeTag(zcu) != .Fn);
 
                 var fields: [3]Builder.Type = undefined;
                 var vals: [3]Builder.Constant = undefined;
@@ -4047,9 +4047,9 @@ pub const Object = struct {
                         0..,
                     ) |field_ty, field_val, field_index| {
                         if (field_val != .none) continue;
-                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-                        const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+                        const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
                         big_align = big_align.max(field_align);
                         const prev_offset = offset;
                         offset = field_align.forward(offset);
@@ -4071,7 +4071,7 @@ pub const Object = struct {
                             need_unnamed = true;
                         llvm_index += 1;
 
-                        offset += Type.fromInterned(field_ty).abiSize(pt);
+                        offset += Type.fromInterned(field_ty).abiSize(zcu);
                     }
                     {
                         const prev_offset = offset;
@@ -4098,7 +4098,7 @@ pub const Object = struct {
                     if (struct_type.layout == .@"packed") {
                         comptime assert(Type.packed_struct_layout_version == 2);
 
-                        const bits = ty.bitSize(pt);
+                        const bits = ty.bitSize(zcu);
                         const llvm_int_ty = try o.builder.intType(@intCast(bits));
 
                         return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4147,7 +4147,7 @@ pub const Object = struct {
                             llvm_index += 1;
                         }
 
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                             // This is a zero-bit field - we only needed it for the alignment.
                             continue;
                         }
@@ -4160,7 +4160,7 @@ pub const Object = struct {
                             need_unnamed = true;
                         llvm_index += 1;
 
-                        offset += field_ty.abiSize(pt);
+                        offset += field_ty.abiSize(zcu);
                     }
                     {
                         const prev_offset = offset;
@@ -4184,19 +4184,19 @@ pub const Object = struct {
             },
             .un => |un| {
                 const union_ty = try o.lowerType(ty);
-                const layout = ty.unionGetLayout(pt);
+                const layout = ty.unionGetLayout(zcu);
                 if (layout.payload_size == 0) return o.lowerValue(un.tag);
 
-                const union_obj = mod.typeToUnion(ty).?;
+                const union_obj = zcu.typeToUnion(ty).?;
                 const container_layout = union_obj.flagsUnordered(ip).layout;
 
                 var need_unnamed = false;
                 const payload = if (un.tag != .none) p: {
-                    const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+                    const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
                     const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                     if (container_layout == .@"packed") {
-                        if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(union_ty, 0);
-                        const bits = ty.bitSize(pt);
+                        if (!field_ty.hasRuntimeBits(zcu)) return o.builder.intConst(union_ty, 0);
+                        const bits = ty.bitSize(zcu);
                         const llvm_int_ty = try o.builder.intType(@intCast(bits));
 
                         return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4208,7 +4208,7 @@ pub const Object = struct {
                     // must pointer cast to the expected type before accessing the union.
                     need_unnamed = layout.most_aligned_field != field_index;
 
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                         const padding_len = layout.payload_size;
                         break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8));
                     }
@@ -4217,7 +4217,7 @@ pub const Object = struct {
                     if (payload_ty != union_ty.structFields(&o.builder)[
                         @intFromBool(layout.tag_align.compare(.gte, layout.payload_align))
                     ]) need_unnamed = true;
-                    const field_size = field_ty.abiSize(pt);
+                    const field_size = field_ty.abiSize(zcu);
                     if (field_size == layout.payload_size) break :p payload;
                     const padding_len = layout.payload_size - field_size;
                     const padding_ty = try o.builder.arrayType(padding_len, .i8);
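                     // Illustrative sketch (not part of this change): if the active
                     // field occupies 4 bytes of a 12-byte payload area, an undef
                     // [8 x i8] pads the aggregate up to the union's payload size.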
@@ -4228,7 +4228,7 @@ pub const Object = struct {
                 } else p: {
                     assert(layout.tag_size == 0);
                     if (container_layout == .@"packed") {
-                        const bits = ty.bitSize(pt);
+                        const bits = ty.bitSize(zcu);
                         const llvm_int_ty = try o.builder.intType(@intCast(bits));
 
                         return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4275,8 +4275,8 @@ pub const Object = struct {
         ty: Type,
         bigint: std.math.big.int.Const,
     ) Allocator.Error!Builder.Constant {
-        const mod = o.pt.zcu;
-        return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
+        const zcu = o.pt.zcu;
+        return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(zcu).bits), bigint);
     }
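     // E.g. (illustrative): a `u7` constant lowers through an i7 here, since the
     // type is built from `intInfo(zcu).bits`; any widening to the ABI size is
     // left to the use sites.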
 
     fn lowerPtr(
@@ -4310,7 +4310,7 @@ pub const Object = struct {
                 eu_ptr,
                 offset + @import("../codegen.zig").errUnionPayloadOffset(
                     Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
-                    pt,
+                    zcu,
                 ),
             ),
             .opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
@@ -4326,7 +4326,7 @@ pub const Object = struct {
                         };
                     },
                     .Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
-                        .auto => agg_ty.structFieldOffset(@intCast(field.index), pt),
+                        .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
                         .@"extern", .@"packed" => unreachable,
                     },
                     else => unreachable,
@@ -4344,11 +4344,11 @@ pub const Object = struct {
         uav: InternPool.Key.Ptr.BaseAddr.Uav,
     ) Error!Builder.Constant {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const uav_val = uav.val;
         const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
 
         switch (ip.indexToKey(uav_val)) {
             .func => @panic("TODO"),
@@ -4358,15 +4358,15 @@ pub const Object = struct {
 
         const ptr_ty = Type.fromInterned(uav.orig_ty);
 
-        const is_fn_body = uav_ty.zigTypeTag(mod) == .Fn;
-        if ((!is_fn_body and !uav_ty.hasRuntimeBits(pt)) or
-            (is_fn_body and mod.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
+        const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
+        if ((!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) or
+            (is_fn_body and zcu.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
 
         if (is_fn_body)
             @panic("TODO");
 
-        const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
-        const alignment = ptr_ty.ptrAlignment(pt);
+        const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(zcu), target);
+        const alignment = ptr_ty.ptrAlignment(zcu);
         const llvm_global = (try o.resolveGlobalUav(uav.val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
 
         const llvm_val = try o.builder.convConst(
@@ -4398,7 +4398,7 @@ pub const Object = struct {
         const ptr_ty = try pt.navPtrType(owner_nav_index);
 
         const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
-        if ((!is_fn_body and !nav_ty.hasRuntimeBits(pt)) or
+        if ((!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) or
             (is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic))
         {
             return o.lowerPtrToVoid(ptr_ty);
@@ -4418,19 +4418,19 @@ pub const Object = struct {
     }
 
     fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         // Even though we are pointing at something which has zero bits (e.g. `void`),
         // pointers are defined to have bits, so we must return something here.
         // The value cannot be undefined, because we use the `nonnull` annotation
         // for non-optional pointers. We also need to respect the alignment, even though
         // the address will never be dereferenced.
-        const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnits() orelse
+        const int: u64 = ptr_ty.ptrInfo(zcu).flags.alignment.toByteUnits() orelse
             // Note that these 0xaa values are appropriate even in release-optimized builds
             // because we need a well-defined value that is not null, and LLVM does not
             // have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR
             // instruction is followed by a `wrap_optional`, it will return this value
             // verbatim, and the result should test as non-null.
-            switch (mod.getTarget().ptrBitWidth()) {
+            switch (zcu.getTarget().ptrBitWidth()) {
             16 => 0xaaaa,
             32 => 0xaaaaaaaa,
             64 => 0xaaaaaaaa_aaaaaaaa,
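             // Illustrative values (a sketch, not asserted by this commit): a plain
             // `*void` on a 64-bit target lowers to 0xaaaaaaaa_aaaaaaaa, while a
             // `*align(8) void` lowers to 8, keeping the alignment honored.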
@@ -4447,20 +4447,20 @@ pub const Object = struct {
     /// types to work around an LLVM deficiency when targeting ARM/AArch64.
     fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type {
         const pt = o.pt;
-        const mod = pt.zcu;
-        const int_ty = switch (ty.zigTypeTag(mod)) {
+        const zcu = pt.zcu;
+        const int_ty = switch (ty.zigTypeTag(zcu)) {
             .Int => ty,
-            .Enum => ty.intTagType(mod),
+            .Enum => ty.intTagType(zcu),
             .Float => {
                 if (!is_rmw_xchg) return .none;
-                return o.builder.intType(@intCast(ty.abiSize(pt) * 8));
+                return o.builder.intType(@intCast(ty.abiSize(zcu) * 8));
             },
             .Bool => return .i8,
             else => return .none,
         };
-        const bit_count = int_ty.intInfo(mod).bits;
+        const bit_count = int_ty.intInfo(zcu).bits;
         if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
-            return o.builder.intType(@intCast(int_ty.abiSize(pt) * 8));
+            return o.builder.intType(@intCast(int_ty.abiSize(zcu) * 8));
         } else {
             return .none;
         }
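         // Worked example (illustrative): a `u24` has 24 tag bits, which is not a
         // power of two, so it is widened to an integer of its ABI size in bits
         // (abiSize 4 => i32); a `u32` already fits and yields .none.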
@@ -4475,15 +4475,15 @@ pub const Object = struct {
         llvm_arg_i: u32,
     ) Allocator.Error!void {
         const pt = o.pt;
-        const mod = pt.zcu;
-        if (param_ty.isPtrAtRuntime(mod)) {
-            const ptr_info = param_ty.ptrInfo(mod);
+        const zcu = pt.zcu;
+        if (param_ty.isPtrAtRuntime(zcu)) {
+            const ptr_info = param_ty.ptrInfo(zcu);
             if (math.cast(u5, param_index)) |i| {
                 if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
                     try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
                 }
             }
-            if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.flags.is_allowzero) {
+            if (!param_ty.isPtrLikeOptional(zcu) and !ptr_info.flags.is_allowzero) {
                 try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
             }
             if (fn_info.cc == .Interrupt) {
@@ -4496,9 +4496,9 @@ pub const Object = struct {
             const elem_align = if (ptr_info.flags.alignment != .none)
                 ptr_info.flags.alignment
             else
-                Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1");
+                Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1");
             try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder);
-        } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
+        } else if (ccAbiPromoteInt(fn_info.cc, zcu, param_ty)) |s| switch (s) {
             .signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder),
             .unsigned => try attributes.addParamAttr(llvm_arg_i, .zeroext, &o.builder),
         };
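         // For instance (a sketch, assuming a C calling convention that promotes
         // small integers): an `i16` parameter would receive the `signext`
         // attribute and a `u8` parameter `zeroext`.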
@@ -4814,14 +4814,14 @@ pub const FuncGen = struct {
 
     fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant {
         const o = self.ng.object;
-        const pt = o.pt;
-        const ty = val.typeOf(pt.zcu);
+        const zcu = o.pt.zcu;
+        const ty = val.typeOf(zcu);
         const llvm_val = try o.lowerValue(val.toIntern());
-        if (!isByRef(ty, pt)) return llvm_val;
+        if (!isByRef(ty, zcu)) return llvm_val;
 
         // We have an LLVM value but we need to create a global constant and
         // set the value as its initializer, and then return a pointer to the global.
-        const target = pt.zcu.getTarget();
+        const target = zcu.getTarget();
         const variable_index = try o.builder.addVariable(
             .empty,
             llvm_val.typeOf(&o.builder),
@@ -4831,7 +4831,7 @@ pub const FuncGen = struct {
         variable_index.setLinkage(.private, &o.builder);
         variable_index.setMutability(.constant, &o.builder);
         variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
-        variable_index.setAlignment(ty.abiAlignment(pt).toLlvm(), &o.builder);
+        variable_index.setAlignment(ty.abiAlignment(zcu).toLlvm(), &o.builder);
         return o.builder.convConst(
             variable_index.toConst(&o.builder),
             try o.builder.ptrType(toLlvmAddressSpace(.generic, target)),
@@ -4852,8 +4852,8 @@ pub const FuncGen = struct {
 
     fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = o.pt.zcu;
+        const ip = &zcu.intern_pool;
         const air_tags = self.air.instructions.items(.tag);
         for (body, 0..) |inst, i| {
             if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
@@ -5200,19 +5200,19 @@ pub const FuncGen = struct {
         const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const callee_ty = self.typeOf(pl_op.operand);
-        const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
+        const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
             .Fn => callee_ty,
-            .Pointer => callee_ty.childType(mod),
+            .Pointer => callee_ty.childType(zcu),
             else => unreachable,
         };
-        const fn_info = mod.typeToFunc(zig_fn_ty).?;
+        const fn_info = zcu.typeToFunc(zig_fn_ty).?;
         const return_type = Type.fromInterned(fn_info.return_type);
         const llvm_fn = try self.resolveInst(pl_op.operand);
-        const target = mod.getTarget();
-        const sret = firstParamSRet(fn_info, pt, target);
+        const target = zcu.getTarget();
+        const sret = firstParamSRet(fn_info, zcu, target);
 
         var llvm_args = std.ArrayList(Builder.Value).init(self.gpa);
         defer llvm_args.deinit();
@@ -5230,13 +5230,13 @@ pub const FuncGen = struct {
             const llvm_ret_ty = try o.lowerType(return_type);
             try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);
 
-            const alignment = return_type.abiAlignment(pt).toLlvm();
+            const alignment = return_type.abiAlignment(zcu).toLlvm();
             const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment);
             try llvm_args.append(ret_ptr);
             break :blk ret_ptr;
         };
 
-        const err_return_tracing = return_type.isError(mod) and mod.comp.config.any_error_tracing;
+        const err_return_tracing = return_type.isError(zcu) and zcu.comp.config.any_error_tracing;
         if (err_return_tracing) {
             assert(self.err_ret_trace != .none);
             try llvm_args.append(self.err_ret_trace);
@@ -5250,8 +5250,8 @@ pub const FuncGen = struct {
                 const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
                 const llvm_param_ty = try o.lowerType(param_ty);
-                if (isByRef(param_ty, pt)) {
-                    const alignment = param_ty.abiAlignment(pt).toLlvm();
+                if (isByRef(param_ty, zcu)) {
+                    const alignment = param_ty.abiAlignment(zcu).toLlvm();
                     const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, "");
                     try llvm_args.append(loaded);
                 } else {
@@ -5262,10 +5262,10 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                if (isByRef(param_ty, pt)) {
+                if (isByRef(param_ty, zcu)) {
                     try llvm_args.append(llvm_arg);
                 } else {
-                    const alignment = param_ty.abiAlignment(pt).toLlvm();
+                    const alignment = param_ty.abiAlignment(zcu).toLlvm();
                     const param_llvm_ty = llvm_arg.typeOfWip(&self.wip);
                     const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
                     _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
@@ -5277,10 +5277,10 @@ pub const FuncGen = struct {
                 const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
 
-                const alignment = param_ty.abiAlignment(pt).toLlvm();
+                const alignment = param_ty.abiAlignment(zcu).toLlvm();
                 const param_llvm_ty = try o.lowerType(param_ty);
                 const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
-                if (isByRef(param_ty, pt)) {
+                if (isByRef(param_ty, zcu)) {
                     const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, "");
                     _ = try self.wip.store(.normal, loaded, arg_ptr, alignment);
                 } else {
@@ -5292,16 +5292,16 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(pt) * 8));
+                const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(zcu) * 8));
 
-                if (isByRef(param_ty, pt)) {
-                    const alignment = param_ty.abiAlignment(pt).toLlvm();
+                if (isByRef(param_ty, zcu)) {
+                    const alignment = param_ty.abiAlignment(zcu).toLlvm();
                     const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, "");
                     try llvm_args.append(loaded);
                 } else {
                     // LLVM does not allow bitcasting structs, so we must allocate
                     // a local, store it as one type, and then load it as another type.
-                    const alignment = param_ty.abiAlignment(pt).toLlvm();
+                    const alignment = param_ty.abiAlignment(zcu).toLlvm();
                     const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
                     _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
                     const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
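                     // E.g. (illustrative assumption): a `struct { x: u16, y: u16 }`
                     // argument classified as integer round-trips through the alloca
                     // and is reloaded as an i32, since `bitcast` rejects aggregates.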
@@ -5320,9 +5320,9 @@ pub const FuncGen = struct {
                 const param_ty = self.typeOf(arg);
                 const llvm_types = it.types_buffer[0..it.types_len];
                 const llvm_arg = try self.resolveInst(arg);
-                const is_by_ref = isByRef(param_ty, pt);
+                const is_by_ref = isByRef(param_ty, zcu);
                 const arg_ptr = if (is_by_ref) llvm_arg else ptr: {
-                    const alignment = param_ty.abiAlignment(pt).toLlvm();
+                    const alignment = param_ty.abiAlignment(zcu).toLlvm();
                     const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
                     break :ptr ptr;
@@ -5348,14 +5348,14 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                const alignment = arg_ty.abiAlignment(pt).toLlvm();
-                if (!isByRef(arg_ty, pt)) {
+                const alignment = arg_ty.abiAlignment(zcu).toLlvm();
+                if (!isByRef(arg_ty, zcu)) {
                     const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
                     llvm_arg = ptr;
                 }
 
-                const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
+                const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, zcu).?);
                 const array_ty = try o.builder.arrayType(count, float_ty);
 
                 const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, "");
@@ -5366,8 +5366,8 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                const alignment = arg_ty.abiAlignment(pt).toLlvm();
-                if (!isByRef(arg_ty, pt)) {
+                const alignment = arg_ty.abiAlignment(zcu).toLlvm();
+                if (!isByRef(arg_ty, zcu)) {
                     const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
                     llvm_arg = ptr;
@@ -5389,7 +5389,7 @@ pub const FuncGen = struct {
                 .byval => {
                     const param_index = it.zig_index - 1;
                     const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
-                    if (!isByRef(param_ty, pt)) {
+                    if (!isByRef(param_ty, zcu)) {
                         try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
                     }
                 },
@@ -5397,7 +5397,7 @@ pub const FuncGen = struct {
                     const param_index = it.zig_index - 1;
                     const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
                     const param_llvm_ty = try o.lowerType(param_ty);
-                    const alignment = param_ty.abiAlignment(pt).toLlvm();
+                    const alignment = param_ty.abiAlignment(zcu).toLlvm();
                     try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
                 },
                 .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -5414,7 +5414,7 @@ pub const FuncGen = struct {
                 .slice => {
                     assert(!it.byval_attr);
                     const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
-                    const ptr_info = param_ty.ptrInfo(mod);
+                    const ptr_info = param_ty.ptrInfo(zcu);
                     const llvm_arg_i = it.llvm_index - 2;
 
                     if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -5422,7 +5422,7 @@ pub const FuncGen = struct {
                             try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
                         }
                     }
-                    if (param_ty.zigTypeTag(mod) != .Optional) {
+                    if (param_ty.zigTypeTag(zcu) != .Optional) {
                         try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
                     }
                     if (ptr_info.flags.is_const) {
@@ -5431,7 +5431,7 @@ pub const FuncGen = struct {
                     const elem_align = (if (ptr_info.flags.alignment != .none)
                         @as(InternPool.Alignment, ptr_info.flags.alignment)
                     else
-                        Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm();
+                        Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm();
                     try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
                 },
             };
@@ -5456,17 +5456,17 @@ pub const FuncGen = struct {
             return .none;
         }
 
-        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
             return .none;
         }
 
         const llvm_ret_ty = try o.lowerType(return_type);
         if (ret_ptr) |rp| {
-            if (isByRef(return_type, pt)) {
+            if (isByRef(return_type, zcu)) {
                 return rp;
             } else {
                 // Our by-ref status disagrees with sret, so we must load.
-                const return_alignment = return_type.abiAlignment(pt).toLlvm();
+                const return_alignment = return_type.abiAlignment(zcu).toLlvm();
                 return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, "");
             }
         }
@@ -5477,19 +5477,19 @@ pub const FuncGen = struct {
             // In this case the function return type is honoring the calling convention by having
             // a different LLVM type than the usual one. We solve this here at the callsite
             // by using our canonical type, then loading it if necessary.
-            const alignment = return_type.abiAlignment(pt).toLlvm();
+            const alignment = return_type.abiAlignment(zcu).toLlvm();
             const rp = try self.buildAlloca(abi_ret_ty, alignment);
             _ = try self.wip.store(.normal, call, rp, alignment);
-            return if (isByRef(return_type, pt))
+            return if (isByRef(return_type, zcu))
                 rp
             else
                 try self.wip.load(.normal, llvm_ret_ty, rp, alignment, "");
         }
 
-        if (isByRef(return_type, pt)) {
+        if (isByRef(return_type, zcu)) {
             // Our by-ref status disagrees with sret, so we must allocate, store,
             // and return the allocation pointer.
-            const alignment = return_type.abiAlignment(pt).toLlvm();
+            const alignment = return_type.abiAlignment(zcu).toLlvm();
             const rp = try self.buildAlloca(llvm_ret_ty, alignment);
             _ = try self.wip.store(.normal, call, rp, alignment);
             return rp;
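             // To summarize (sketch): a call result reaches us in one of three
             // shapes: via the sret pointer, via an abi-coerced scalar that is
             // spilled and reloaded, or directly; `isByRef` and `lowerFnRetTy`
             // decide which applies.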
@@ -5540,8 +5540,8 @@ pub const FuncGen = struct {
     fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const ret_ty = self.typeOf(un_op);
 
@@ -5549,9 +5549,9 @@ pub const FuncGen = struct {
             const ptr_ty = try pt.singleMutPtrType(ret_ty);
 
             const operand = try self.resolveInst(un_op);
-            const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false;
+            const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
             if (val_is_undef and safety) undef: {
-                const ptr_info = ptr_ty.ptrInfo(mod);
+                const ptr_info = ptr_ty.ptrInfo(zcu);
                 const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
                 if (needs_bitmask) {
                     // TODO: only some bits are to be undef; we cannot write with a simple memset.
@@ -5559,13 +5559,13 @@ pub const FuncGen = struct {
                     // https://github.com/ziglang/zig/issues/15337
                     break :undef;
                 }
-                const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt));
+                const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(zcu));
                 _ = try self.wip.callMemSet(
                     self.ret_ptr,
-                    ptr_ty.ptrAlignment(pt).toLlvm(),
+                    ptr_ty.ptrAlignment(zcu).toLlvm(),
                     try o.builder.intValue(.i8, 0xaa),
                     len,
-                    if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+                    if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
                 );
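                 // Sketch of the effect (assumption): with safety on, writing an
                 // undefined return value through `self.ret_ptr` fills the result
                 // with 0xaa bytes, so e.g. a caller reading a `u32` field would
                 // observe 0xaaaaaaaa rather than stale memory.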
                 const owner_mod = self.ng.ownerModule();
                 if (owner_mod.valgrind) {
@@ -5588,9 +5588,9 @@ pub const FuncGen = struct {
             _ = try self.wip.retVoid();
             return .none;
         }
-        const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-            if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+        const fn_info = zcu.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+            if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5603,13 +5603,13 @@ pub const FuncGen = struct {
 
         const abi_ret_ty = try lowerFnRetTy(o, fn_info);
         const operand = try self.resolveInst(un_op);
-        const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false;
-        const alignment = ret_ty.abiAlignment(pt).toLlvm();
+        const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
+        const alignment = ret_ty.abiAlignment(zcu).toLlvm();
 
         if (val_is_undef and safety) {
             const llvm_ret_ty = operand.typeOfWip(&self.wip);
             const rp = try self.buildAlloca(llvm_ret_ty, alignment);
-            const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt));
+            const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(zcu));
             _ = try self.wip.callMemSet(
                 rp,
                 alignment,
@@ -5625,7 +5625,7 @@ pub const FuncGen = struct {
             return .none;
         }
 
-        if (isByRef(ret_ty, pt)) {
+        if (isByRef(ret_ty, zcu)) {
             // The operand is a pointer, but self.ret_ptr is null, which means
             // we must load from it and return the value itself.
             _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, ""));
@@ -5647,14 +5647,14 @@ pub const FuncGen = struct {
     fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const ptr_ty = self.typeOf(un_op);
-        const ret_ty = ptr_ty.childType(mod);
-        const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-            if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+        const ret_ty = ptr_ty.childType(zcu);
+        const fn_info = zcu.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+            if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5670,7 +5670,7 @@ pub const FuncGen = struct {
         }
         const ptr = try self.resolveInst(un_op);
         const abi_ret_ty = try lowerFnRetTy(o, fn_info);
-        const alignment = ret_ty.abiAlignment(pt).toLlvm();
+        const alignment = ret_ty.abiAlignment(zcu).toLlvm();
         _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, ""));
         return .none;
     }
@@ -5688,16 +5688,17 @@ pub const FuncGen = struct {
     fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const src_list = try self.resolveInst(ty_op.operand);
         const va_list_ty = ty_op.ty.toType();
         const llvm_va_list_ty = try o.lowerType(va_list_ty);
 
-        const result_alignment = va_list_ty.abiAlignment(pt).toLlvm();
+        const result_alignment = va_list_ty.abiAlignment(zcu).toLlvm();
         const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
 
         _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, "");
-        return if (isByRef(va_list_ty, pt))
+        return if (isByRef(va_list_ty, zcu))
             dest_list
         else
             try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
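         // Zig-level sketch of what reaches this lowering (illustrative only):
         //
         //     var copy = @cVaCopy(&src_list);
         //     defer @cVaEnd(&copy);
         //
         // Whether the result is the alloca itself or a load from it depends on
         // `isByRef(va_list_ty, zcu)` for the target's `va_list` representation.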
@@ -5714,14 +5715,15 @@ pub const FuncGen = struct {
     fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
+        const zcu = pt.zcu;
         const va_list_ty = self.typeOfIndex(inst);
         const llvm_va_list_ty = try o.lowerType(va_list_ty);
 
-        const result_alignment = va_list_ty.abiAlignment(pt).toLlvm();
+        const result_alignment = va_list_ty.abiAlignment(zcu).toLlvm();
         const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
 
         _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, "");
-        return if (isByRef(va_list_ty, pt))
+        return if (isByRef(va_list_ty, zcu))
             dest_list
         else
             try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
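         // Hedged Zig-level sketch of the pattern this lowers (`sum` is a
         // hypothetical function, not from this commit):
         //
         //     fn sum(n: usize, ...) callconv(.C) u64 {
         //         var va = @cVaStart();
         //         defer @cVaEnd(&va);
         //         var total: u64 = 0;
         //         for (0..n) |_| total += @cVaArg(&va, u64);
         //         return total;
         //     }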
@@ -5779,21 +5781,21 @@ pub const FuncGen = struct {
     ) Allocator.Error!Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const scalar_ty = operand_ty.scalarType(mod);
-        const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
-            .Enum => scalar_ty.intTagType(mod),
+        const zcu = pt.zcu;
+        const scalar_ty = operand_ty.scalarType(zcu);
+        const int_ty = switch (scalar_ty.zigTypeTag(zcu)) {
+            .Enum => scalar_ty.intTagType(zcu),
             .Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
             .Optional => blk: {
-                const payload_ty = operand_ty.optionalChild(mod);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or
-                    operand_ty.optionalReprIsPayload(mod))
+                const payload_ty = operand_ty.optionalChild(zcu);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or
+                    operand_ty.optionalReprIsPayload(zcu))
                 {
                     break :blk operand_ty;
                 }
                 // We need to emit instructions to check for equality/inequality
                 // of optionals that are not pointers.
-                const is_by_ref = isByRef(scalar_ty, pt);
+                const is_by_ref = isByRef(scalar_ty, zcu);
                 const opt_llvm_ty = try o.lowerType(scalar_ty);
                 const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref);
                 const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref);
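                 // The remaining emitted checks (sketch of intent): the two
                 // optionals compare equal iff lhs_non_null == rhs_non_null and,
                 // when both are non-null, their payloads compare equal;
                 // inequality is the negation.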
@@ -5860,7 +5862,7 @@ pub const FuncGen = struct {
             .Float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs }),
             else => unreachable,
         };
-        const is_signed = int_ty.isSignedInt(mod);
+        const is_signed = int_ty.isSignedInt(zcu);
         const cond: Builder.IntegerCondition = switch (op) {
             .eq => .eq,
             .neq => .ne,
@@ -5886,15 +5888,15 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst_ty = self.typeOfIndex(inst);
 
-        if (inst_ty.isNoReturn(mod)) {
+        if (inst_ty.isNoReturn(zcu)) {
             try self.genBodyDebugScope(maybe_inline_func, body);
             return .none;
         }
 
-        const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt);
+        const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
 
         var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 };
         defer if (have_block_result) breaks.list.deinit(self.gpa);
@@ -5918,7 +5920,7 @@ pub const FuncGen = struct {
                 // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
                 // of function pointers; however, the phi makes it a runtime value, and therefore
                 // the LLVM type has to be wrapped in a pointer.
-                if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)) {
+                if (inst_ty.zigTypeTag(zcu) == .Fn or isByRef(inst_ty, zcu)) {
                     break :ty .ptr;
                 }
                 break :ty raw_llvm_ty;
@@ -5936,13 +5938,13 @@ pub const FuncGen = struct {
 
     fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const pt = o.pt;
+        const zcu = o.pt.zcu;
         const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
         const block = self.blocks.get(branch.block_inst).?;
 
         // Add the values to the lists only if the break provides a value.
         const operand_ty = self.typeOf(branch.operand);
-        if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+        if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
             const val = try self.resolveInst(branch.operand);
 
             // For the phi node, we need the basic blocks and the values of the
@@ -5977,6 +5979,7 @@ pub const FuncGen = struct {
     fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         const err_union = try self.resolveInst(pl_op.operand);
@@ -5984,19 +5987,19 @@ pub const FuncGen = struct {
         const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
         const err_union_ty = self.typeOf(pl_op.operand);
         const payload_ty = self.typeOfIndex(inst);
-        const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
         const is_unused = self.liveness.isUnused(inst);
         return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
     }
 
     fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
         const err_union_ptr = try self.resolveInst(extra.data.ptr);
         const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
-        const err_union_ty = self.typeOf(extra.data.ptr).childType(mod);
+        const err_union_ty = self.typeOf(extra.data.ptr).childType(zcu);
         const is_unused = self.liveness.isUnused(inst);
         return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused);
     }
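     // Roughly: lowerTry tests the error-union's error code, branches into
     // `body` (the error path) when it is nonzero, and otherwise continues with
     // the payload. A hedged Zig-level picture, where `f` is a hypothetical
     // callee:
     //
     //     if (f()) |payload| { ... } else |err| return err;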
@@ -6012,13 +6015,13 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = fg.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
-        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+        const zcu = pt.zcu;
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
+        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
         const err_union_llvm_ty = try o.lowerType(err_union_ty);
         const error_type = try o.errorIntType();
 
-        if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             const loaded = loaded: {
                 if (!payload_has_bits) {
                     // TODO add alignment to this load
@@ -6028,7 +6031,7 @@ pub const FuncGen = struct {
                         err_union;
                 }
                 const err_field_index = try errUnionErrorOffset(payload_ty, pt);
-                if (operand_is_ptr or isByRef(err_union_ty, pt)) {
+                if (operand_is_ptr or isByRef(err_union_ty, zcu)) {
                     const err_field_ptr =
                         try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, "");
                     // TODO add alignment to this load
@@ -6059,10 +6062,10 @@ pub const FuncGen = struct {
         const offset = try errUnionPayloadOffset(payload_ty, pt);
         if (operand_is_ptr) {
             return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
-        } else if (isByRef(err_union_ty, pt)) {
+        } else if (isByRef(err_union_ty, zcu)) {
             const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
-            const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
-            if (isByRef(payload_ty, pt)) {
+            const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
+            if (isByRef(payload_ty, zcu)) {
                 if (can_elide_load)
                     return payload_ptr;
 
@@ -6140,7 +6143,7 @@ pub const FuncGen = struct {
 
     fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const loop = self.air.extraData(Air.Block, ty_pl.payload);
         const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
@@ -6156,7 +6159,7 @@ pub const FuncGen = struct {
         // would have been emitted already. Also, the main loop in genBody can
         // be while(true) instead of for(body), which will eliminate one branch on
         // a hot path.
-        if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) {
+        if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(zcu)) {
             _ = try self.wip.br(loop_block);
         }
         return .none;
@@ -6165,15 +6168,15 @@ pub const FuncGen = struct {
     fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_ty = self.typeOf(ty_op.operand);
-        const array_ty = operand_ty.childType(mod);
+        const array_ty = operand_ty.childType(zcu);
         const llvm_usize = try o.lowerType(Type.usize);
-        const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod));
+        const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(zcu));
         const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
         const operand = try self.resolveInst(ty_op.operand);
-        if (!array_ty.hasRuntimeBitsIgnoreComptime(pt))
+        if (!array_ty.hasRuntimeBitsIgnoreComptime(zcu))
             return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, "");
         const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{
             try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0),
@@ -6184,17 +6187,17 @@ pub const FuncGen = struct {
     fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
         const workaround_operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType(mod);
-        const is_signed_int = operand_scalar_ty.isSignedInt(mod);
+        const operand_scalar_ty = operand_ty.scalarType(zcu);
+        const is_signed_int = operand_scalar_ty.isSignedInt(zcu);
 
         const operand = o: {
             // Work around LLVM bug. See https://github.com/ziglang/zig/issues/17381.
-            const bit_size = operand_scalar_ty.bitSize(pt);
+            const bit_size = operand_scalar_ty.bitSize(zcu);
             for ([_]u8{ 8, 16, 32, 64, 128 }) |b| {
                 if (bit_size < b) {
                     break :o try self.wip.cast(
                         if (is_signed_int) .sext else .zext,
                         workaround_operand,
                         try o.builder.intType(b),
                         "",
                     );
                 }
             }
             break :o workaround_operand;
         };
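         // E.g. (illustrative): an `i3` operand is first widened to i8 via
         // sext/zext before the int-to-float conversion, sidestepping the LLVM
         // issue referenced above for sub-byte integer widths.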
 
         const dest_ty = self.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType(mod);
+        const dest_scalar_ty = dest_ty.scalarType(zcu);
         const dest_llvm_ty = try o.lowerType(dest_ty);
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
 
         if (intrinsicsAllowed(dest_scalar_ty, target)) return self.wip.conv(
             if (is_signed_int) .signed else .unsigned,
@@ -6222,7 +6225,7 @@ pub const FuncGen = struct {
             "",
         );
 
-        const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(pt)));
+        const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(zcu)));
         const rt_int_ty = try o.builder.intType(rt_int_bits);
         var extended = try self.wip.conv(
             if (is_signed_int) .signed else .unsigned,
@@ -6269,29 +6272,29 @@ pub const FuncGen = struct {
 
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const target = zcu.getTarget();
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType(mod);
+        const operand_scalar_ty = operand_ty.scalarType(zcu);
 
         const dest_ty = self.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType(mod);
+        const dest_scalar_ty = dest_ty.scalarType(zcu);
         const dest_llvm_ty = try o.lowerType(dest_ty);
 
         if (intrinsicsAllowed(operand_scalar_ty, target)) {
             // TODO set fast math flag
             return self.wip.conv(
-                if (dest_scalar_ty.isSignedInt(mod)) .signed else .unsigned,
+                if (dest_scalar_ty.isSignedInt(zcu)) .signed else .unsigned,
                 operand,
                 dest_llvm_ty,
                 "",
             );
         }
 
-        const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(pt)));
+        const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(zcu)));
         const ret_ty = try o.builder.intType(rt_int_bits);
         const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
             // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -6303,7 +6306,7 @@ pub const FuncGen = struct {
         const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
 
         const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
-        const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";
+        const sign_prefix = if (dest_scalar_ty.isSignedInt(zcu)) "" else "uns";
 
         const fn_name = try o.builder.strtabStringFmt("__fix{s}{s}f{s}i", .{
             sign_prefix,
@@ -6330,29 +6333,29 @@ pub const FuncGen = struct {
 
     fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
         const o = fg.ng.object;
-        const mod = o.pt.zcu;
-        return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
+        const zcu = o.pt.zcu;
+        return if (ty.isSlice(zcu)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
     }
 
     fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
         const o = fg.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const llvm_usize = try o.lowerType(Type.usize);
-        switch (ty.ptrSize(mod)) {
+        switch (ty.ptrSize(zcu)) {
             .Slice => {
                 const len = try fg.wip.extractValue(ptr, &.{1}, "");
-                const elem_ty = ty.childType(mod);
-                const abi_size = elem_ty.abiSize(pt);
+                const elem_ty = ty.childType(zcu);
+                const abi_size = elem_ty.abiSize(zcu);
                 if (abi_size == 1) return len;
                 const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size);
                 return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, "");
             },
             .One => {
-                const array_ty = ty.childType(mod);
-                const elem_ty = array_ty.childType(mod);
-                const abi_size = elem_ty.abiSize(pt);
-                return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size);
+                const array_ty = ty.childType(zcu);
+                const elem_ty = array_ty.childType(zcu);
+                const abi_size = elem_ty.abiSize(zcu);
+                return o.builder.intValue(llvm_usize, array_ty.arrayLen(zcu) * abi_size);
             },
             .Many, .C => unreachable,
         }
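         // Worked examples (illustrative): a `[]u32` slice of len 4 yields
         // `mul nuw len, 4` = 16 bytes; a `*[8]u16` pointer folds to the
         // constant 16 at compile time.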
@@ -6366,11 +6369,11 @@ pub const FuncGen = struct {
 
     fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const slice_ptr = try self.resolveInst(ty_op.operand);
         const slice_ptr_ty = self.typeOf(ty_op.operand);
-        const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(mod));
+        const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(zcu));
 
         return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, "");
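         // Index 0 addresses the slice's `ptr` field and index 1 its `len`
         // field, matching the `{ ptr, len }` aggregate that slices lower to.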
     }
@@ -6378,21 +6381,21 @@ pub const FuncGen = struct {
     fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const slice_ty = self.typeOf(bin_op.lhs);
         const slice = try self.resolveInst(bin_op.lhs);
         const index = try self.resolveInst(bin_op.rhs);
-        const elem_ty = slice_ty.childType(mod);
+        const elem_ty = slice_ty.childType(zcu);
         const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
         const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
         const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
-        if (isByRef(elem_ty, pt)) {
+        if (isByRef(elem_ty, zcu)) {
             if (self.canElideLoad(body_tail))
                 return ptr;
 
-            const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
+            const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
             return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
         }
 
@@ -6401,14 +6404,14 @@ pub const FuncGen = struct {
 
     fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const slice_ty = self.typeOf(bin_op.lhs);
 
         const slice = try self.resolveInst(bin_op.lhs);
         const index = try self.resolveInst(bin_op.rhs);
-        const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(mod));
+        const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(zcu));
         const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
         return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
     }
@@ -6416,7 +6419,7 @@ pub const FuncGen = struct {
     fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
 
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -6424,16 +6427,16 @@ pub const FuncGen = struct {
         const array_llvm_val = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const array_llvm_ty = try o.lowerType(array_ty);
-        const elem_ty = array_ty.childType(mod);
-        if (isByRef(array_ty, pt)) {
+        const elem_ty = array_ty.childType(zcu);
+        if (isByRef(array_ty, zcu)) {
             const indices: [2]Builder.Value = .{
                 try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs,
             };
-            if (isByRef(elem_ty, pt)) {
+            if (isByRef(elem_ty, zcu)) {
                 const elem_ptr =
                     try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
                 if (canElideLoad(self, body_tail)) return elem_ptr;
-                const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
+                const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
                 return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
             } else {
                 const elem_ptr =
@@ -6449,23 +6452,23 @@ pub const FuncGen = struct {
     fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
         const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
         const base_ptr = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         // TODO: once we move fully to opaque pointers in LLVM 16, we can remove this branch.
-        const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod))
+        const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(zcu))
             // If this is a single-item pointer to an array, we need another index in the GEP.
             &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
         else
             &.{rhs}, "");
-        if (isByRef(elem_ty, pt)) {
+        if (isByRef(elem_ty, zcu)) {
             if (self.canElideLoad(body_tail)) return ptr;
-            const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
+            const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
             return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
         }
 
@@ -6475,21 +6478,21 @@ pub const FuncGen = struct {
     fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = ptr_ty.childType(mod);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.resolveInst(bin_op.lhs);
+        const elem_ty = ptr_ty.childType(zcu);
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return self.resolveInst(bin_op.lhs);
 
         const base_ptr = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
         const elem_ptr = ty_pl.ty.toType();
-        if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr;
+        if (elem_ptr.ptrInfo(zcu).flags.vector_index != .none) return base_ptr;
 
         const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
-        return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod))
+        return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(zcu))
             // If this is a single-item pointer to an array, we need another index in the GEP.
             &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
         else
@@ -6518,35 +6521,35 @@ pub const FuncGen = struct {
     fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
         const struct_ty = self.typeOf(struct_field.struct_operand);
         const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
         const field_index = struct_field.field_index;
-        const field_ty = struct_ty.structFieldType(field_index, mod);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+        const field_ty = struct_ty.structFieldType(field_index, zcu);
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
 
-        if (!isByRef(struct_ty, pt)) {
-            assert(!isByRef(field_ty, pt));
-            switch (struct_ty.zigTypeTag(mod)) {
-                .Struct => switch (struct_ty.containerLayout(mod)) {
+        if (!isByRef(struct_ty, zcu)) {
+            assert(!isByRef(field_ty, zcu));
+            switch (struct_ty.zigTypeTag(zcu)) {
+                .Struct => switch (struct_ty.containerLayout(zcu)) {
                     .@"packed" => {
-                        const struct_type = mod.typeToStruct(struct_ty).?;
+                        const struct_type = zcu.typeToStruct(struct_ty).?;
                         const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
                         const containing_int = struct_llvm_val;
                         const shift_amt =
                             try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
                         const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
                         const elem_llvm_ty = try o.lowerType(field_ty);
-                        if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
-                            const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+                        if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) {
+                            const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
                             const truncated_int =
                                 try self.wip.cast(.trunc, shifted_value, same_size_int, "");
                             return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
-                        } else if (field_ty.isPtrAtRuntime(mod)) {
-                            const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+                        } else if (field_ty.isPtrAtRuntime(zcu)) {
+                            const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
                             const truncated_int =
                                 try self.wip.cast(.trunc, shifted_value, same_size_int, "");
                             return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
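
The shift/truncate/bitcast sequence above can be expressed in plain Zig; a minimal sketch (assumed example, not from this patch):

    const std = @import("std");

    fn extractPackedFloat(containing: u32, bit_offset: u5) f16 {
        // Shift the field down to bit 0, cut it to its own width, then
        // reinterpret the bits as the field's type.
        const shifted = containing >> bit_offset;
        const as_int: u16 = @truncate(shifted);
        return @bitCast(as_int);
    }

    test "packed field extraction" {
        const P = packed struct { a: u16, b: f16 };
        const p = P{ .a = 0xAAAA, .b = 1.5 };
        try std.testing.expectEqual(@as(f16, 1.5), extractPackedFloat(@bitCast(p), 16));
    }
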
@@ -6559,16 +6562,16 @@ pub const FuncGen = struct {
                     },
                 },
                 .Union => {
-                    assert(struct_ty.containerLayout(mod) == .@"packed");
+                    assert(struct_ty.containerLayout(zcu) == .@"packed");
                     const containing_int = struct_llvm_val;
                     const elem_llvm_ty = try o.lowerType(field_ty);
-                    if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
-                        const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+                    if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) {
+                        const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
                         const truncated_int =
                             try self.wip.cast(.trunc, containing_int, same_size_int, "");
                         return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
-                    } else if (field_ty.isPtrAtRuntime(mod)) {
-                        const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+                    } else if (field_ty.isPtrAtRuntime(zcu)) {
+                        const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
                         const truncated_int =
                             try self.wip.cast(.trunc, containing_int, same_size_int, "");
                         return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
@@ -6579,20 +6582,20 @@ pub const FuncGen = struct {
             }
         }
 
-        switch (struct_ty.zigTypeTag(mod)) {
+        switch (struct_ty.zigTypeTag(zcu)) {
             .Struct => {
-                const layout = struct_ty.containerLayout(mod);
+                const layout = struct_ty.containerLayout(zcu);
                 assert(layout != .@"packed");
                 const struct_llvm_ty = try o.lowerType(struct_ty);
                 const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
                 const field_ptr =
                     try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
-                const alignment = struct_ty.structFieldAlign(field_index, pt);
+                const alignment = struct_ty.structFieldAlign(field_index, zcu);
                 const field_ptr_ty = try pt.ptrType(.{
                     .child = field_ty.toIntern(),
                     .flags = .{ .alignment = alignment },
                 });
-                if (isByRef(field_ty, pt)) {
+                if (isByRef(field_ty, zcu)) {
                     if (canElideLoad(self, body_tail))
                         return field_ptr;
 
@@ -6605,12 +6608,12 @@ pub const FuncGen = struct {
             },
             .Union => {
                 const union_llvm_ty = try o.lowerType(struct_ty);
-                const layout = struct_ty.unionGetLayout(pt);
+                const layout = struct_ty.unionGetLayout(zcu);
                 const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
                 const field_ptr =
                     try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
                 const payload_alignment = layout.payload_align.toLlvm();
-                if (isByRef(field_ty, pt)) {
+                if (isByRef(field_ty, zcu)) {
                     if (canElideLoad(self, body_tail)) return field_ptr;
                     return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
                 } else {
@@ -6624,14 +6627,14 @@ pub const FuncGen = struct {
     fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
         const field_ptr = try self.resolveInst(extra.field_ptr);
 
-        const parent_ty = ty_pl.ty.toType().childType(mod);
-        const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
+        const parent_ty = ty_pl.ty.toType().childType(zcu);
+        const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
         if (field_offset == 0) return field_ptr;
 
         const res_ty = try o.lowerType(ty_pl.ty.toType());
@@ -6686,7 +6689,7 @@ pub const FuncGen = struct {
 
     fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         const operand = try self.resolveInst(pl_op.operand);
         const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
@@ -6697,7 +6700,7 @@ pub const FuncGen = struct {
             self.file,
             self.scope,
             self.prev_dbg_line,
-            try o.lowerDebugType(ptr_ty.childType(mod)),
+            try o.lowerDebugType(ptr_ty.childType(zcu)),
         );
 
         _ = try self.wip.callIntrinsic(
@@ -6741,9 +6744,9 @@ pub const FuncGen = struct {
             try o.lowerDebugType(operand_ty),
         );
 
-        const pt = o.pt;
+        const zcu = o.pt.zcu;
         const owner_mod = self.ng.ownerModule();
-        if (isByRef(operand_ty, pt)) {
+        if (isByRef(operand_ty, zcu)) {
             _ = try self.wip.callIntrinsic(
                 .normal,
                 .none,
@@ -6760,7 +6763,7 @@ pub const FuncGen = struct {
             // We avoid taking this path for naked functions because there's no guarantee that such
             // functions even have a valid stack pointer, making the `alloca` + `store` unsafe.
 
-            const alignment = operand_ty.abiAlignment(pt).toLlvm();
+            const alignment = operand_ty.abiAlignment(zcu).toLlvm();
             const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
             _ = try self.wip.store(.normal, operand, alloca, alignment);
             _ = try self.wip.callIntrinsic(
@@ -6832,8 +6835,8 @@ pub const FuncGen = struct {
         // if so, the element type itself.
         const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count);
         const pt = o.pt;
-        const mod = pt.zcu;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const target = zcu.getTarget();
 
         var llvm_ret_i: usize = 0;
         var llvm_param_i: usize = 0;
@@ -6860,8 +6863,8 @@ pub const FuncGen = struct {
             if (output != .none) {
                 const output_inst = try self.resolveInst(output);
                 const output_ty = self.typeOf(output);
-                assert(output_ty.zigTypeTag(mod) == .Pointer);
-                const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(mod));
+                assert(output_ty.zigTypeTag(zcu) == .Pointer);
+                const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(zcu));
 
                 switch (constraint[0]) {
                     '=' => {},
@@ -6932,13 +6935,13 @@ pub const FuncGen = struct {
 
             const arg_llvm_value = try self.resolveInst(input);
             const arg_ty = self.typeOf(input);
-            const is_by_ref = isByRef(arg_ty, pt);
+            const is_by_ref = isByRef(arg_ty, zcu);
             if (is_by_ref) {
                 if (constraintAllowsMemory(constraint)) {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
                 } else {
-                    const alignment = arg_ty.abiAlignment(pt).toLlvm();
+                    const alignment = arg_ty.abiAlignment(zcu).toLlvm();
                     const arg_llvm_ty = try o.lowerType(arg_ty);
                     const load_inst =
                         try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, "");
@@ -6950,7 +6953,7 @@ pub const FuncGen = struct {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
                 } else {
-                    const alignment = arg_ty.abiAlignment(pt).toLlvm();
+                    const alignment = arg_ty.abiAlignment(zcu).toLlvm();
                     const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment);
                     llvm_param_values[llvm_param_i] = arg_ptr;
@@ -6978,7 +6981,7 @@ pub const FuncGen = struct {
             // In the case of indirect inputs, LLVM requires the callsite to have
             // an elementtype(<ty>) attribute.
             llvm_param_attrs[llvm_param_i] = if (constraint[0] == '*')
-                try o.lowerPtrElemTy(if (is_by_ref) arg_ty else arg_ty.childType(mod))
+                try o.lowerPtrElemTy(if (is_by_ref) arg_ty else arg_ty.childType(zcu))
             else
                 .none;
 
@@ -6997,12 +7000,12 @@ pub const FuncGen = struct {
             if (constraint[0] != '+') continue;
 
             const rw_ty = self.typeOf(output);
-            const llvm_elem_ty = try o.lowerPtrElemTy(rw_ty.childType(mod));
+            const llvm_elem_ty = try o.lowerPtrElemTy(rw_ty.childType(zcu));
             if (is_indirect) {
                 llvm_param_values[llvm_param_i] = llvm_rw_val;
                 llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip);
             } else {
-                const alignment = rw_ty.abiAlignment(pt).toLlvm();
+                const alignment = rw_ty.abiAlignment(zcu).toLlvm();
                 const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, "");
                 llvm_param_values[llvm_param_i] = loaded;
                 llvm_param_types[llvm_param_i] = llvm_elem_ty;
@@ -7163,7 +7166,7 @@ pub const FuncGen = struct {
                 const output_ptr = try self.resolveInst(output);
                 const output_ptr_ty = self.typeOf(output);
 
-                const alignment = output_ptr_ty.ptrAlignment(pt).toLlvm();
+                const alignment = output_ptr_ty.ptrAlignment(zcu).toLlvm();
                 _ = try self.wip.store(.normal, output_value, output_ptr, alignment);
             } else {
                 ret_val = output_value;
@@ -7182,23 +7185,23 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const operand = try self.resolveInst(un_op);
         const operand_ty = self.typeOf(un_op);
-        const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+        const optional_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
         const optional_llvm_ty = try o.lowerType(optional_ty);
-        const payload_ty = optional_ty.optionalChild(mod);
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        const payload_ty = optional_ty.optionalChild(zcu);
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             const loaded = if (operand_is_ptr)
                 try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
             else
                 operand;
-            if (payload_ty.isSlice(mod)) {
+            if (payload_ty.isSlice(zcu)) {
                 const slice_ptr = try self.wip.extractValue(loaded, &.{0}, "");
                 const ptr_ty = try o.builder.ptrType(toLlvmAddressSpace(
-                    payload_ty.ptrAddressSpace(mod),
-                    mod.getTarget(),
+                    payload_ty.ptrAddressSpace(zcu),
+                    zcu.getTarget(),
                 ));
                 return self.wip.icmp(cond, slice_ptr, try o.builder.nullValue(ptr_ty), "");
             }
@@ -7207,7 +7210,7 @@ pub const FuncGen = struct {
 
         comptime assert(optional_layout_version == 3);
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             const loaded = if (operand_is_ptr)
                 try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
             else
@@ -7215,7 +7218,7 @@ pub const FuncGen = struct {
             return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), "");
         }
 
-        const is_by_ref = operand_is_ptr or isByRef(optional_ty, pt);
+        const is_by_ref = operand_is_ptr or isByRef(optional_ty, zcu);
         return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref);
     }
 
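As a hypothetical aside (not part of this patch), the two optional representations this function distinguishes are observable from plain Zig:

    const std = @import("std");

    test "optional representations" {
        // Pointer-like optionals reuse the payload representation: the null
        // optional is the all-zero pointer, so no extra flag byte is needed.
        try std.testing.expectEqual(@sizeOf(*u8), @sizeOf(?*u8));
        // Other optionals carry an explicit non-null byte next to the
        // payload, which is what optCmpNull inspects.
        try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
    }
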
@@ -7227,16 +7230,16 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const operand = try self.resolveInst(un_op);
         const operand_ty = self.typeOf(un_op);
-        const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
+        const err_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
         const error_type = try o.errorIntType();
         const zero = try o.builder.intValue(error_type, 0);
 
-        if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             const val: Builder.Constant = switch (cond) {
                 .eq => .true, // 0 == 0
                 .ne => .false, // 0 != 0
@@ -7245,7 +7248,7 @@ pub const FuncGen = struct {
             return val.toValue();
         }
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             const loaded = if (operand_is_ptr)
                 try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "")
             else
@@ -7255,7 +7258,7 @@ pub const FuncGen = struct {
 
         const err_field_index = try errUnionErrorOffset(payload_ty, pt);
 
-        const loaded = if (operand_is_ptr or isByRef(err_union_ty, pt)) loaded: {
+        const loaded = if (operand_is_ptr or isByRef(err_union_ty, zcu)) loaded: {
             const err_union_llvm_ty = try o.lowerType(err_union_ty);
             const err_field_ptr =
                 try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, "");
@@ -7267,17 +7270,17 @@ pub const FuncGen = struct {
     fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const optional_ty = self.typeOf(ty_op.operand).childType(mod);
-        const payload_ty = optional_ty.optionalChild(mod);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        const optional_ty = self.typeOf(ty_op.operand).childType(zcu);
+        const payload_ty = optional_ty.optionalChild(zcu);
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             // We have a pointer to a zero-bit value and we need to return
             // a pointer to a zero-bit value.
             return operand;
         }
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             // The payload and the optional are the same value.
             return operand;
         }
@@ -7289,18 +7292,18 @@ pub const FuncGen = struct {
 
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const optional_ty = self.typeOf(ty_op.operand).childType(mod);
-        const payload_ty = optional_ty.optionalChild(mod);
+        const optional_ty = self.typeOf(ty_op.operand).childType(zcu);
+        const payload_ty = optional_ty.optionalChild(zcu);
         const non_null_bit = try o.builder.intValue(.i8, 1);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             // We have a pointer to an i8. We need to set it to 1 and then return the same pointer.
             _ = try self.wip.store(.normal, non_null_bit, operand, .default);
             return operand;
         }
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             // The payload and the optional are the same value.
             // Setting to non-null will be done when the payload is set.
             return operand;
@@ -7321,21 +7324,21 @@ pub const FuncGen = struct {
     fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.typeOf(ty_op.operand);
         const payload_ty = self.typeOfIndex(inst);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
 
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             // Payload value is the same as the optional value.
             return operand;
         }
 
         const opt_llvm_ty = try o.lowerType(optional_ty);
-        const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
         return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
     }
 
@@ -7346,26 +7349,26 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+        const err_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
         const result_ty = self.typeOfIndex(inst);
-        const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty;
+        const payload_ty = if (operand_is_ptr) result_ty.childType(zcu) else result_ty;
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             return if (operand_is_ptr) operand else .none;
         }
         const offset = try errUnionPayloadOffset(payload_ty, pt);
         const err_union_llvm_ty = try o.lowerType(err_union_ty);
         if (operand_is_ptr) {
             return self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
-        } else if (isByRef(err_union_ty, pt)) {
-            const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
+        } else if (isByRef(err_union_ty, zcu)) {
+            const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
             const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
-            if (isByRef(payload_ty, pt)) {
+            if (isByRef(payload_ty, zcu)) {
                 if (self.canElideLoad(body_tail)) return payload_ptr;
                 return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
             }
@@ -7382,13 +7385,13 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
         const error_type = try o.errorIntType();
-        const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
-        if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        const err_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
+        if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             if (operand_is_ptr) {
                 return operand;
             } else {
@@ -7396,15 +7399,15 @@ pub const FuncGen = struct {
             }
         }
 
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             if (!operand_is_ptr) return operand;
             return self.wip.load(.normal, error_type, operand, .default, "");
         }
 
         const offset = try errUnionErrorOffset(payload_ty, pt);
 
-        if (operand_is_ptr or isByRef(err_union_ty, pt)) {
+        if (operand_is_ptr or isByRef(err_union_ty, zcu)) {
             const err_union_llvm_ty = try o.lowerType(err_union_ty);
             const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
             return self.wip.load(.normal, error_type, err_field_ptr, .default, "");
@@ -7416,21 +7419,21 @@ pub const FuncGen = struct {
     fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
+        const err_union_ty = self.typeOf(ty_op.operand).childType(zcu);
 
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
         const non_error_val = try o.builder.intValue(try o.errorIntType(), 0);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             _ = try self.wip.store(.normal, non_error_val, operand, .default);
             return operand;
         }
         const err_union_llvm_ty = try o.lowerType(err_union_ty);
         {
             const err_int_ty = try pt.errorIntType();
-            const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+            const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
             const error_offset = try errUnionErrorOffset(payload_ty, pt);
             // First set the non-error value.
             const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
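
For context on the offsets used above, a minimal sketch (assumed example, not from this patch): an error union stores an error-code integer beside the payload, and which field comes first depends on their alignments, hence the computed `errUnionErrorOffset`/`errUnionPayloadOffset` rather than hard-coded indices.

    const std = @import("std");

    test "error union carries an error code beside the payload" {
        const eu: error{Boom}!u64 = error.Boom;
        try std.testing.expectError(error.Boom, eu);
        // The union is strictly larger than the bare payload because the
        // error code occupies its own (aligned) slot.
        try std.testing.expect(@sizeOf(error{Boom}!u64) > @sizeOf(u64));
    }
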
@@ -7457,7 +7460,7 @@ pub const FuncGen = struct {
     fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
 
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const struct_ty = ty_pl.ty.toType();
@@ -7468,8 +7471,8 @@ pub const FuncGen = struct {
         assert(self.err_ret_trace != .none);
         const field_ptr =
             try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, "");
-        const field_alignment = struct_ty.structFieldAlign(field_index, pt);
-        const field_ty = struct_ty.structFieldType(field_index, mod);
+        const field_alignment = struct_ty.structFieldAlign(field_index, zcu);
+        const field_ty = struct_ty.structFieldType(field_index, zcu);
         const field_ptr_ty = try pt.ptrType(.{
             .child = field_ty.toIntern(),
             .flags = .{ .alignment = field_alignment },
@@ -7503,23 +7506,23 @@ pub const FuncGen = struct {
     fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const payload_ty = self.typeOf(ty_op.operand);
         const non_null_bit = try o.builder.intValue(.i8, 1);
         comptime assert(optional_layout_version == 3);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return non_null_bit;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return non_null_bit;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.typeOfIndex(inst);
-        if (optional_ty.optionalReprIsPayload(mod)) return operand;
+        if (optional_ty.optionalReprIsPayload(zcu)) return operand;
         const llvm_optional_ty = try o.lowerType(optional_ty);
-        if (isByRef(optional_ty, pt)) {
+        if (isByRef(optional_ty, zcu)) {
             const directReturn = self.isNextRet(body_tail);
             const optional_ptr = if (directReturn)
                 self.ret_ptr
             else brk: {
-                const alignment = optional_ty.abiAlignment(pt).toLlvm();
+                const alignment = optional_ty.abiAlignment(zcu).toLlvm();
                 const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment);
                 break :brk optional_ptr;
             };
@@ -7537,12 +7540,13 @@ pub const FuncGen = struct {
     fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const err_un_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
         const payload_ty = self.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             return operand;
         }
         const ok_err_code = try o.builder.intValue(try o.errorIntType(), 0);
@@ -7550,19 +7554,19 @@ pub const FuncGen = struct {
 
         const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
         const error_offset = try errUnionErrorOffset(payload_ty, pt);
-        if (isByRef(err_un_ty, pt)) {
+        if (isByRef(err_un_ty, zcu)) {
             const directReturn = self.isNextRet(body_tail);
             const result_ptr = if (directReturn)
                 self.ret_ptr
             else brk: {
-                const alignment = err_un_ty.abiAlignment(pt).toLlvm();
+                const alignment = err_un_ty.abiAlignment(zcu).toLlvm();
                 const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
                 break :brk result_ptr;
             };
 
             const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
             const err_int_ty = try pt.errorIntType();
-            const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+            const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
             _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment);
             const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
             const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
@@ -7578,30 +7582,30 @@ pub const FuncGen = struct {
     fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const err_un_ty = self.typeOfIndex(inst);
-        const payload_ty = err_un_ty.errorUnionPayload(mod);
+        const payload_ty = err_un_ty.errorUnionPayload(zcu);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return operand;
         const err_un_llvm_ty = try o.lowerType(err_un_ty);
 
         const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
         const error_offset = try errUnionErrorOffset(payload_ty, pt);
-        if (isByRef(err_un_ty, pt)) {
+        if (isByRef(err_un_ty, zcu)) {
             const directReturn = self.isNextRet(body_tail);
             const result_ptr = if (directReturn)
                 self.ret_ptr
             else brk: {
-                const alignment = err_un_ty.abiAlignment(pt).toLlvm();
+                const alignment = err_un_ty.abiAlignment(zcu).toLlvm();
                 const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
                 break :brk result_ptr;
             };
 
             const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
             const err_int_ty = try pt.errorIntType();
-            const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+            const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
             _ = try self.wip.store(.normal, operand, err_ptr, error_alignment);
             const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
             const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
@@ -7639,7 +7643,7 @@ pub const FuncGen = struct {
     fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
         const extra = self.air.extraData(Air.Bin, data.payload).data;
 
@@ -7649,9 +7653,9 @@ pub const FuncGen = struct {
         const operand = try self.resolveInst(extra.rhs);
 
         const access_kind: Builder.MemoryAccessKind =
-            if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
-        const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
-        const alignment = vector_ptr_ty.ptrAlignment(pt).toLlvm();
+            if (vector_ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
+        const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(zcu));
+        const alignment = vector_ptr_ty.ptrAlignment(zcu).toLlvm();
         const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");
 
         const new_vector = try self.wip.insertElement(loaded, operand, index, "");
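
A hypothetical source-level view (not part of this patch) of the load/insert-element/store sequence built above:

    const std = @import("std");

    test "vector element store" {
        var v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
        // Behind a pointer, this single-element assignment is lowered as
        // load vector, insert element, store vector.
        v[2] = 99;
        try std.testing.expectEqual(@as(u32, 99), v[2]);
    }
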
@@ -7661,18 +7665,18 @@ pub const FuncGen = struct {
 
     fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, .normal, inst_ty, 2, .{ lhs, rhs });
         return self.wip.callIntrinsic(
             .normal,
             .none,
-            if (scalar_ty.isSignedInt(mod)) .smin else .umin,
+            if (scalar_ty.isSignedInt(zcu)) .smin else .umin,
             &.{try o.lowerType(inst_ty)},
             &.{ lhs, rhs },
             "",
@@ -7681,18 +7685,18 @@ pub const FuncGen = struct {
 
     fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, .normal, inst_ty, 2, .{ lhs, rhs });
         return self.wip.callIntrinsic(
             .normal,
             .none,
-            if (scalar_ty.isSignedInt(mod)) .smax else .umax,
+            if (scalar_ty.isSignedInt(zcu)) .smax else .umax,
             &.{try o.lowerType(inst_ty)},
             &.{ lhs, rhs },
             "",
@@ -7711,15 +7715,15 @@ pub const FuncGen = struct {
 
     fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, fast, inst_ty, 2, .{ lhs, rhs });
-        return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"add nsw" else .@"add nuw", lhs, rhs, "");
+        return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .@"add nsw" else .@"add nuw", lhs, rhs, "");
     }
 
     fn airSafeArithmetic(
@@ -7729,15 +7733,15 @@ pub const FuncGen = struct {
         unsigned_intrinsic: Builder.Intrinsic,
     ) !Builder.Value {
         const o = fg.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
 
         const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try fg.resolveInst(bin_op.lhs);
         const rhs = try fg.resolveInst(bin_op.rhs);
         const inst_ty = fg.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
-        const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
+        const intrinsic = if (scalar_ty.isSignedInt(zcu)) signed_intrinsic else unsigned_intrinsic;
         const llvm_inst_ty = try o.lowerType(inst_ty);
         const results =
             try fg.wip.callIntrinsic(.normal, .none, intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, "");
@@ -7777,18 +7781,18 @@ pub const FuncGen = struct {
 
     fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
         return self.wip.callIntrinsic(
             .normal,
             .none,
-            if (scalar_ty.isSignedInt(mod)) .@"sadd.sat" else .@"uadd.sat",
+            if (scalar_ty.isSignedInt(zcu)) .@"sadd.sat" else .@"uadd.sat",
             &.{try o.lowerType(inst_ty)},
             &.{ lhs, rhs },
             "",
@@ -7797,15 +7801,15 @@ pub const FuncGen = struct {
 
     fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, fast, inst_ty, 2, .{ lhs, rhs });
-        return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"sub nsw" else .@"sub nuw", lhs, rhs, "");
+        return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .@"sub nsw" else .@"sub nuw", lhs, rhs, "");
     }
 
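The signedness-driven choices above and below (`add nsw` vs `add nuw`, `sadd.sat` vs `uadd.sat`, and the `sub` counterparts) mirror Zig's operator semantics; a minimal sketch (assumed example, not from this patch):

    const std = @import("std");

    test "wrapping and saturating operators" {
        // +% wraps (plain add, no nsw/nuw flags); +| saturates
        // (sadd.sat / uadd.sat depending on signedness).
        try std.testing.expectEqual(@as(u8, 0), @as(u8, 255) +% 1);
        try std.testing.expectEqual(@as(u8, 255), @as(u8, 250) +| 10);
        try std.testing.expectEqual(@as(i8, 127), @as(i8, 120) +| 10);
    }
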
     fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -7818,18 +7822,18 @@ pub const FuncGen = struct {
 
     fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
         return self.wip.callIntrinsic(
             .normal,
             .none,
-            if (scalar_ty.isSignedInt(mod)) .@"ssub.sat" else .@"usub.sat",
+            if (scalar_ty.isSignedInt(zcu)) .@"ssub.sat" else .@"usub.sat",
             &.{try o.lowerType(inst_ty)},
             &.{ lhs, rhs },
             "",
@@ -7838,15 +7842,15 @@ pub const FuncGen = struct {
 
     fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, fast, inst_ty, 2, .{ lhs, rhs });
-        return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"mul nsw" else .@"mul nuw", lhs, rhs, "");
+        return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .@"mul nsw" else .@"mul nuw", lhs, rhs, "");
     }
 
     fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -7859,18 +7863,18 @@ pub const FuncGen = struct {
 
     fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
         return self.wip.callIntrinsic(
             .normal,
             .none,
-            if (scalar_ty.isSignedInt(mod)) .@"smul.fix.sat" else .@"umul.fix.sat",
+            if (scalar_ty.isSignedInt(zcu)) .@"smul.fix.sat" else .@"umul.fix.sat",
             &.{try o.lowerType(inst_ty)},
             &.{ lhs, rhs, .@"0" },
             "",
@@ -7888,34 +7892,34 @@ pub const FuncGen = struct {
 
     fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isRuntimeFloat()) {
             const result = try self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs });
             return self.buildFloatOp(.trunc, fast, inst_ty, 1, .{result});
         }
-        return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .sdiv else .udiv, lhs, rhs, "");
+        return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .sdiv else .udiv, lhs, rhs, "");
     }
 
     fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isRuntimeFloat()) {
             const result = try self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs });
             return self.buildFloatOp(.floor, fast, inst_ty, 1, .{result});
         }
-        if (scalar_ty.isSignedInt(mod)) {
+        if (scalar_ty.isSignedInt(zcu)) {
             const inst_llvm_ty = try o.lowerType(inst_ty);
             const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst(
                 inst_llvm_ty.scalarType(&o.builder),
@@ -7936,16 +7940,16 @@ pub const FuncGen = struct {
 
     fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs });
         return self.wip.bin(
-            if (scalar_ty.isSignedInt(mod)) .@"sdiv exact" else .@"udiv exact",
+            if (scalar_ty.isSignedInt(zcu)) .@"sdiv exact" else .@"udiv exact",
             lhs,
             rhs,
             "",
@@ -7954,16 +7958,16 @@ pub const FuncGen = struct {
 
     fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isRuntimeFloat())
             return self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ lhs, rhs });
-        return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+        return self.wip.bin(if (scalar_ty.isSignedInt(zcu))
             .srem
         else
             .urem, lhs, rhs, "");
@@ -7971,13 +7975,13 @@ pub const FuncGen = struct {
 
     fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.typeOfIndex(inst);
         const inst_llvm_ty = try o.lowerType(inst_ty);
-        const scalar_ty = inst_ty.scalarType(mod);
+        const scalar_ty = inst_ty.scalarType(zcu);
 
         if (scalar_ty.isRuntimeFloat()) {
             const a = try self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ lhs, rhs });
@@ -7987,7 +7991,7 @@ pub const FuncGen = struct {
             const ltz = try self.buildFloatCmp(fast, .lt, inst_ty, .{ lhs, zero });
             return self.wip.select(fast, ltz, c, a, "");
         }
-        if (scalar_ty.isSignedInt(mod)) {
+        if (scalar_ty.isSignedInt(zcu)) {
             const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst(
                 inst_llvm_ty.scalarType(&o.builder),
                 inst_llvm_ty.scalarBits(&o.builder) - 1,
@@ -8007,14 +8011,14 @@ pub const FuncGen = struct {
 
     fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod));
-        switch (ptr_ty.ptrSize(mod)) {
+        const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(zcu));
+        switch (ptr_ty.ptrSize(zcu)) {
             // It's a pointer to an array, so according to LLVM we need an extra GEP index.
             .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{
                 try o.builder.intValue(try o.lowerType(Type.usize), 0), offset,
@@ -8029,15 +8033,15 @@ pub const FuncGen = struct {
 
     fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
         const negative_offset = try self.wip.neg(offset, "");
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod));
-        switch (ptr_ty.ptrSize(mod)) {
+        const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(zcu));
+        switch (ptr_ty.ptrSize(zcu)) {
             // It's a pointer to an array, so according to LLVM we need an extra GEP index.
             .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{
                 try o.builder.intValue(try o.lowerType(Type.usize), 0), negative_offset,
@@ -8058,7 +8062,7 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -8066,10 +8070,10 @@ pub const FuncGen = struct {
         const rhs = try self.resolveInst(extra.rhs);
 
         const lhs_ty = self.typeOf(extra.lhs);
-        const scalar_ty = lhs_ty.scalarType(mod);
+        const scalar_ty = lhs_ty.scalarType(zcu);
         const inst_ty = self.typeOfIndex(inst);
 
-        const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
+        const intrinsic = if (scalar_ty.isSignedInt(zcu)) signed_intrinsic else unsigned_intrinsic;
         const llvm_inst_ty = try o.lowerType(inst_ty);
         const llvm_lhs_ty = try o.lowerType(lhs_ty);
         const results =
@@ -8081,8 +8085,8 @@ pub const FuncGen = struct {
         const result_index = o.llvmFieldIndex(inst_ty, 0).?;
         const overflow_index = o.llvmFieldIndex(inst_ty, 1).?;
 
-        if (isByRef(inst_ty, pt)) {
-            const result_alignment = inst_ty.abiAlignment(pt).toLlvm();
+        if (isByRef(inst_ty, zcu)) {
+            const result_alignment = inst_ty.abiAlignment(zcu).toLlvm();
             const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment);
             {
                 const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
@@ -8165,9 +8169,9 @@ pub const FuncGen = struct {
         params: [2]Builder.Value,
     ) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
-        const target = mod.getTarget();
-        const scalar_ty = ty.scalarType(mod);
+        const zcu = o.pt.zcu;
+        const target = zcu.getTarget();
+        const scalar_ty = ty.scalarType(zcu);
         const scalar_llvm_ty = try o.lowerType(scalar_ty);
 
         if (intrinsicsAllowed(scalar_ty, target)) {
@@ -8205,8 +8209,8 @@ pub const FuncGen = struct {
             .gte => .sge,
         };
 
-        if (ty.zigTypeTag(mod) == .Vector) {
-            const vec_len = ty.vectorLen(mod);
+        if (ty.zigTypeTag(zcu) == .Vector) {
+            const vec_len = ty.vectorLen(zcu);
             const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32);
 
             const init = try o.builder.poisonValue(vector_result_ty);
@@ -8271,9 +8275,9 @@ pub const FuncGen = struct {
         params: [params_len]Builder.Value,
     ) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
-        const target = mod.getTarget();
-        const scalar_ty = ty.scalarType(mod);
+        const zcu = o.pt.zcu;
+        const target = zcu.getTarget();
+        const scalar_ty = ty.scalarType(zcu);
         const llvm_ty = try o.lowerType(ty);
 
         if (op != .tan and intrinsicsAllowed(scalar_ty, target)) switch (op) {
@@ -8382,9 +8386,9 @@ pub const FuncGen = struct {
             ([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len],
             scalar_llvm_ty,
         );
-        if (ty.zigTypeTag(mod) == .Vector) {
+        if (ty.zigTypeTag(zcu) == .Vector) {
             const result = try o.builder.poisonValue(llvm_ty);
-            return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod));
+            return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(zcu));
         }
 
         return self.wip.call(
@@ -8413,7 +8417,7 @@ pub const FuncGen = struct {
     fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -8421,7 +8425,7 @@ pub const FuncGen = struct {
         const rhs = try self.resolveInst(extra.rhs);
 
         const lhs_ty = self.typeOf(extra.lhs);
-        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const lhs_scalar_ty = lhs_ty.scalarType(zcu);
 
         const dest_ty = self.typeOfIndex(inst);
         const llvm_dest_ty = try o.lowerType(dest_ty);
@@ -8429,7 +8433,7 @@ pub const FuncGen = struct {
         const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
 
         const result = try self.wip.bin(.shl, lhs, casted_rhs, "");
-        const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+        const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(zcu))
             .ashr
         else
             .lshr, result, casted_rhs, "");
@@ -8439,8 +8443,8 @@ pub const FuncGen = struct {
         const result_index = o.llvmFieldIndex(dest_ty, 0).?;
         const overflow_index = o.llvmFieldIndex(dest_ty, 1).?;
 
-        if (isByRef(dest_ty, pt)) {
-            const result_alignment = dest_ty.abiAlignment(pt).toLlvm();
+        if (isByRef(dest_ty, zcu)) {
+            const result_alignment = dest_ty.abiAlignment(zcu).toLlvm();
             const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment);
             {
                 const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
@@ -8483,17 +8487,17 @@ pub const FuncGen = struct {
 
     fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
         const lhs_ty = self.typeOf(bin_op.lhs);
-        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const lhs_scalar_ty = lhs_ty.scalarType(zcu);
 
         const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
-        return self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+        return self.wip.bin(if (lhs_scalar_ty.isSignedInt(zcu))
             .@"shl nsw"
         else
             .@"shl nuw", lhs, casted_rhs, "");
@@ -8515,15 +8519,15 @@ pub const FuncGen = struct {
     fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
         const lhs_ty = self.typeOf(bin_op.lhs);
-        const lhs_scalar_ty = lhs_ty.scalarType(mod);
-        const lhs_bits = lhs_scalar_ty.bitSize(pt);
+        const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+        const lhs_bits = lhs_scalar_ty.bitSize(zcu);
 
         const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
 
@@ -8532,7 +8536,7 @@ pub const FuncGen = struct {
         const result = try self.wip.callIntrinsic(
             .normal,
             .none,
-            if (lhs_scalar_ty.isSignedInt(mod)) .@"sshl.sat" else .@"ushl.sat",
+            if (lhs_scalar_ty.isSignedInt(zcu)) .@"sshl.sat" else .@"ushl.sat",
             &.{llvm_lhs_ty},
             &.{ lhs, casted_rhs },
             "",
@@ -8557,17 +8561,17 @@ pub const FuncGen = struct {
 
     fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
         const lhs_ty = self.typeOf(bin_op.lhs);
-        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const lhs_scalar_ty = lhs_ty.scalarType(zcu);
 
         const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
-        const is_signed_int = lhs_scalar_ty.isSignedInt(mod);
+        const is_signed_int = lhs_scalar_ty.isSignedInt(zcu);
 
         return self.wip.bin(if (is_exact)
             if (is_signed_int) .@"ashr exact" else .@"lshr exact"
@@ -8576,13 +8580,13 @@ pub const FuncGen = struct {
 
     fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        const scalar_ty = operand_ty.scalarType(mod);
+        const scalar_ty = operand_ty.scalarType(zcu);
 
-        switch (scalar_ty.zigTypeTag(mod)) {
+        switch (scalar_ty.zigTypeTag(zcu)) {
             .Int => return self.wip.callIntrinsic(
                 .normal,
                 .none,
@@ -8598,13 +8602,13 @@ pub const FuncGen = struct {
 
     fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const dest_ty = self.typeOfIndex(inst);
         const dest_llvm_ty = try o.lowerType(dest_ty);
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        const operand_info = operand_ty.intInfo(mod);
+        const operand_info = operand_ty.intInfo(zcu);
 
         return self.wip.conv(switch (operand_info.signedness) {
             .signed => .signed,
@@ -8622,12 +8626,12 @@ pub const FuncGen = struct {
 
     fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
         const dest_ty = self.typeOfIndex(inst);
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
 
@@ -8656,12 +8660,12 @@ pub const FuncGen = struct {
 
     fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
         const dest_ty = self.typeOfIndex(inst);
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
 
         if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
             return self.wip.cast(.fpext, operand, try o.lowerType(dest_ty), "");
@@ -8669,18 +8673,18 @@ pub const FuncGen = struct {
             const operand_llvm_ty = try o.lowerType(operand_ty);
             const dest_llvm_ty = try o.lowerType(dest_ty);
 
-            const dest_bits = dest_ty.scalarType(mod).floatBits(target);
-            const src_bits = operand_ty.scalarType(mod).floatBits(target);
+            const dest_bits = dest_ty.scalarType(zcu).floatBits(target);
+            const src_bits = operand_ty.scalarType(zcu).floatBits(target);
             const fn_name = try o.builder.strtabStringFmt("__extend{s}f{s}f2", .{
                 compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
             });
 
             const libc_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty);
-            if (dest_ty.isVector(mod)) return self.buildElementwiseCall(
+            if (dest_ty.isVector(zcu)) return self.buildElementwiseCall(
                 libc_fn,
                 &.{operand},
                 try o.builder.poisonValue(dest_llvm_ty),
-                dest_ty.vectorLen(mod),
+                dest_ty.vectorLen(zcu),
             );
             return self.wip.call(
                 .normal,
@@ -8715,9 +8719,9 @@ pub const FuncGen = struct {
     fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const operand_is_ref = isByRef(operand_ty, pt);
-        const result_is_ref = isByRef(inst_ty, pt);
+        const zcu = pt.zcu;
+        const operand_is_ref = isByRef(operand_ty, zcu);
+        const result_is_ref = isByRef(inst_ty, zcu);
         const llvm_dest_ty = try o.lowerType(inst_ty);
 
         if (operand_is_ref and result_is_ref) {
@@ -8731,18 +8735,18 @@ pub const FuncGen = struct {
             return self.wip.conv(.unsigned, operand, llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
+        if (operand_ty.zigTypeTag(zcu) == .Int and inst_ty.isPtrAtRuntime(zcu)) {
             return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
-            const elem_ty = operand_ty.childType(mod);
+        if (operand_ty.zigTypeTag(zcu) == .Vector and inst_ty.zigTypeTag(zcu) == .Array) {
+            const elem_ty = operand_ty.childType(zcu);
             if (!result_is_ref) {
                 return self.ng.todo("implement bitcast vector to non-ref array", .{});
             }
-            const alignment = inst_ty.abiAlignment(pt).toLlvm();
+            const alignment = inst_ty.abiAlignment(zcu).toLlvm();
             const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
-            const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
+            const bitcast_ok = elem_ty.bitSize(zcu) == elem_ty.abiSize(zcu) * 8;
             if (bitcast_ok) {
                 _ = try self.wip.store(.normal, operand, array_ptr, alignment);
             } else {
@@ -8750,7 +8754,7 @@ pub const FuncGen = struct {
                 // a simple bitcast will not work, and we fall back to extractelement.
                 const llvm_usize = try o.lowerType(Type.usize);
                 const usize_zero = try o.builder.intValue(llvm_usize, 0);
-                const vector_len = operand_ty.arrayLen(mod);
+                const vector_len = operand_ty.arrayLen(zcu);
                 var i: u64 = 0;
                 while (i < vector_len) : (i += 1) {
                     const elem_ptr = try self.wip.gep(.inbounds, llvm_dest_ty, array_ptr, &.{
@@ -8762,16 +8766,16 @@ pub const FuncGen = struct {
                 }
             }
             return array_ptr;
-        } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
-            const elem_ty = operand_ty.childType(mod);
+        } else if (operand_ty.zigTypeTag(zcu) == .Array and inst_ty.zigTypeTag(zcu) == .Vector) {
+            const elem_ty = operand_ty.childType(zcu);
             const llvm_vector_ty = try o.lowerType(inst_ty);
             if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{});
 
-            const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
+            const bitcast_ok = elem_ty.bitSize(zcu) == elem_ty.abiSize(zcu) * 8;
             if (bitcast_ok) {
                 // The array is aligned to the element's alignment, while the vector might have a completely
                 // different alignment. This means we need to enforce the alignment of this load.
-                const alignment = elem_ty.abiAlignment(pt).toLlvm();
+                const alignment = elem_ty.abiAlignment(zcu).toLlvm();
                 return self.wip.load(.normal, llvm_vector_ty, operand, alignment, "");
             } else {
                // If the ABI size of the element type does not exactly match its size in bits,
@@ -8780,7 +8784,7 @@ pub const FuncGen = struct {
                 const elem_llvm_ty = try o.lowerType(elem_ty);
                 const llvm_usize = try o.lowerType(Type.usize);
                 const usize_zero = try o.builder.intValue(llvm_usize, 0);
-                const vector_len = operand_ty.arrayLen(mod);
+                const vector_len = operand_ty.arrayLen(zcu);
                 var vector = try o.builder.poisonValue(llvm_vector_ty);
                 var i: u64 = 0;
                 while (i < vector_len) : (i += 1) {
@@ -8796,25 +8800,25 @@ pub const FuncGen = struct {
         }
 
         if (operand_is_ref) {
-            const alignment = operand_ty.abiAlignment(pt).toLlvm();
+            const alignment = operand_ty.abiAlignment(zcu).toLlvm();
             return self.wip.load(.normal, llvm_dest_ty, operand, alignment, "");
         }
 
         if (result_is_ref) {
-            const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm();
+            const alignment = operand_ty.abiAlignment(zcu).max(inst_ty.abiAlignment(zcu)).toLlvm();
             const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
             _ = try self.wip.store(.normal, operand, result_ptr, alignment);
             return result_ptr;
         }
 
         if (llvm_dest_ty.isStruct(&o.builder) or
-            ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and
-            operand_ty.bitSize(pt) != inst_ty.bitSize(pt)))
+            ((operand_ty.zigTypeTag(zcu) == .Vector or inst_ty.zigTypeTag(zcu) == .Vector) and
+            operand_ty.bitSize(zcu) != inst_ty.bitSize(zcu)))
         {
             // Both our operand and our result are values, not pointers,
             // but LLVM won't let us bitcast struct values or vectors with padding bits.
             // Therefore, we store operand to alloca, then load for result.
-            const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm();
+            const alignment = operand_ty.abiAlignment(zcu).max(inst_ty.abiAlignment(zcu)).toLlvm();
             const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
             _ = try self.wip.store(.normal, operand, result_ptr, alignment);
             return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
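
Both vector/array branches above gate the flat store/load on `bitSize == abiSize * 8`. A concrete instance of that condition, checkable with Zig builtins:

    const std = @import("std");

    test "bit size versus ABI size decides the flat-copy path" {
        try std.testing.expect(@bitSizeOf(u32) == @sizeOf(u32) * 8); // bitcast ok
        try std.testing.expect(@bitSizeOf(u24) != @sizeOf(u24) * 8); // element-wise fallback
    }
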
@@ -8868,7 +8872,7 @@ pub const FuncGen = struct {
         };
 
         const mod = self.ng.ownerModule();
-        if (isByRef(inst_ty, pt)) {
+        if (isByRef(inst_ty, zcu)) {
             _ = try self.wip.callIntrinsic(
                 .normal,
                 .none,
@@ -8882,7 +8886,7 @@ pub const FuncGen = struct {
                 "",
             );
         } else if (mod.optimize_mode == .Debug) {
-            const alignment = inst_ty.abiAlignment(pt).toLlvm();
+            const alignment = inst_ty.abiAlignment(zcu).toLlvm();
             const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
             _ = try self.wip.store(.normal, arg_val, alloca, alignment);
             _ = try self.wip.callIntrinsic(
@@ -8919,28 +8923,28 @@ pub const FuncGen = struct {
     fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ptr_ty = self.typeOfIndex(inst);
-        const pointee_type = ptr_ty.childType(mod);
-        if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(pt))
+        const pointee_type = ptr_ty.childType(zcu);
+        if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
             return (try o.lowerPtrToVoid(ptr_ty)).toValue();
 
         //const pointee_llvm_ty = try o.lowerType(pointee_type);
-        const alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+        const alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
         return self.buildAllocaWorkaround(pointee_type, alignment);
     }
 
     fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ptr_ty = self.typeOfIndex(inst);
-        const ret_ty = ptr_ty.childType(mod);
-        if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt))
+        const ret_ty = ptr_ty.childType(zcu);
+        if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
             return (try o.lowerPtrToVoid(ptr_ty)).toValue();
         if (self.ret_ptr != .none) return self.ret_ptr;
         //const ret_llvm_ty = try o.lowerType(ret_ty);
-        const alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+        const alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
         return self.buildAllocaWorkaround(ret_ty, alignment);
     }
 
@@ -8962,19 +8966,19 @@ pub const FuncGen = struct {
         alignment: Builder.Alignment,
     ) Allocator.Error!Builder.Value {
         const o = self.ng.object;
-        return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment);
+        return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt.zcu), .i8), alignment);
     }
 
     fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const dest_ptr = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const operand_ty = ptr_ty.childType(mod);
+        const operand_ty = ptr_ty.childType(zcu);
 
-        const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false;
+        const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
         if (val_is_undef) {
             const owner_mod = self.ng.ownerModule();
 
@@ -8991,7 +8995,7 @@ pub const FuncGen = struct {
                 return .none;
             }
 
-            const ptr_info = ptr_ty.ptrInfo(mod);
+            const ptr_info = ptr_ty.ptrInfo(zcu);
             const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
             if (needs_bitmask) {
                // TODO: only some bits are to be undef; we cannot write with a simple memset.
@@ -9000,13 +9004,13 @@ pub const FuncGen = struct {
                 return .none;
             }
 
-            const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(pt));
+            const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(zcu));
             _ = try self.wip.callMemSet(
                 dest_ptr,
-                ptr_ty.ptrAlignment(pt).toLlvm(),
+                ptr_ty.ptrAlignment(zcu).toLlvm(),
                 if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8),
                 len,
-                if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+                if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
             );
             if (safety and owner_mod.valgrind) {
                 try self.valgrindMarkUndef(dest_ptr, len);
@@ -9027,8 +9031,8 @@ pub const FuncGen = struct {
     /// The first instruction of `body_tail` is the one whose copy we want to elide.
     fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
         const o = fg.ng.object;
-        const mod = o.pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = o.pt.zcu;
+        const ip = &zcu.intern_pool;
         for (body_tail[1..]) |body_inst| {
             switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
                 .none => continue,
@@ -9044,15 +9048,15 @@ pub const FuncGen = struct {
     fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
         const o = fg.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const inst = body_tail[0];
         const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const ptr_ty = fg.typeOf(ty_op.operand);
-        const ptr_info = ptr_ty.ptrInfo(mod);
+        const ptr_info = ptr_ty.ptrInfo(zcu);
         const ptr = try fg.resolveInst(ty_op.operand);
 
         elide: {
-            if (!isByRef(Type.fromInterned(ptr_info.child), pt)) break :elide;
+            if (!isByRef(Type.fromInterned(ptr_info.child), zcu)) break :elide;
             if (!canElideLoad(fg, body_tail)) break :elide;
             return ptr;
         }
@@ -9105,34 +9109,34 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
         const ptr = try self.resolveInst(extra.ptr);
         const ptr_ty = self.typeOf(extra.ptr);
         var expected_value = try self.resolveInst(extra.expected_value);
         var new_value = try self.resolveInst(extra.new_value);
-        const operand_ty = ptr_ty.childType(mod);
+        const operand_ty = ptr_ty.childType(zcu);
         const llvm_operand_ty = try o.lowerType(operand_ty);
         const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
         if (llvm_abi_ty != .none) {
             // operand needs widening and truncating
             const signedness: Builder.Function.Instruction.Cast.Signedness =
-                if (operand_ty.isSignedInt(mod)) .signed else .unsigned;
+                if (operand_ty.isSignedInt(zcu)) .signed else .unsigned;
             expected_value = try self.wip.conv(signedness, expected_value, llvm_abi_ty, "");
             new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, "");
         }
 
         const result = try self.wip.cmpxchg(
             kind,
-            if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+            if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
             ptr,
             expected_value,
             new_value,
             self.sync_scope,
             toLlvmAtomicOrdering(extra.successOrder()),
             toLlvmAtomicOrdering(extra.failureOrder()),
-            ptr_ty.ptrAlignment(pt).toLlvm(),
+            ptr_ty.ptrAlignment(zcu).toLlvm(),
             "",
         );
 
@@ -9142,7 +9146,7 @@ pub const FuncGen = struct {
         if (llvm_abi_ty != .none) payload = try self.wip.cast(.trunc, payload, llvm_operand_ty, "");
         const success_bit = try self.wip.extractValue(result, &.{1}, "");
 
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             const zero = try o.builder.zeroInitValue(payload.typeOfWip(&self.wip));
             return self.wip.select(.normal, success_bit, zero, payload, "");
         }
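
What the reassembled `{payload, success}` pair means at the language level, restated with the plain-Zig builtin (the orderings are placeholders, not taken from this hunk):

    const std = @import("std");

    test "cmpxchg success and failure results" {
        var x: u32 = 5;
        // success: the swap happened, so the result is null
        try std.testing.expectEqual(@as(?u32, null), @cmpxchgStrong(u32, &x, 5, 6, .seq_cst, .seq_cst));
        // failure: x is now 6, and the observed value comes back as the payload
        try std.testing.expectEqual(@as(?u32, 6), @cmpxchgStrong(u32, &x, 5, 7, .seq_cst, .seq_cst));
    }
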
@@ -9156,14 +9160,14 @@ pub const FuncGen = struct {
     fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
         const ptr = try self.resolveInst(pl_op.operand);
         const ptr_ty = self.typeOf(pl_op.operand);
-        const operand_ty = ptr_ty.childType(mod);
+        const operand_ty = ptr_ty.childType(zcu);
         const operand = try self.resolveInst(extra.operand);
-        const is_signed_int = operand_ty.isSignedInt(mod);
+        const is_signed_int = operand_ty.isSignedInt(zcu);
         const is_float = operand_ty.isRuntimeFloat();
         const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
         const ordering = toLlvmAtomicOrdering(extra.ordering());
@@ -9171,8 +9175,8 @@ pub const FuncGen = struct {
         const llvm_operand_ty = try o.lowerType(operand_ty);
 
         const access_kind: Builder.MemoryAccessKind =
-            if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
-        const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+            if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
+        const ptr_alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
 
         if (llvm_abi_ty != .none) {
             // operand needs widening and truncating or bitcasting.
@@ -9220,19 +9224,19 @@ pub const FuncGen = struct {
     fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
         const ptr = try self.resolveInst(atomic_load.ptr);
         const ptr_ty = self.typeOf(atomic_load.ptr);
-        const info = ptr_ty.ptrInfo(mod);
+        const info = ptr_ty.ptrInfo(zcu);
         const elem_ty = Type.fromInterned(info.child);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
         const ordering = toLlvmAtomicOrdering(atomic_load.order);
         const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
         const ptr_alignment = (if (info.flags.alignment != .none)
             @as(InternPool.Alignment, info.flags.alignment)
         else
-            Type.fromInterned(info.child).abiAlignment(pt)).toLlvm();
+            Type.fromInterned(info.child).abiAlignment(zcu)).toLlvm();
         const access_kind: Builder.MemoryAccessKind =
             if (info.flags.is_volatile) .@"volatile" else .normal;
         const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9268,11 +9272,11 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const operand_ty = ptr_ty.childType(mod);
-        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .none;
+        const operand_ty = ptr_ty.childType(zcu);
+        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .none;
         const ptr = try self.resolveInst(bin_op.lhs);
         var element = try self.resolveInst(bin_op.rhs);
         const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
@@ -9280,7 +9284,7 @@ pub const FuncGen = struct {
         if (llvm_abi_ty != .none) {
             // operand needs widening
             element = try self.wip.conv(
-                if (operand_ty.isSignedInt(mod)) .signed else .unsigned,
+                if (operand_ty.isSignedInt(zcu)) .signed else .unsigned,
                 element,
                 llvm_abi_ty,
                 "",
@@ -9293,26 +9297,26 @@ pub const FuncGen = struct {
     fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = self.typeOf(bin_op.rhs);
-        const dest_ptr_align = ptr_ty.ptrAlignment(pt).toLlvm();
+        const dest_ptr_align = ptr_ty.ptrAlignment(zcu).toLlvm();
         const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty);
         const access_kind: Builder.MemoryAccessKind =
-            if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
+            if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
 
         // Any WebAssembly runtime will trap when the destination pointer is out-of-bounds, regardless
        // of the length. This means we need to emit a check that skips the memset when the length
        // is 0, as we allow undefined pointers in 0-sized slices.
         // This logic can be removed once https://github.com/ziglang/zig/issues/16360 is done.
         const intrinsic_len0_traps = o.target.isWasm() and
-            ptr_ty.isSlice(mod) and
+            ptr_ty.isSlice(zcu) and
             std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory);
 
         if (try self.air.value(bin_op.rhs, pt)) |elem_val| {
-            if (elem_val.isUndefDeep(mod)) {
+            if (elem_val.isUndefDeep(zcu)) {
                 // Even if safety is disabled, we still emit a memset to undefined since it conveys
                 // extra information to LLVM. However, safety makes the difference between using
                 // 0xaa or actual undefined for the fill byte.
@@ -9350,7 +9354,7 @@ pub const FuncGen = struct {
         }
 
         const value = try self.resolveInst(bin_op.rhs);
-        const elem_abi_size = elem_ty.abiSize(pt);
+        const elem_abi_size = elem_ty.abiSize(zcu);
 
         if (elem_abi_size == 1) {
             // In this case we can take advantage of LLVM's intrinsic.
@@ -9387,9 +9391,9 @@ pub const FuncGen = struct {
         const end_block = try self.wip.block(1, "InlineMemsetEnd");
 
         const llvm_usize_ty = try o.lowerType(Type.usize);
-        const len = switch (ptr_ty.ptrSize(mod)) {
+        const len = switch (ptr_ty.ptrSize(zcu)) {
             .Slice => try self.wip.extractValue(dest_slice, &.{1}, ""),
-            .One => try o.builder.intValue(llvm_usize_ty, ptr_ty.childType(mod).arrayLen(mod)),
+            .One => try o.builder.intValue(llvm_usize_ty, ptr_ty.childType(zcu).arrayLen(zcu)),
             .Many, .C => unreachable,
         };
         const elem_llvm_ty = try o.lowerType(elem_ty);
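
The `len` switch above reads the count from the slice's second word for `.Slice` pointers and from the pointee array type for `.One`; `.Many` and `.C` pointers cannot reach a memset of this form. The same two length sources, observed from plain Zig:

    const std = @import("std");

    test "where the memset length comes from" {
        var arr: [3]u32 = .{ 0, 0, 0 };
        const one: *[3]u32 = &arr; // .One: length is part of the pointee type
        const slice: []u32 = &arr; // .Slice: length travels in the fat pointer
        try std.testing.expectEqual(@as(usize, 3), one.len);
        try std.testing.expectEqual(@as(usize, 3), slice.len);
    }
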
@@ -9402,9 +9406,9 @@ pub const FuncGen = struct {
         _ = try self.wip.brCond(end, body_block, end_block);
 
         self.wip.cursor = .{ .block = body_block };
-        const elem_abi_align = elem_ty.abiAlignment(pt);
+        const elem_abi_align = elem_ty.abiAlignment(zcu);
         const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm();
-        if (isByRef(elem_ty, pt)) {
+        if (isByRef(elem_ty, zcu)) {
             _ = try self.wip.callMemCpy(
                 it_ptr.toValue(),
                 it_ptr_align,
@@ -9447,7 +9451,7 @@ pub const FuncGen = struct {
     fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
         const dest_ptr_ty = self.typeOf(bin_op.lhs);
@@ -9456,8 +9460,8 @@ pub const FuncGen = struct {
         const src_ptr = try self.sliceOrArrayPtr(src_slice, src_ptr_ty);
         const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
         const dest_ptr = try self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
-        const access_kind: Builder.MemoryAccessKind = if (src_ptr_ty.isVolatilePtr(mod) or
-            dest_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
+        const access_kind: Builder.MemoryAccessKind = if (src_ptr_ty.isVolatilePtr(zcu) or
+            dest_ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
 
         // When bulk-memory is enabled, this will be lowered to WebAssembly's memory.copy instruction.
         // This instruction will trap on an invalid address, regardless of the length.
@@ -9466,7 +9470,7 @@ pub const FuncGen = struct {
         // This logic can be removed once https://github.com/ziglang/zig/issues/16360 is done.
         if (o.target.isWasm() and
             std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and
-            dest_ptr_ty.isSlice(mod))
+            dest_ptr_ty.isSlice(zcu))
         {
             const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0);
             const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero);
@@ -9476,9 +9480,9 @@ pub const FuncGen = struct {
             self.wip.cursor = .{ .block = memcpy_block };
             _ = try self.wip.callMemCpy(
                 dest_ptr,
-                dest_ptr_ty.ptrAlignment(pt).toLlvm(),
+                dest_ptr_ty.ptrAlignment(zcu).toLlvm(),
                 src_ptr,
-                src_ptr_ty.ptrAlignment(pt).toLlvm(),
+                src_ptr_ty.ptrAlignment(zcu).toLlvm(),
                 len,
                 access_kind,
             );
@@ -9489,9 +9493,9 @@ pub const FuncGen = struct {
 
         _ = try self.wip.callMemCpy(
             dest_ptr,
-            dest_ptr_ty.ptrAlignment(pt).toLlvm(),
+            dest_ptr_ty.ptrAlignment(zcu).toLlvm(),
             src_ptr,
-            src_ptr_ty.ptrAlignment(pt).toLlvm(),
+            src_ptr_ty.ptrAlignment(zcu).toLlvm(),
             len,
             access_kind,
         );
@@ -9501,10 +9505,10 @@ pub const FuncGen = struct {
     fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-        const un_ty = self.typeOf(bin_op.lhs).childType(mod);
-        const layout = un_ty.unionGetLayout(pt);
+        const un_ty = self.typeOf(bin_op.lhs).childType(zcu);
+        const layout = un_ty.unionGetLayout(zcu);
         if (layout.tag_size == 0) return .none;
         const union_ptr = try self.resolveInst(bin_op.lhs);
         const new_tag = try self.resolveInst(bin_op.rhs);
@@ -9523,12 +9527,13 @@ pub const FuncGen = struct {
     fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const un_ty = self.typeOf(ty_op.operand);
-        const layout = un_ty.unionGetLayout(pt);
+        const layout = un_ty.unionGetLayout(zcu);
         if (layout.tag_size == 0) return .none;
         const union_handle = try self.resolveInst(ty_op.operand);
-        if (isByRef(un_ty, pt)) {
+        if (isByRef(un_ty, zcu)) {
             const llvm_un_ty = try o.lowerType(un_ty);
             if (layout.payload_size == 0)
                 return self.wip.load(.normal, llvm_un_ty, union_handle, .default, "");
@@ -9597,10 +9602,10 @@ pub const FuncGen = struct {
 
     fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
+        const zcu = o.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_ty = self.typeOf(ty_op.operand);
-        var bits = operand_ty.intInfo(mod).bits;
+        var bits = operand_ty.intInfo(zcu).bits;
         assert(bits % 8 == 0);
 
         const inst_ty = self.typeOfIndex(inst);
@@ -9611,8 +9616,8 @@ pub const FuncGen = struct {
             // If not an even byte-multiple, we need zero-extend + shift-left 1 byte
             // The truncated result at the end will be the correct bswap
             const scalar_ty = try o.builder.intType(@intCast(bits + 8));
-            if (operand_ty.zigTypeTag(mod) == .Vector) {
-                const vec_len = operand_ty.vectorLen(mod);
+            if (operand_ty.zigTypeTag(zcu) == .Vector) {
+                const vec_len = operand_ty.vectorLen(zcu);
                 llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty);
             } else llvm_operand_ty = scalar_ty;
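
A worked instance of the widening described in the comment above, for a 24-bit integer (llvm.bswap is only defined for an even number of bytes, so u24 is extended to u32, shifted up one byte, swapped, and truncated):

    const std = @import("std");

    test "bswap of an odd byte width via widening" {
        const x: u24 = 0xAABBCC;
        const widened = @as(u32, x) << 8; // 0xAABBCC00
        const swapped = @byteSwap(widened); // 0x00CCBBAA
        try std.testing.expectEqual(@as(u24, 0xCCBBAA), @as(u24, @truncate(swapped)));
        try std.testing.expectEqual(@byteSwap(x), @as(u24, @truncate(swapped)));
    }
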
 
@@ -9631,13 +9636,13 @@ pub const FuncGen = struct {
 
     fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = o.pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const error_set_ty = ty_op.ty.toType();
 
-        const names = error_set_ty.errorSetNames(mod);
+        const names = error_set_ty.errorSetNames(zcu);
         const valid_block = try self.wip.block(@intCast(names.len), "Valid");
         const invalid_block = try self.wip.block(1, "Invalid");
         const end_block = try self.wip.block(2, "End");
@@ -9790,14 +9795,14 @@ pub const FuncGen = struct {
     fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
         const a = try self.resolveInst(extra.a);
         const b = try self.resolveInst(extra.b);
         const mask = Value.fromInterned(extra.mask);
         const mask_len = extra.mask_len;
-        const a_len = self.typeOf(extra.a).vectorLen(mod);
+        const a_len = self.typeOf(extra.a).vectorLen(zcu);
 
         // LLVM uses integers larger than the length of the first array to
         // index into the second array. This was deemed unnecessarily fragile
@@ -9809,10 +9814,10 @@ pub const FuncGen = struct {
 
         for (values, 0..) |*val, i| {
             const elem = try mask.elemValue(pt, i);
-            if (elem.isUndef(mod)) {
+            if (elem.isUndef(zcu)) {
                 val.* = try o.builder.undefConst(.i32);
             } else {
-                const int = elem.toSignedInt(pt);
+                const int = elem.toSignedInt(zcu);
                 const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len);
                 val.* = try o.builder.intConst(.i32, unsigned);
             }
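
The loop above folds Zig's two-vector mask convention (a negative element `~i` selects `b[i]`) into LLVM's single index space, where the second vector starts at `a_len`. A hypothetical helper with the same arithmetic, plus worked cases for `a_len == 4`:

    const std = @import("std");

    fn llvmMaskIndex(int: i64, a_len: u32) u32 {
        return @intCast(if (int >= 0) int else ~int + a_len);
    }

    test "negative mask elements land in the second vector's range" {
        try std.testing.expectEqual(@as(u32, 2), llvmMaskIndex(2, 4)); // a[2]
        try std.testing.expectEqual(@as(u32, 4), llvmMaskIndex(-1, 4)); // b[0]
        try std.testing.expectEqual(@as(u32, 5), llvmMaskIndex(-2, 4)); // b[1]
    }
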
@@ -9899,8 +9904,8 @@ pub const FuncGen = struct {
 
     fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
         const o = self.ng.object;
-        const mod = o.pt.zcu;
-        const target = mod.getTarget();
+        const zcu = o.pt.zcu;
+        const target = zcu.getTarget();
 
         const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
         const operand = try self.resolveInst(reduce.operand);
@@ -9916,13 +9921,13 @@ pub const FuncGen = struct {
                 .Xor => .@"vector.reduce.xor",
                 else => unreachable,
             }, &.{llvm_operand_ty}, &.{operand}, ""),
-            .Min, .Max => switch (scalar_ty.zigTypeTag(mod)) {
+            .Min, .Max => switch (scalar_ty.zigTypeTag(zcu)) {
                 .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
-                    .Min => if (scalar_ty.isSignedInt(mod))
+                    .Min => if (scalar_ty.isSignedInt(zcu))
                         .@"vector.reduce.smin"
                     else
                         .@"vector.reduce.umin",
-                    .Max => if (scalar_ty.isSignedInt(mod))
+                    .Max => if (scalar_ty.isSignedInt(zcu))
                         .@"vector.reduce.smax"
                     else
                         .@"vector.reduce.umax",
@@ -9936,7 +9941,7 @@ pub const FuncGen = struct {
                     }, &.{llvm_operand_ty}, &.{operand}, ""),
                 else => unreachable,
             },
-            .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
+            .Add, .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
                 .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
                     .Add => .@"vector.reduce.add",
                     .Mul => .@"vector.reduce.mul",
@@ -10004,21 +10009,21 @@ pub const FuncGen = struct {
             ))),
             else => unreachable,
         };
-        return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_val);
+        return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(zcu), init_val);
     }
 
     fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const result_ty = self.typeOfIndex(inst);
-        const len: usize = @intCast(result_ty.arrayLen(mod));
+        const len: usize = @intCast(result_ty.arrayLen(zcu));
         const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
         const llvm_result_ty = try o.lowerType(result_ty);
 
-        switch (result_ty.zigTypeTag(mod)) {
+        switch (result_ty.zigTypeTag(zcu)) {
             .Vector => {
                 var vector = try o.builder.poisonValue(llvm_result_ty);
                 for (elements, 0..) |elem, i| {
@@ -10029,21 +10034,21 @@ pub const FuncGen = struct {
                 return vector;
             },
             .Struct => {
-                if (mod.typeToPackedStruct(result_ty)) |struct_type| {
+                if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
                     const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
                     assert(backing_int_ty != .none);
-                    const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt);
+                    const big_bits = Type.fromInterned(backing_int_ty).bitSize(zcu);
                     const int_ty = try o.builder.intType(@intCast(big_bits));
                     comptime assert(Type.packed_struct_layout_version == 2);
                     var running_int = try o.builder.intValue(int_ty, 0);
                     var running_bits: u16 = 0;
                     for (elements, struct_type.field_types.get(ip)) |elem, field_ty| {
-                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         const non_int_val = try self.resolveInst(elem);
-                        const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt));
+                        const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(zcu));
                         const small_int_ty = try o.builder.intType(ty_bit_size);
-                        const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod))
+                        const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(zcu))
                             try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
                         else
                             try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -10057,12 +10062,12 @@ pub const FuncGen = struct {
                     return running_int;
                 }
 
-                assert(result_ty.containerLayout(mod) != .@"packed");
+                assert(result_ty.containerLayout(zcu) != .@"packed");
 
-                if (isByRef(result_ty, pt)) {
+                if (isByRef(result_ty, zcu)) {
                     // TODO in debug builds init to undef so that the padding will be 0xaa
                     // even if we fully populate the fields.
-                    const alignment = result_ty.abiAlignment(pt).toLlvm();
+                    const alignment = result_ty.abiAlignment(zcu).toLlvm();
                     const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
 
                     for (elements, 0..) |elem, i| {
@@ -10075,7 +10080,7 @@ pub const FuncGen = struct {
                         const field_ptr_ty = try pt.ptrType(.{
                             .child = self.typeOf(elem).toIntern(),
                             .flags = .{
-                                .alignment = result_ty.structFieldAlign(i, pt),
+                                .alignment = result_ty.structFieldAlign(i, zcu),
                             },
                         });
                         try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
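
In the packed-struct branch above, `running_int`/`running_bits` accumulate the backing integer by widening each field, shifting it to its bit offset, and OR-ing it in. The same shape in miniature, for two u8 fields packed into a u16 backing integer:

    const std = @import("std");

    fn packTwo(lo: u8, hi: u8) u16 {
        var running: u16 = 0;
        var bits: u4 = 0;
        running |= @as(u16, lo) << bits;
        bits += 8;
        running |= @as(u16, hi) << bits;
        return running;
    }

    test "fields land at increasing bit offsets" {
        try std.testing.expectEqual(@as(u16, 0x1234), packTwo(0x34, 0x12));
    }
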
@@ -10095,14 +10100,14 @@ pub const FuncGen = struct {
                 }
             },
             .Array => {
-                assert(isByRef(result_ty, pt));
+                assert(isByRef(result_ty, zcu));
 
                 const llvm_usize = try o.lowerType(Type.usize);
                 const usize_zero = try o.builder.intValue(llvm_usize, 0);
-                const alignment = result_ty.abiAlignment(pt).toLlvm();
+                const alignment = result_ty.abiAlignment(zcu).toLlvm();
                 const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
 
-                const array_info = result_ty.arrayInfo(mod);
+                const array_info = result_ty.arrayInfo(zcu);
                 const elem_ptr_ty = try pt.ptrType(.{
                     .child = array_info.elem_type.toIntern(),
                 });
@@ -10131,22 +10136,22 @@ pub const FuncGen = struct {
     fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
         const union_ty = self.typeOfIndex(inst);
         const union_llvm_ty = try o.lowerType(union_ty);
-        const layout = union_ty.unionGetLayout(pt);
-        const union_obj = mod.typeToUnion(union_ty).?;
+        const layout = union_ty.unionGetLayout(zcu);
+        const union_obj = zcu.typeToUnion(union_ty).?;
 
         if (union_obj.flagsUnordered(ip).layout == .@"packed") {
-            const big_bits = union_ty.bitSize(pt);
+            const big_bits = union_ty.bitSize(zcu);
             const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
             const non_int_val = try self.resolveInst(extra.init);
-            const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
-            const small_int_val = if (field_ty.isPtrAtRuntime(mod))
+            const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
+            const small_int_val = if (field_ty.isPtrAtRuntime(zcu))
                 try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
             else
                 try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -10154,9 +10159,9 @@ pub const FuncGen = struct {
         }
 
         const tag_int_val = blk: {
-            const tag_ty = union_ty.unionTagTypeHypothetical(mod);
+            const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
             const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
-            const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
+            const enum_field_index = tag_ty.enumFieldIndex(union_field_name, zcu).?;
             const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
             break :blk try tag_val.intFromEnum(tag_ty, pt);
         };
@@ -10164,12 +10169,12 @@ pub const FuncGen = struct {
             if (layout.tag_size == 0) {
                 return .none;
             }
-            assert(!isByRef(union_ty, pt));
+            assert(!isByRef(union_ty, zcu));
             var big_int_space: Value.BigIntSpace = undefined;
-            const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt);
+            const tag_big_int = tag_int_val.toBigInt(&big_int_space, zcu);
             return try o.builder.bigIntValue(union_llvm_ty, tag_big_int);
         }
-        assert(isByRef(union_ty, pt));
+        assert(isByRef(union_ty, zcu));
         // The llvm type of the alloca will be the named LLVM union type, and will not
         // necessarily match the format that we need, depending on which tag is active.
         // We must construct the correct unnamed struct type here, in order to then set
@@ -10179,14 +10184,14 @@ pub const FuncGen = struct {
         const llvm_payload = try self.resolveInst(extra.init);
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
         const field_llvm_ty = try o.lowerType(field_ty);
-        const field_size = field_ty.abiSize(pt);
-        const field_align = pt.unionFieldNormalAlignment(union_obj, extra.field_index);
+        const field_size = field_ty.abiSize(zcu);
+        const field_align = Type.unionFieldNormalAlignment(union_obj, extra.field_index, zcu);
         const llvm_usize = try o.lowerType(Type.usize);
         const usize_zero = try o.builder.intValue(llvm_usize, 0);
 
         const llvm_union_ty = t: {
             const payload_ty = p: {
-                if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     const padding_len = layout.payload_size;
                     break :p try o.builder.arrayType(padding_len, .i8);
                 }
@@ -10242,9 +10247,9 @@ pub const FuncGen = struct {
             const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
             const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
             var big_int_space: Value.BigIntSpace = undefined;
-            const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt);
+            const tag_big_int = tag_int_val.toBigInt(&big_int_space, zcu);
             const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int);
-            const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(pt).toLlvm();
+            const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(zcu).toLlvm();
             _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
         }
 
@@ -10270,8 +10275,8 @@ pub const FuncGen = struct {
         // by the target.
         // To work around this, don't emit llvm.prefetch in this case.
         // See https://bugs.llvm.org/show_bug.cgi?id=21037
-        const mod = o.pt.zcu;
-        const target = mod.getTarget();
+        const zcu = o.pt.zcu;
+        const target = zcu.getTarget();
         switch (prefetch.cache) {
             .instruction => switch (target.cpu.arch) {
                 .x86_64,
@@ -10397,7 +10402,7 @@ pub const FuncGen = struct {
         variable_index.setMutability(.constant, &o.builder);
         variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
         variable_index.setAlignment(
-            Type.slice_const_u8_sentinel_0.abiAlignment(pt).toLlvm(),
+            Type.slice_const_u8_sentinel_0.abiAlignment(pt.zcu).toLlvm(),
             &o.builder,
         );
 
@@ -10436,15 +10441,15 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = fg.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const payload_ty = opt_ty.optionalChild(mod);
+        const zcu = pt.zcu;
+        const payload_ty = opt_ty.optionalChild(zcu);
 
-        if (isByRef(opt_ty, pt)) {
+        if (isByRef(opt_ty, zcu)) {
             // We have a pointer and we need to return a pointer to the first field.
             const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, "");
 
-            const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
-            if (isByRef(payload_ty, pt)) {
+            const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
+            if (isByRef(payload_ty, zcu)) {
                 if (can_elide_load)
                     return payload_ptr;
 
@@ -10453,7 +10458,7 @@ pub const FuncGen = struct {
             return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment);
         }
 
-        assert(!isByRef(payload_ty, pt));
+        assert(!isByRef(payload_ty, zcu));
         return fg.wip.extractValue(opt_handle, &.{0}, "");
     }
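
The helpers above lean on the two optional layouts: a non-pointer optional is a pair whose field 0 is the payload, with the non-null flag widened to one byte, while a pointer-like optional encodes null as the zero address and needs no flag. Both are visible from plain Zig:

    const std = @import("std");

    test "optional representations" {
        try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32)); // payload plus flag byte
        try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32)); // null is the zero address
    }
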
 
@@ -10465,11 +10470,12 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
+        const zcu = pt.zcu;
         const optional_llvm_ty = try o.lowerType(optional_ty);
         const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, "");
 
-        if (isByRef(optional_ty, pt)) {
-            const payload_alignment = optional_ty.abiAlignment(pt).toLlvm();
+        if (isByRef(optional_ty, zcu)) {
+            const payload_alignment = optional_ty.abiAlignment(zcu).toLlvm();
             const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment);
 
             {
@@ -10497,15 +10503,15 @@ pub const FuncGen = struct {
     ) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const struct_ty = struct_ptr_ty.childType(mod);
-        switch (struct_ty.zigTypeTag(mod)) {
-            .Struct => switch (struct_ty.containerLayout(mod)) {
+        const zcu = pt.zcu;
+        const struct_ty = struct_ptr_ty.childType(zcu);
+        switch (struct_ty.zigTypeTag(zcu)) {
+            .Struct => switch (struct_ty.containerLayout(zcu)) {
                 .@"packed" => {
                     const result_ty = self.typeOfIndex(inst);
-                    const result_ty_info = result_ty.ptrInfo(mod);
-                    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
-                    const struct_type = mod.typeToStruct(struct_ty).?;
+                    const result_ty_info = result_ty.ptrInfo(zcu);
+                    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
+                    const struct_type = zcu.typeToStruct(struct_ty).?;
 
                     if (result_ty_info.packed_offset.host_size != 0) {
                         // From LLVM's perspective, a pointer to a packed struct and a pointer
@@ -10535,15 +10541,15 @@ pub const FuncGen = struct {
                         // the struct.
                         const llvm_index = try o.builder.intValue(
                             try o.lowerType(Type.usize),
-                            @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(pt)),
+                            @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(zcu)),
                         );
                         return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, "");
                     }
                 },
             },
             .Union => {
-                const layout = struct_ty.unionGetLayout(pt);
-                if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr;
+                const layout = struct_ty.unionGetLayout(zcu);
+                if (layout.payload_size == 0 or struct_ty.containerLayout(zcu) == .@"packed") return struct_ptr;
                 const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
                 const union_llvm_ty = try o.lowerType(struct_ty);
                 return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
@@ -10566,9 +10572,9 @@ pub const FuncGen = struct {
 
         const o = fg.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const payload_llvm_ty = try o.lowerType(payload_ty);
-        const abi_size = payload_ty.abiSize(pt);
+        const abi_size = payload_ty.abiSize(zcu);
 
         // llvm bug workarounds:
         const workaround_explicit_mask = o.target.cpu.arch == .powerpc and abi_size >= 4;
@@ -10580,7 +10586,7 @@ pub const FuncGen = struct {
             return try fg.wip.load(access_kind, payload_llvm_ty, payload_ptr, payload_alignment, "");
         }
 
-        const load_llvm_ty = if (payload_ty.isAbiInt(mod))
+        const load_llvm_ty = if (payload_ty.isAbiInt(zcu))
             try o.builder.intType(@intCast(abi_size * 8))
         else
             payload_llvm_ty;
@@ -10588,7 +10594,7 @@ pub const FuncGen = struct {
         const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .big)
             try fg.wip.bin(.lshr, loaded, try o.builder.intValue(
                 load_llvm_ty,
-                (payload_ty.abiSize(pt) - (std.math.divCeil(u64, payload_ty.bitSize(pt), 8) catch unreachable)) * 8,
+                (payload_ty.abiSize(zcu) - (std.math.divCeil(u64, payload_ty.bitSize(zcu), 8) catch unreachable)) * 8,
             ), "")
         else
             loaded;
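
The big-endian adjustment above is easiest to see with numbers: loading a 24-bit payload through its 4-byte ABI-sized integer puts the value in the top three bytes, so it must come down by `(4 - 3) * 8` bits before the truncating cast. The same arithmetic, restated as a check:

    const std = @import("std");

    test "big-endian truncating-load shift amount for u24" {
        const abi_size: u64 = @sizeOf(u24); // 4
        const bit_size: u64 = @bitSizeOf(u24); // 24
        const shift = (abi_size - (std.math.divCeil(u64, bit_size, 8) catch unreachable)) * 8;
        try std.testing.expectEqual(@as(u64, 8), shift);
    }
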
@@ -10614,9 +10620,10 @@ pub const FuncGen = struct {
         const o = fg.ng.object;
         const pt = o.pt;
         //const pointee_llvm_ty = try o.lowerType(pointee_type);
-        const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm();
+        const result_align = InternPool.Alignment.fromLlvm(ptr_alignment)
+            .max(pointee_type.abiAlignment(pt.zcu)).toLlvm();
         const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align);
-        const size_bytes = pointee_type.abiSize(pt);
+        const size_bytes = pointee_type.abiSize(pt.zcu);
         _ = try fg.wip.callMemCpy(
             result_ptr,
             result_align,
@@ -10634,15 +10641,15 @@ pub const FuncGen = struct {
     fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const info = ptr_ty.ptrInfo(mod);
+        const zcu = pt.zcu;
+        const info = ptr_ty.ptrInfo(zcu);
         const elem_ty = Type.fromInterned(info.child);
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
 
         const ptr_alignment = (if (info.flags.alignment != .none)
             @as(InternPool.Alignment, info.flags.alignment)
         else
-            elem_ty.abiAlignment(pt)).toLlvm();
+            elem_ty.abiAlignment(zcu)).toLlvm();
 
         const access_kind: Builder.MemoryAccessKind =
             if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10658,7 +10665,7 @@ pub const FuncGen = struct {
         }
 
         if (info.packed_offset.host_size == 0) {
-            if (isByRef(elem_ty, pt)) {
+            if (isByRef(elem_ty, zcu)) {
                 return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind);
             }
             return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment);
@@ -10668,13 +10675,13 @@ pub const FuncGen = struct {
         const containing_int =
             try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, "");
 
-        const elem_bits = ptr_ty.childType(mod).bitSize(pt);
+        const elem_bits = ptr_ty.childType(zcu).bitSize(zcu);
         const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset);
         const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
         const elem_llvm_ty = try o.lowerType(elem_ty);
 
-        if (isByRef(elem_ty, pt)) {
-            const result_align = elem_ty.abiAlignment(pt).toLlvm();
+        if (isByRef(elem_ty, zcu)) {
+            const result_align = elem_ty.abiAlignment(zcu).toLlvm();
             const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align);
 
             const same_size_int = try o.builder.intType(@intCast(elem_bits));
@@ -10683,13 +10690,13 @@ pub const FuncGen = struct {
             return result_ptr;
         }
 
-        if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) {
+        if (elem_ty.zigTypeTag(zcu) == .Float or elem_ty.zigTypeTag(zcu) == .Vector) {
             const same_size_int = try o.builder.intType(@intCast(elem_bits));
             const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
             return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
         }
 
-        if (elem_ty.isPtrAtRuntime(mod)) {
+        if (elem_ty.isPtrAtRuntime(zcu)) {
             const same_size_int = try o.builder.intType(@intCast(elem_bits));
             const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
             return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
@@ -10707,13 +10714,13 @@ pub const FuncGen = struct {
     ) !void {
         const o = self.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const info = ptr_ty.ptrInfo(mod);
+        const zcu = pt.zcu;
+        const info = ptr_ty.ptrInfo(zcu);
         const elem_ty = Type.fromInterned(info.child);
-        if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+        if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
             return;
         }
-        const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+        const ptr_alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
         const access_kind: Builder.MemoryAccessKind =
             if (info.flags.is_volatile) .@"volatile" else .normal;
 
@@ -10737,12 +10744,12 @@ pub const FuncGen = struct {
             assert(ordering == .none);
             const containing_int =
                 try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, "");
-            const elem_bits = ptr_ty.childType(mod).bitSize(pt);
+            const elem_bits = ptr_ty.childType(zcu).bitSize(zcu);
             const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset);
             // Convert to an equally-sized integer type in order to perform the bit
             // operations on the value to store
             const value_bits_type = try o.builder.intType(@intCast(elem_bits));
-            const value_bits = if (elem_ty.isPtrAtRuntime(mod))
+            const value_bits = if (elem_ty.isPtrAtRuntime(zcu))
                 try self.wip.cast(.ptrtoint, elem, value_bits_type, "")
             else
                 try self.wip.cast(.bitcast, elem, value_bits_type, "");
@@ -10772,7 +10779,7 @@ pub const FuncGen = struct {
             _ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment);
             return;
         }
-        if (!isByRef(elem_ty, pt)) {
+        if (!isByRef(elem_ty, zcu)) {
             _ = try self.wip.storeAtomic(
                 access_kind,
                 elem,
@@ -10788,8 +10795,8 @@ pub const FuncGen = struct {
             ptr,
             ptr_alignment,
             elem,
-            elem_ty.abiAlignment(pt).toLlvm(),
-            try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(pt)),
+            elem_ty.abiAlignment(zcu).toLlvm(),
+            try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(zcu)),
             access_kind,
         );
     }
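 
     // A sketch of the read-modify-write sequence above, for a hypothetical
     // u4 field at bit_offset 4 inside a u8 host integer:
     //
     //   old  = load(ptr)                       // 0bxxxx_yyyy
     //   mask = ~(@as(u8, 0b1111) << 4)         // keeps the low nibble
     //   new  = (old & mask) | (@as(u8, value) << 4)
     //   store(ptr, new)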
@@ -10816,12 +10823,12 @@ pub const FuncGen = struct {
     ) Allocator.Error!Builder.Value {
         const o = fg.ng.object;
         const pt = o.pt;
-        const mod = pt.zcu;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const target = zcu.getTarget();
         if (!target_util.hasValgrindSupport(target)) return default_value;
 
         const llvm_usize = try o.lowerType(Type.usize);
-        const usize_alignment = Type.usize.abiAlignment(pt).toLlvm();
+        const usize_alignment = Type.usize.abiAlignment(zcu).toLlvm();
 
         const array_llvm_ty = try o.builder.arrayType(6, llvm_usize);
         const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
@@ -10882,14 +10889,14 @@ pub const FuncGen = struct {
 
     fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
         const o = fg.ng.object;
-        const mod = o.pt.zcu;
-        return fg.air.typeOf(inst, &mod.intern_pool);
+        const zcu = o.pt.zcu;
+        return fg.air.typeOf(inst, &zcu.intern_pool);
     }
 
     fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
         const o = fg.ng.object;
-        const mod = o.pt.zcu;
-        return fg.air.typeOfIndex(inst, &mod.intern_pool);
+        const zcu = o.pt.zcu;
+        return fg.air.typeOfIndex(inst, &zcu.intern_pool);
     }
 };
 
@@ -11059,12 +11066,12 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
     };
 }
 
-fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool {
-    if (isByRef(ty, pt)) {
+fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
+    if (isByRef(ty, zcu)) {
         return true;
     } else if (target.cpu.arch.isX86() and
         !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
-        ty.totalVectorBits(pt) >= 512)
+        ty.totalVectorBits(zcu) >= 512)
     {
         // As of LLVM 18, passing a vector of 512 bits or more byval with fastcc produces the error
         // "512-bit vector arguments require 'evex512' for AVX512"
@@ -11074,38 +11081,38 @@ fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool {
     }
 }
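 
 // For instance, a @Vector(16, f32) is 512 bits: on an x86 target without the
 // evex512 feature it is returned by reference even when isByRef() says no,
 // purely to sidestep the LLVM error quoted above.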
 
-fn firstParamSRet(fn_info: InternPool.Key.FuncType, pt: Zcu.PerThread, target: std.Target) bool {
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
     const return_type = Type.fromInterned(fn_info.return_type);
-    if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) return false;
+    if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
 
     return switch (fn_info.cc) {
-        .Unspecified, .Inline => returnTypeByRef(pt, target, return_type),
+        .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type),
         .C => switch (target.cpu.arch) {
             .mips, .mipsel => false,
-            .x86 => isByRef(return_type, pt),
+            .x86 => isByRef(return_type, zcu),
             .x86_64 => switch (target.os.tag) {
-                .windows => x86_64_abi.classifyWindows(return_type, pt) == .memory,
-                else => firstParamSRetSystemV(return_type, pt, target),
+                .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
+                else => firstParamSRetSystemV(return_type, zcu, target),
             },
-            .wasm32 => wasm_c_abi.classifyType(return_type, pt)[0] == .indirect,
-            .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, pt) == .memory,
-            .arm, .armeb => switch (arm_c_abi.classifyType(return_type, pt, .ret)) {
+            .wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
+            .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
+            .arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
                 .memory, .i64_array => true,
                 .i32_array => |size| size != 1,
                 .byval => false,
             },
-            .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, pt) == .memory,
+            .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
             else => false, // TODO investigate C ABI for other architectures
         },
-        .SysV => firstParamSRetSystemV(return_type, pt, target),
-        .Win64 => x86_64_abi.classifyWindows(return_type, pt) == .memory,
-        .Stdcall => !isScalar(pt.zcu, return_type),
+        .SysV => firstParamSRetSystemV(return_type, zcu, target),
+        .Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
+        .Stdcall => !isScalar(zcu, return_type),
         else => false,
     };
 }
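 
 // For example, with the C calling convention on x86_64-linux, a 24-byte
 // extern struct return type classifies as MEMORY under the SysV rules below,
 // so firstParamSRet returns true and the caller passes a hidden sret pointer;
 // a 16-byte struct of two u64 fields comes back in a register pair instead.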
 
-fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
-    const class = x86_64_abi.classifySystemV(ty, pt, target, .ret);
+fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
+    const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
     if (class[0] == .memory) return true;
     if (class[0] == .x87 and class[2] != .none) return true;
     return false;
@@ -11116,62 +11123,62 @@ fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
 /// be effectively bitcasted to the actual return type.
 fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const pt = o.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const return_type = Type.fromInterned(fn_info.return_type);
-    if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+    if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
         // If the return type is an error set or an error union, then we make this
         // anyerror return type instead, so that it can be coerced into a function
         // pointer type which has anyerror as the return type.
-        return if (return_type.isError(mod)) try o.errorIntType() else .void;
+        return if (return_type.isError(zcu)) try o.errorIntType() else .void;
     }
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
     switch (fn_info.cc) {
         .Unspecified,
         .Inline,
-        => return if (returnTypeByRef(pt, target, return_type)) .void else o.lowerType(return_type),
+        => return if (returnTypeByRef(zcu, target, return_type)) .void else o.lowerType(return_type),
 
         .C => {
             switch (target.cpu.arch) {
                 .mips, .mipsel => return o.lowerType(return_type),
-                .x86 => return if (isByRef(return_type, pt)) .void else o.lowerType(return_type),
+                .x86 => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type),
                 .x86_64 => switch (target.os.tag) {
                     .windows => return lowerWin64FnRetTy(o, fn_info),
                     else => return lowerSystemVFnRetTy(o, fn_info),
                 },
                 .wasm32 => {
-                    if (isScalar(mod, return_type)) {
+                    if (isScalar(zcu, return_type)) {
                         return o.lowerType(return_type);
                     }
-                    const classes = wasm_c_abi.classifyType(return_type, pt);
+                    const classes = wasm_c_abi.classifyType(return_type, zcu);
                     if (classes[0] == .indirect or classes[0] == .none) {
                         return .void;
                     }
 
                     assert(classes[0] == .direct and classes[1] == .none);
-                    const scalar_type = wasm_c_abi.scalarType(return_type, pt);
-                    return o.builder.intType(@intCast(scalar_type.abiSize(pt) * 8));
+                    const scalar_type = wasm_c_abi.scalarType(return_type, zcu);
+                    return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8));
                 },
                 .aarch64, .aarch64_be => {
-                    switch (aarch64_c_abi.classifyType(return_type, pt)) {
+                    switch (aarch64_c_abi.classifyType(return_type, zcu)) {
                         .memory => return .void,
                         .float_array => return o.lowerType(return_type),
                         .byval => return o.lowerType(return_type),
-                        .integer => return o.builder.intType(@intCast(return_type.bitSize(pt))),
+                        .integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
                         .double_integer => return o.builder.arrayType(2, .i64),
                     }
                 },
                 .arm, .armeb => {
-                    switch (arm_c_abi.classifyType(return_type, pt, .ret)) {
+                    switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
                         .memory, .i64_array => return .void,
                         .i32_array => |len| return if (len == 1) .i32 else .void,
                         .byval => return o.lowerType(return_type),
                     }
                 },
                 .riscv32, .riscv64 => {
-                    switch (riscv_c_abi.classifyType(return_type, pt)) {
+                    switch (riscv_c_abi.classifyType(return_type, zcu)) {
                         .memory => return .void,
                         .integer => {
-                            return o.builder.intType(@intCast(return_type.bitSize(pt)));
+                            return o.builder.intType(@intCast(return_type.bitSize(zcu)));
                         },
                         .double_integer => {
                             return o.builder.structType(.normal, &.{ .i64, .i64 });
@@ -11180,9 +11187,9 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
                         .fields => {
                             var types_len: usize = 0;
                             var types: [8]Builder.Type = undefined;
-                            for (0..return_type.structFieldCount(mod)) |field_index| {
-                                const field_ty = return_type.structFieldType(field_index, mod);
-                                if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                            for (0..return_type.structFieldCount(zcu)) |field_index| {
+                                const field_ty = return_type.structFieldType(field_index, zcu);
+                                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                                 types[types_len] = try o.lowerType(field_ty);
                                 types_len += 1;
                             }
@@ -11196,20 +11203,20 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
         },
         .Win64 => return lowerWin64FnRetTy(o, fn_info),
         .SysV => return lowerSystemVFnRetTy(o, fn_info),
-        .Stdcall => return if (isScalar(mod, return_type)) o.lowerType(return_type) else .void,
+        .Stdcall => return if (isScalar(zcu, return_type)) o.lowerType(return_type) else .void,
         else => return o.lowerType(return_type),
     }
 }
 
 fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
-    const pt = o.pt;
+    const zcu = o.pt.zcu;
     const return_type = Type.fromInterned(fn_info.return_type);
-    switch (x86_64_abi.classifyWindows(return_type, pt)) {
+    switch (x86_64_abi.classifyWindows(return_type, zcu)) {
         .integer => {
-            if (isScalar(pt.zcu, return_type)) {
+            if (isScalar(zcu, return_type)) {
                 return o.lowerType(return_type);
             } else {
-                return o.builder.intType(@intCast(return_type.abiSize(pt) * 8));
+                return o.builder.intType(@intCast(return_type.abiSize(zcu) * 8));
             }
         },
         .win_i128 => return o.builder.vectorType(.normal, 2, .i64),
@@ -11221,14 +11228,14 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err
 
 fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const pt = o.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const return_type = Type.fromInterned(fn_info.return_type);
-    if (isScalar(mod, return_type)) {
+    if (isScalar(zcu, return_type)) {
         return o.lowerType(return_type);
     }
-    const target = mod.getTarget();
-    const classes = x86_64_abi.classifySystemV(return_type, pt, target, .ret);
+    const target = zcu.getTarget();
+    const classes = x86_64_abi.classifySystemV(return_type, zcu, target, .ret);
     if (classes[0] == .memory) return .void;
     var types_index: u32 = 0;
     var types_buffer: [8]Builder.Type = undefined;
@@ -11345,7 +11352,7 @@ const ParamTypeIterator = struct {
         const zcu = pt.zcu;
         const target = zcu.getTarget();
 
-        if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             it.zig_index += 1;
             return .no_bits;
         }
@@ -11358,11 +11365,11 @@ const ParamTypeIterator = struct {
                 {
                     it.llvm_index += 1;
                     return .slice;
-                } else if (isByRef(ty, pt)) {
+                } else if (isByRef(ty, zcu)) {
                     return .byref;
                 } else if (target.cpu.arch.isX86() and
                     !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
-                    ty.totalVectorBits(pt) >= 512)
+                    ty.totalVectorBits(zcu) >= 512)
                 {
                     // As of LLVM 18, passing a vector of 512 bits or more byval with fastcc produces the error
                     // "512-bit vector arguments require 'evex512' for AVX512"
@@ -11390,7 +11397,7 @@ const ParamTypeIterator = struct {
                     if (isScalar(zcu, ty)) {
                         return .byval;
                     }
-                    const classes = wasm_c_abi.classifyType(ty, pt);
+                    const classes = wasm_c_abi.classifyType(ty, zcu);
                     if (classes[0] == .indirect) {
                         return .byref;
                     }
@@ -11399,7 +11406,7 @@ const ParamTypeIterator = struct {
                 .aarch64, .aarch64_be => {
                     it.zig_index += 1;
                     it.llvm_index += 1;
-                    switch (aarch64_c_abi.classifyType(ty, pt)) {
+                    switch (aarch64_c_abi.classifyType(ty, zcu)) {
                         .memory => return .byref_mut,
                         .float_array => |len| return Lowering{ .float_array = len },
                         .byval => return .byval,
@@ -11414,7 +11421,7 @@ const ParamTypeIterator = struct {
                 .arm, .armeb => {
                     it.zig_index += 1;
                     it.llvm_index += 1;
-                    switch (arm_c_abi.classifyType(ty, pt, .arg)) {
+                    switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
                         .memory => {
                             it.byval_attr = true;
                             return .byref;
@@ -11429,7 +11436,7 @@ const ParamTypeIterator = struct {
                     it.llvm_index += 1;
                     if (ty.toIntern() == .f16_type and
                         !std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
-                    switch (riscv_c_abi.classifyType(ty, pt)) {
+                    switch (riscv_c_abi.classifyType(ty, zcu)) {
                         .memory => return .byref_mut,
                         .byval => return .byval,
                         .integer => return .abi_sized_int,
@@ -11438,7 +11445,7 @@ const ParamTypeIterator = struct {
                             it.types_len = 0;
                             for (0..ty.structFieldCount(zcu)) |field_index| {
                                 const field_ty = ty.structFieldType(field_index, zcu);
-                                if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                                 it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
                                 it.types_len += 1;
                             }
@@ -11476,10 +11483,10 @@ const ParamTypeIterator = struct {
     }
 
     fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
-        const pt = it.object.pt;
-        switch (x86_64_abi.classifyWindows(ty, pt)) {
+        const zcu = it.object.pt.zcu;
+        switch (x86_64_abi.classifyWindows(ty, zcu)) {
             .integer => {
-                if (isScalar(pt.zcu, ty)) {
+                if (isScalar(zcu, ty)) {
                     it.zig_index += 1;
                     it.llvm_index += 1;
                     return .byval;
@@ -11509,17 +11516,17 @@ const ParamTypeIterator = struct {
     }
 
     fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
-        const pt = it.object.pt;
-        const ip = &pt.zcu.intern_pool;
-        const target = pt.zcu.getTarget();
-        const classes = x86_64_abi.classifySystemV(ty, pt, target, .arg);
+        const zcu = it.object.pt.zcu;
+        const ip = &zcu.intern_pool;
+        const target = zcu.getTarget();
+        const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg);
         if (classes[0] == .memory) {
             it.zig_index += 1;
             it.llvm_index += 1;
             it.byval_attr = true;
             return .byref;
         }
-        if (isScalar(pt.zcu, ty)) {
+        if (isScalar(zcu, ty)) {
             it.zig_index += 1;
             it.llvm_index += 1;
             return .byval;
@@ -11620,17 +11627,17 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp
 
 fn ccAbiPromoteInt(
     cc: std.builtin.CallingConvention,
-    mod: *Zcu,
+    zcu: *Zcu,
     ty: Type,
 ) ?std.builtin.Signedness {
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
     switch (cc) {
         .Unspecified, .Inline, .Async => return null,
         else => {},
     }
-    const int_info = switch (ty.zigTypeTag(mod)) {
-        .Bool => Type.u1.intInfo(mod),
-        .Int, .Enum, .ErrorSet => ty.intInfo(mod),
+    const int_info = switch (ty.zigTypeTag(zcu)) {
+        .Bool => Type.u1.intInfo(zcu),
+        .Int, .Enum, .ErrorSet => ty.intInfo(zcu),
         else => return null,
     };
     return switch (target.os.tag) {
@@ -11668,13 +11675,13 @@ fn ccAbiPromoteInt(
 
 /// This is the one source of truth for whether a type is passed around as an LLVM pointer,
 /// or as an LLVM value.
-fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
+fn isByRef(ty: Type, zcu: *Zcu) bool {
     // For tuples and structs, if there are more than this many non-void
     // fields, then we make it byref, otherwise byval.
     const max_fields_byval = 0;
-    const ip = &pt.zcu.intern_pool;
+    const ip = &zcu.intern_pool;
 
-    switch (ty.zigTypeTag(pt.zcu)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Type,
         .ComptimeInt,
         .ComptimeFloat,
@@ -11697,17 +11704,17 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
         .AnyFrame,
         => return false,
 
-        .Array, .Frame => return ty.hasRuntimeBits(pt),
+        .Array, .Frame => return ty.hasRuntimeBits(zcu),
         .Struct => {
             const struct_type = switch (ip.indexToKey(ty.toIntern())) {
                 .anon_struct_type => |tuple| {
                     var count: usize = 0;
                     for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
-                        if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+                        if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
 
                         count += 1;
                         if (count > max_fields_byval) return true;
-                        if (isByRef(Type.fromInterned(field_ty), pt)) return true;
+                        if (isByRef(Type.fromInterned(field_ty), zcu)) return true;
                     }
                     return false;
                 },
@@ -11725,27 +11732,27 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
                 count += 1;
                 if (count > max_fields_byval) return true;
                 const field_ty = Type.fromInterned(field_types[field_index]);
-                if (isByRef(field_ty, pt)) return true;
+                if (isByRef(field_ty, zcu)) return true;
             }
             return false;
         },
-        .Union => switch (ty.containerLayout(pt.zcu)) {
+        .Union => switch (ty.containerLayout(zcu)) {
             .@"packed" => return false,
-            else => return ty.hasRuntimeBits(pt),
+            else => return ty.hasRuntimeBits(zcu),
         },
         .ErrorUnion => {
-            const payload_ty = ty.errorUnionPayload(pt.zcu);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const payload_ty = ty.errorUnionPayload(zcu);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 return false;
             }
             return true;
         },
         .Optional => {
-            const payload_ty = ty.optionalChild(pt.zcu);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            const payload_ty = ty.optionalChild(zcu);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 return false;
             }
-            if (ty.optionalReprIsPayload(pt.zcu)) {
+            if (ty.optionalReprIsPayload(zcu)) {
                 return false;
             }
             return true;
@@ -11753,8 +11760,8 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
     }
 }
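 
 // With max_fields_byval == 0 above, the practical effect is that any struct
 // or tuple with at least one runtime field is passed by reference: e.g. a
 // hypothetical struct { x: u32 } lowers to an LLVM pointer, while a plain
 // u32 remains an LLVM value.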
 
-fn isScalar(mod: *Zcu, ty: Type) bool {
-    return switch (ty.zigTypeTag(mod)) {
+fn isScalar(zcu: *Zcu, ty: Type) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Void,
         .Bool,
         .NoReturn,
@@ -11768,8 +11775,8 @@ fn isScalar(mod: *Zcu, ty: Type) bool {
         .Vector,
         => true,
 
-        .Struct => ty.containerLayout(mod) == .@"packed",
-        .Union => ty.containerLayout(mod) == .@"packed",
+        .Struct => ty.containerLayout(zcu) == .@"packed",
+        .Union => ty.containerLayout(zcu) == .@"packed",
         else => false,
     };
 }
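 
 // Illustrative classification, using hypothetical Zig types:
 //   u32, f64, *u8, @Vector(4, f32)    => scalar
 //   packed struct { a: u4, b: u4 }    => scalar (integer-backed)
 //   extern struct { a: u32, b: u32 }  => not scalar (lowered as an aggregate)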
@@ -11892,13 +11899,15 @@ fn buildAllocaInner(
 }
 
 fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 {
+    const zcu = pt.zcu;
     const err_int_ty = try pt.errorIntType();
-    return @intFromBool(err_int_ty.abiAlignment(pt).compare(.gt, payload_ty.abiAlignment(pt)));
+    return @intFromBool(err_int_ty.abiAlignment(zcu).compare(.gt, payload_ty.abiAlignment(zcu)));
 }
 
 fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 {
+    const zcu = pt.zcu;
     const err_int_ty = try pt.errorIntType();
-    return @intFromBool(err_int_ty.abiAlignment(pt).compare(.lte, payload_ty.abiAlignment(pt)));
+    return @intFromBool(err_int_ty.abiAlignment(zcu).compare(.lte, payload_ty.abiAlignment(zcu)));
 }
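 
 // Worked example, assuming the error integer type is u16 (ABI alignment 2):
 // a u8 payload (alignment 1) gives error offset 0 and payload offset 1, so
 // the error lands first in the LLVM struct; a u64 payload (alignment 8)
 // flips the order.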
 
 /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
src/codegen/spirv.zig
@@ -436,16 +436,16 @@ const NavGen = struct {
     /// Fetch the result-id for a previously generated instruction or constant.
     fn resolve(self: *NavGen, inst: Air.Inst.Ref) !IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         if (try self.air.value(inst, pt)) |val| {
             const ty = self.typeOf(inst);
-            if (ty.zigTypeTag(mod) == .Fn) {
-                const fn_nav = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            if (ty.zigTypeTag(zcu) == .Fn) {
+                const fn_nav = switch (zcu.intern_pool.indexToKey(val.ip_index)) {
                     .@"extern" => |@"extern"| @"extern".owner_nav,
                     .func => |func| func.owner_nav,
                     else => unreachable,
                 };
-                const spv_decl_index = try self.object.resolveNav(mod, fn_nav);
+                const spv_decl_index = try self.object.resolveNav(zcu, fn_nav);
                 try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
                 return self.spv.declPtr(spv_decl_index).result_id;
             }
@@ -459,8 +459,8 @@ const NavGen = struct {
     fn resolveUav(self: *NavGen, val: InternPool.Index) !IdRef {
         // TODO: This cannot be a function at this point, but it should probably be handled anyway.
 
-        const mod = self.pt.zcu;
-        const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
+        const zcu = self.pt.zcu;
+        const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
         const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
 
         const spv_decl_index = blk: {
@@ -639,15 +639,15 @@ const NavGen = struct {
 
     /// Checks whether the type can be directly translated to a SPIR-V vector
     fn isSpvVector(self: *NavGen, ty: Type) bool {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const target = self.getTarget();
-        if (ty.zigTypeTag(mod) != .Vector) return false;
+        if (ty.zigTypeTag(zcu) != .Vector) return false;
 
         // TODO: This check must be expanded for types that can be represented
         // as integers (enums / packed structs?) and types that are represented
         // by multiple SPIR-V values.
-        const scalar_ty = ty.scalarType(mod);
-        switch (scalar_ty.zigTypeTag(mod)) {
+        const scalar_ty = ty.scalarType(zcu);
+        switch (scalar_ty.zigTypeTag(zcu)) {
             .Bool,
             .Int,
             .Float,
@@ -655,24 +655,24 @@ const NavGen = struct {
             else => return false,
         }
 
-        const elem_ty = ty.childType(mod);
+        const elem_ty = ty.childType(zcu);
 
-        const len = ty.vectorLen(mod);
-        const is_scalar = elem_ty.isNumeric(mod) or elem_ty.toIntern() == .bool_type;
+        const len = ty.vectorLen(zcu);
+        const is_scalar = elem_ty.isNumeric(zcu) or elem_ty.toIntern() == .bool_type;
         const spirv_len = len > 1 and len <= 4;
         const opencl_len = if (target.os.tag == .opencl) (len == 8 or len == 16) else false;
         return is_scalar and (spirv_len or opencl_len);
     }
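 
     // For example: @Vector(4, f32) maps directly to an OpTypeVector, while
     // @Vector(5, f32) does not (core SPIR-V only allows lengths 2 through 4);
     // @Vector(8, i32) qualifies only when targeting OpenCL, which also
     // permits lengths 8 and 16.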
 
     fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const target = self.getTarget();
-        var scalar_ty = ty.scalarType(mod);
-        if (scalar_ty.zigTypeTag(mod) == .Enum) {
-            scalar_ty = scalar_ty.intTagType(mod);
+        var scalar_ty = ty.scalarType(zcu);
+        if (scalar_ty.zigTypeTag(zcu) == .Enum) {
+            scalar_ty = scalar_ty.intTagType(zcu);
         }
-        const vector_len = if (ty.isVector(mod)) ty.vectorLen(mod) else null;
-        return switch (scalar_ty.zigTypeTag(mod)) {
+        const vector_len = if (ty.isVector(zcu)) ty.vectorLen(zcu) else null;
+        return switch (scalar_ty.zigTypeTag(zcu)) {
             .Bool => ArithmeticTypeInfo{
                 .bits = 1, // Doesn't matter for this class.
                 .backing_bits = self.backingIntBits(1).?,
@@ -688,7 +688,7 @@ const NavGen = struct {
                 .class = .float,
             },
             .Int => blk: {
-                const int_info = scalar_ty.intInfo(mod);
+                const int_info = scalar_ty.intInfo(zcu);
                 // TODO: Maybe it's useful to also return this value.
                 const maybe_backing_bits = self.backingIntBits(int_info.bits);
                 break :blk ArithmeticTypeInfo{
@@ -741,9 +741,9 @@ const NavGen = struct {
     /// the value to an unsigned int first for Kernels.
     fn constInt(self: *NavGen, ty: Type, value: anytype, repr: Repr) !IdRef {
         // TODO: Cache?
-        const mod = self.pt.zcu;
-        const scalar_ty = ty.scalarType(mod);
-        const int_info = scalar_ty.intInfo(mod);
+        const zcu = self.pt.zcu;
+        const scalar_ty = ty.scalarType(zcu);
+        const int_info = scalar_ty.intInfo(zcu);
         // Use backing bits so that negatives are sign-extended
         const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
 
@@ -783,11 +783,11 @@ const NavGen = struct {
             else => unreachable, // TODO: Large integer constants
         }
 
-        if (!ty.isVector(mod)) {
+        if (!ty.isVector(zcu)) {
             return result_id;
         }
 
-        const n = ty.vectorLen(mod);
+        const n = ty.vectorLen(zcu);
         const ids = try self.gpa.alloc(IdRef, n);
         defer self.gpa.free(ids);
         @memset(ids, result_id);
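 
         // E.g. a @Vector(4, u8) constant with value 0xAA materializes the
         // scalar constant once, then builds the vector from four copies of
         // that result-id.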
@@ -821,8 +821,8 @@ const NavGen = struct {
     /// Construct a vector at runtime.
     /// ty must be a vector type.
     fn constructVector(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef {
-        const mod = self.pt.zcu;
-        assert(ty.vectorLen(mod) == constituents.len);
+        const zcu = self.pt.zcu;
+        assert(ty.vectorLen(zcu) == constituents.len);
 
         // Note: older versions of the Khronos SPIRV-LLVM translator crash on this instruction
         // because it cannot construct structs whose operands are not constant.
@@ -845,8 +845,8 @@ const NavGen = struct {
     /// Construct a vector at runtime with all lanes set to the same value.
     /// ty must be a vector type.
     fn constructVectorSplat(self: *NavGen, ty: Type, constituent: IdRef) !IdRef {
-        const mod = self.pt.zcu;
-        const n = ty.vectorLen(mod);
+        const zcu = self.pt.zcu;
+        const n = ty.vectorLen(zcu);
 
         const constituents = try self.gpa.alloc(IdRef, n);
         defer self.gpa.free(constituents);
@@ -884,13 +884,13 @@ const NavGen = struct {
         }
 
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const target = self.getTarget();
         const result_ty_id = try self.resolveType(ty, repr);
-        const ip = &mod.intern_pool;
+        const ip = &zcu.intern_pool;
 
         log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(pt), val.fmtValue(pt) });
-        if (val.isUndefDeep(mod)) {
+        if (val.isUndefDeep(zcu)) {
             return self.spv.constUndef(result_ty_id);
         }
 
@@ -937,17 +937,17 @@ const NavGen = struct {
                     .false, .true => break :cache try self.constBool(val.toBool(), repr),
                 },
                 .int => {
-                    if (ty.isSignedInt(mod)) {
-                        break :cache try self.constInt(ty, val.toSignedInt(pt), repr);
+                    if (ty.isSignedInt(zcu)) {
+                        break :cache try self.constInt(ty, val.toSignedInt(zcu), repr);
                     } else {
-                        break :cache try self.constInt(ty, val.toUnsignedInt(pt), repr);
+                        break :cache try self.constInt(ty, val.toUnsignedInt(zcu), repr);
                     }
                 },
                 .float => {
                     const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
-                        16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, pt))) },
-                        32 => .{ .float32 = val.toFloat(f32, pt) },
-                        64 => .{ .float64 = val.toFloat(f64, pt) },
+                        16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, zcu))) },
+                        32 => .{ .float32 = val.toFloat(f32, zcu) },
+                        64 => .{ .float64 = val.toFloat(f64, zcu) },
                         80, 128 => unreachable, // TODO
                         else => unreachable,
                     };
@@ -968,17 +968,17 @@ const NavGen = struct {
                     // allows it. For now, just generate it here regardless.
                     const err_int_ty = try pt.errorIntType();
                     const err_ty = switch (error_union.val) {
-                        .err_name => ty.errorUnionSet(mod),
+                        .err_name => ty.errorUnionSet(zcu),
                         .payload => err_int_ty,
                     };
                     const err_val = switch (error_union.val) {
                         .err_name => |err_name| Value.fromInterned(try pt.intern(.{ .err = .{
-                            .ty = ty.errorUnionSet(mod).toIntern(),
+                            .ty = ty.errorUnionSet(zcu).toIntern(),
                             .name = err_name,
                         } })),
                         .payload => try pt.intValue(err_int_ty, 0),
                     };
-                    const payload_ty = ty.errorUnionPayload(mod);
+                    const payload_ty = ty.errorUnionPayload(zcu);
                     const eu_layout = self.errorUnionLayout(payload_ty);
                     if (!eu_layout.payload_has_bits) {
                         // We use the error type directly as the type.
@@ -1006,12 +1006,12 @@ const NavGen = struct {
                 },
                 .enum_tag => {
                     const int_val = try val.intFromEnum(ty, pt);
-                    const int_ty = ty.intTagType(mod);
+                    const int_ty = ty.intTagType(zcu);
                     break :cache try self.constant(int_ty, int_val, repr);
                 },
                 .ptr => return self.constantPtr(val),
                 .slice => |slice| {
-                    const ptr_ty = ty.slicePtrFieldType(mod);
+                    const ptr_ty = ty.slicePtrFieldType(zcu);
                     const ptr_id = try self.constantPtr(Value.fromInterned(slice.ptr));
                     const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
                     return self.constructStruct(
@@ -1021,12 +1021,12 @@ const NavGen = struct {
                     );
                 },
                 .opt => {
-                    const payload_ty = ty.optionalChild(mod);
-                    const maybe_payload_val = val.optionalValue(mod);
+                    const payload_ty = ty.optionalChild(zcu);
+                    const maybe_payload_val = val.optionalValue(zcu);
 
-                    if (!payload_ty.hasRuntimeBits(pt)) {
+                    if (!payload_ty.hasRuntimeBits(zcu)) {
                         break :cache try self.constBool(maybe_payload_val != null, .indirect);
-                    } else if (ty.optionalReprIsPayload(mod)) {
+                    } else if (ty.optionalReprIsPayload(zcu)) {
                         // Optional representation is a nullable pointer or slice.
                         if (maybe_payload_val) |payload_val| {
                             return try self.constant(payload_ty, payload_val, .indirect);
@@ -1054,7 +1054,7 @@ const NavGen = struct {
                     inline .array_type, .vector_type => |array_type, tag| {
                         const elem_ty = Type.fromInterned(array_type.child);
 
-                        const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod)));
+                        const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(zcu)));
                         defer self.gpa.free(constituents);
 
                         const child_repr: Repr = switch (tag) {
@@ -1088,7 +1088,7 @@ const NavGen = struct {
                         }
                     },
                     .struct_type => {
-                        const struct_type = mod.typeToStruct(ty).?;
+                        const struct_type = zcu.typeToStruct(ty).?;
                         if (struct_type.layout == .@"packed") {
                             return self.todo("packed struct constants", .{});
                         }
@@ -1102,7 +1102,7 @@ const NavGen = struct {
                         var it = struct_type.iterateRuntimeOrder(ip);
                         while (it.next()) |field_index| {
                             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                            if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                            if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                                 // This is a zero-bit field - we only needed it for the alignment.
                                 continue;
                             }
@@ -1121,10 +1121,10 @@ const NavGen = struct {
                     else => unreachable,
                 },
                 .un => |un| {
-                    const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
-                    const union_obj = mod.typeToUnion(ty).?;
+                    const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
+                    const union_obj = zcu.typeToUnion(ty).?;
                     const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
-                    const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt))
+                    const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
                         try self.constant(field_ty, Value.fromInterned(un.val), .direct)
                     else
                         null;
@@ -1232,8 +1232,8 @@ const NavGen = struct {
         // TODO: Merge this function with constantDeclRef.
 
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_id = try self.resolveType(ty, .direct);
         const uav_ty = Type.fromInterned(ip.typeOf(uav.val));
 
@@ -1243,14 +1243,14 @@ const NavGen = struct {
             else => {},
         }
 
-        // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
-        if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+        // const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
+        if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
             // Pointer to nothing - return undefined
             return self.spv.constUndef(ty_id);
         }
 
         // Uav refs are always generic.
-        assert(ty.ptrAddressSpace(mod) == .generic);
+        assert(ty.ptrAddressSpace(zcu) == .generic);
         const decl_ptr_ty_id = try self.ptrType(uav_ty, .Generic);
         const ptr_id = try self.resolveUav(uav.val);
 
@@ -1270,12 +1270,12 @@ const NavGen = struct {
 
     fn constantNavRef(self: *NavGen, ty: Type, nav_index: InternPool.Nav.Index) !IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_id = try self.resolveType(ty, .direct);
         const nav = ip.getNav(nav_index);
-        const nav_val = mod.navValue(nav_index);
-        const nav_ty = nav_val.typeOf(mod);
+        const nav_val = zcu.navValue(nav_index);
+        const nav_ty = nav_val.typeOf(zcu);
 
         switch (ip.indexToKey(nav_val.toIntern())) {
             .func => {
@@ -1287,12 +1287,12 @@ const NavGen = struct {
             else => {},
         }
 
-        if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+        if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
             // Pointer to nothing - return undefined.
             return self.spv.constUndef(ty_id);
         }
 
-        const spv_decl_index = try self.object.resolveNav(mod, nav_index);
+        const spv_decl_index = try self.object.resolveNav(zcu, nav_index);
         const spv_decl = self.spv.declPtr(spv_decl_index);
 
         const decl_id = switch (spv_decl.kind) {
@@ -1452,9 +1452,9 @@ const NavGen = struct {
     ///  }
     /// If any field has size 0, it is omitted.
     fn resolveUnionType(self: *NavGen, ty: Type) !IdRef {
-        const mod = self.pt.zcu;
-        const ip = &mod.intern_pool;
-        const union_obj = mod.typeToUnion(ty).?;
+        const zcu = self.pt.zcu;
+        const ip = &zcu.intern_pool;
+        const union_obj = zcu.typeToUnion(ty).?;
 
         if (union_obj.flagsUnordered(ip).layout == .@"packed") {
             return self.todo("packed union types", .{});
@@ -1503,12 +1503,12 @@ const NavGen = struct {
     }
 
     fn resolveFnReturnType(self: *NavGen, ret_ty: Type) !IdRef {
-        const pt = self.pt;
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        const zcu = self.pt.zcu;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             // If the return type is an error set or an error union, then we make this
             // anyerror return type instead, so that it can be coerced into a function
             // pointer type which has anyerror as the return type.
-            if (ret_ty.isError(pt.zcu)) {
+            if (ret_ty.isError(zcu)) {
                 return self.resolveType(Type.anyerror, .direct);
             } else {
                 return self.resolveType(Type.void, .direct);
@@ -1531,14 +1531,14 @@ const NavGen = struct {
 
     fn resolveTypeInner(self: *NavGen, ty: Type, repr: Repr) Error!IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         log.debug("resolveType: ty = {}", .{ty.fmt(pt)});
         const target = self.getTarget();
 
         const section = &self.spv.sections.types_globals_constants;
 
-        switch (ty.zigTypeTag(mod)) {
+        switch (ty.zigTypeTag(zcu)) {
             .NoReturn => {
                 assert(repr == .direct);
                 return try self.spv.voidType();
@@ -1562,7 +1562,7 @@ const NavGen = struct {
                 .indirect => return try self.resolveType(Type.u1, .indirect),
             },
             .Int => {
-                const int_info = ty.intInfo(mod);
+                const int_info = ty.intInfo(zcu);
                 if (int_info.bits == 0) {
                     // Sometimes, the backend will be asked to generate a pointer to i0. OpTypeInt
                     // with 0 bits is invalid, so return an opaque type in this case.
@@ -1577,7 +1577,7 @@ const NavGen = struct {
                 return try self.intType(int_info.signedness, int_info.bits);
             },
             .Enum => {
-                const tag_ty = ty.intTagType(mod);
+                const tag_ty = ty.intTagType(zcu);
                 return try self.resolveType(tag_ty, repr);
             },
             .Float => {
@@ -1599,13 +1599,13 @@ const NavGen = struct {
                 return try self.spv.floatType(bits);
             },
             .Array => {
-                const elem_ty = ty.childType(mod);
+                const elem_ty = ty.childType(zcu);
                 const elem_ty_id = try self.resolveType(elem_ty, .indirect);
-                const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
-                    return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
+                const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(zcu)) orelse {
+                    return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(zcu)});
                 };
 
-                if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     // The size of the array would be 0, but that is not allowed in SPIR-V.
                     // This path can be reached when the backend is asked to generate a pointer to
                     // an array of some zero-bit type. This should always be an indirect path.
@@ -1635,7 +1635,7 @@ const NavGen = struct {
             },
             .Fn => switch (repr) {
                 .direct => {
-                    const fn_info = mod.typeToFunc(ty).?;
+                    const fn_info = zcu.typeToFunc(ty).?;
 
                     comptime assert(zig_call_abi_ver == 3);
                     switch (fn_info.cc) {
@@ -1653,7 +1653,7 @@ const NavGen = struct {
                     var param_index: usize = 0;
                     for (fn_info.param_types.get(ip)) |param_ty_index| {
                         const param_ty = Type.fromInterned(param_ty_index);
-                        if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                        if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         param_ty_ids[param_index] = try self.resolveType(param_ty, .direct);
                         param_index += 1;
@@ -1677,7 +1677,7 @@ const NavGen = struct {
                 },
             },
             .Pointer => {
-                const ptr_info = ty.ptrInfo(mod);
+                const ptr_info = ty.ptrInfo(zcu);
 
                 const storage_class = self.spvStorageClass(ptr_info.flags.address_space);
                 const ptr_ty_id = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
@@ -1693,9 +1693,9 @@ const NavGen = struct {
                 );
             },
             .Vector => {
-                const elem_ty = ty.childType(mod);
+                const elem_ty = ty.childType(zcu);
                 const elem_ty_id = try self.resolveType(elem_ty, repr);
-                const len = ty.vectorLen(mod);
+                const len = ty.vectorLen(zcu);
 
                 if (self.isSpvVector(ty)) {
                     return try self.spv.vectorType(len, elem_ty_id);
@@ -1711,7 +1711,7 @@ const NavGen = struct {
 
                         var member_index: usize = 0;
                         for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
-                            if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+                            if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
 
                             member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect);
                             member_index += 1;
@@ -1740,13 +1740,13 @@ const NavGen = struct {
                 var it = struct_type.iterateRuntimeOrder(ip);
                 while (it.next()) |field_index| {
                     const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                         // This is a zero-bit field - we only needed it for the alignment.
                         continue;
                     }
 
                     const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
-                        try ip.getOrPutStringFmt(mod.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+                        try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
                     try member_types.append(try self.resolveType(field_ty, .indirect));
                     try member_names.append(field_name.toSlice(ip));
                 }
@@ -1758,8 +1758,8 @@ const NavGen = struct {
                 return result_id;
             },
             .Optional => {
-                const payload_ty = ty.optionalChild(mod);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                const payload_ty = ty.optionalChild(zcu);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     // Just use a bool.
                     // Note: Always generate the bool in the indirect representation here;
                     // perform the conversion to a direct bool when the field is extracted.
@@ -1767,7 +1767,7 @@ const NavGen = struct {
                 }
 
                 const payload_ty_id = try self.resolveType(payload_ty, .indirect);
-                if (ty.optionalReprIsPayload(mod)) {
+                if (ty.optionalReprIsPayload(zcu)) {
                     // Optional is actually a pointer or a slice.
                     return payload_ty_id;
                 }
@@ -1782,7 +1782,7 @@ const NavGen = struct {
             .Union => return try self.resolveUnionType(ty),
             .ErrorSet => return try self.resolveType(Type.u16, repr),
             .ErrorUnion => {
-                const payload_ty = ty.errorUnionPayload(mod);
+                const payload_ty = ty.errorUnionPayload(zcu);
                 const error_ty_id = try self.resolveType(Type.anyerror, .indirect);
 
                 const eu_layout = self.errorUnionLayout(payload_ty);
@@ -1877,13 +1877,14 @@ const NavGen = struct {
 
     fn errorUnionLayout(self: *NavGen, payload_ty: Type) ErrorUnionLayout {
         const pt = self.pt;
+        const zcu = pt.zcu;
 
-        const error_align = Type.anyerror.abiAlignment(pt);
-        const payload_align = payload_ty.abiAlignment(pt);
+        const error_align = Type.anyerror.abiAlignment(zcu);
+        const payload_align = payload_ty.abiAlignment(zcu);
 
         const error_first = error_align.compare(.gt, payload_align);
         return .{
-            .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt),
+            .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu),
             .error_first = error_first,
         };
     }
@@ -1908,10 +1909,10 @@ const NavGen = struct {
 
     fn unionLayout(self: *NavGen, ty: Type) UnionLayout {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
-        const layout = ty.unionGetLayout(pt);
-        const union_obj = mod.typeToUnion(ty).?;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
+        const layout = ty.unionGetLayout(zcu);
+        const union_obj = zcu.typeToUnion(ty).?;
 
         var union_layout = UnionLayout{
             .has_payload = layout.payload_size != 0,
@@ -1931,7 +1932,7 @@ const NavGen = struct {
             const most_aligned_field = layout.most_aligned_field;
             const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]);
             union_layout.payload_ty = most_aligned_field_ty;
-            union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(pt));
+            union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(zcu));
         } else {
             union_layout.payload_size = 0;
         }
@@ -1998,12 +1999,12 @@ const NavGen = struct {
         }
 
         fn materialize(self: Temporary, ng: *NavGen) !IdResult {
-            const mod = ng.pt.zcu;
+            const zcu = ng.pt.zcu;
             switch (self.value) {
                 .singleton => |id| return id,
                 .exploded_vector => |range| {
-                    assert(self.ty.isVector(mod));
-                    assert(self.ty.vectorLen(mod) == range.len);
+                    assert(self.ty.isVector(zcu));
+                    assert(self.ty.vectorLen(zcu) == range.len);
                     const constituents = try ng.gpa.alloc(IdRef, range.len);
                     defer ng.gpa.free(constituents);
                     for (constituents, 0..range.len) |*id, i| {
@@ -2028,18 +2029,18 @@ const NavGen = struct {
         /// 'Explode' a temporary into separate elements. This turns a vector
         /// into a bag of elements.
         fn explode(self: Temporary, ng: *NavGen) !IdRange {
-            const mod = ng.pt.zcu;
+            const zcu = ng.pt.zcu;
 
             // If the value is a scalar, then this is a no-op.
-            if (!self.ty.isVector(mod)) {
+            if (!self.ty.isVector(zcu)) {
                 return switch (self.value) {
                     .singleton => |id| .{ .base = @intFromEnum(id), .len = 1 },
                     .exploded_vector => |range| range,
                 };
             }
 
-            const ty_id = try ng.resolveType(self.ty.scalarType(mod), .direct);
-            const n = self.ty.vectorLen(mod);
+            const ty_id = try ng.resolveType(self.ty.scalarType(zcu), .direct);
+            const n = self.ty.vectorLen(zcu);
             const results = ng.spv.allocIds(n);
 
             const id = switch (self.value) {
@@ -2087,13 +2088,13 @@ const NavGen = struct {
         /// only checks the size, but the source-of-truth is implemented
         /// by `isSpvVector()`.
         fn fromType(ty: Type, ng: *NavGen) Vectorization {
-            const mod = ng.pt.zcu;
-            if (!ty.isVector(mod)) {
+            const zcu = ng.pt.zcu;
+            if (!ty.isVector(zcu)) {
                 return .scalar;
             } else if (ng.isSpvVector(ty)) {
-                return .{ .spv_vectorized = ty.vectorLen(mod) };
+                return .{ .spv_vectorized = ty.vectorLen(zcu) };
             } else {
-                return .{ .unrolled = ty.vectorLen(mod) };
+                return .{ .unrolled = ty.vectorLen(zcu) };
             }
         }
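The three-way classification reduced to its inputs, as a sketch (`is_spv_vector` stands in for `isSpvVector()`):

    const Kind = union(enum) {
        scalar,
        spv_vectorized: u32, // lowered to a native SPIR-V vector of this length
        unrolled: u32, // operated on element-wise, one operation per lane
    };

    fn classify(is_vector: bool, is_spv_vector: bool, len: u32) Kind {
        if (!is_vector) return .scalar;
        return if (is_spv_vector) .{ .spv_vectorized = len } else .{ .unrolled = len };
    }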
 
@@ -2339,10 +2340,10 @@ const NavGen = struct {
     /// This function builds an OpSConvert or OpUConvert depending on the
     /// signedness of the types.
     fn buildIntConvert(self: *NavGen, dst_ty: Type, src: Temporary) !Temporary {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
 
-        const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct);
-        const src_ty_id = try self.resolveType(src.ty.scalarType(mod), .direct);
+        const dst_ty_id = try self.resolveType(dst_ty.scalarType(zcu), .direct);
+        const src_ty_id = try self.resolveType(src.ty.scalarType(zcu), .direct);
 
         const v = self.vectorization(.{ dst_ty, src });
         const result_ty = try v.resultType(self, dst_ty);
@@ -2363,7 +2364,7 @@ const NavGen = struct {
         const op_result_ty = try v.operationType(self, dst_ty);
         const op_result_ty_id = try self.resolveType(op_result_ty, .direct);
 
-        const opcode: Opcode = if (dst_ty.isSignedInt(mod)) .OpSConvert else .OpUConvert;
+        const opcode: Opcode = if (dst_ty.isSignedInt(zcu)) .OpSConvert else .OpUConvert;
 
         const op_src = try v.prepare(self, src);
 
@@ -2418,7 +2419,7 @@ const NavGen = struct {
     }
 
     fn buildSelect(self: *NavGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
 
         const v = self.vectorization(.{ condition, lhs, rhs });
         const ops = v.operations();
@@ -2428,7 +2429,7 @@ const NavGen = struct {
         const op_result_ty_id = try self.resolveType(op_result_ty, .direct);
         const result_ty = try v.resultType(self, lhs.ty);
 
-        assert(condition.ty.scalarType(mod).zigTypeTag(mod) == .Bool);
+        assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .Bool);
 
         const cond = try v.prepare(self, condition);
         const object_1 = try v.prepare(self, lhs);
@@ -2764,9 +2765,9 @@ const NavGen = struct {
         rhs: Temporary,
     ) !struct { Temporary, Temporary } {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const target = self.getTarget();
-        const ip = &mod.intern_pool;
+        const ip = &zcu.intern_pool;
 
         const v = lhs.vectorization(self).unify(rhs.vectorization(self));
         const ops = v.operations();
@@ -2814,7 +2815,7 @@ const NavGen = struct {
                     // where T is maybe vectorized.
                     const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() };
                     const values = [2]InternPool.Index{ .none, .none };
-                    const index = try ip.getAnonStructType(mod.gpa, pt.tid, .{
+                    const index = try ip.getAnonStructType(zcu.gpa, pt.tid, .{
                         .types = &types,
                         .values = &values,
                         .names = &.{},
@@ -2941,17 +2942,17 @@ const NavGen = struct {
 
     fn genNav(self: *NavGen) !void {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
-        const spv_decl_index = try self.object.resolveNav(mod, self.owner_nav);
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
+        const spv_decl_index = try self.object.resolveNav(zcu, self.owner_nav);
         const result_id = self.spv.declPtr(spv_decl_index).result_id;
 
         const nav = ip.getNav(self.owner_nav);
-        const val = mod.navValue(self.owner_nav);
-        const ty = val.typeOf(mod);
+        const val = zcu.navValue(self.owner_nav);
+        const ty = val.typeOf(zcu);
         switch (self.spv.declPtr(spv_decl_index).kind) {
             .func => {
-                const fn_info = mod.typeToFunc(ty).?;
+                const fn_info = zcu.typeToFunc(ty).?;
                 const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
 
                 const prototype_ty_id = try self.resolveType(ty, .direct);
@@ -2969,7 +2970,7 @@ const NavGen = struct {
                 try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
                 for (fn_info.param_types.get(ip)) |param_ty_index| {
                     const param_ty = Type.fromInterned(param_ty_index);
-                    if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+                    if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                     const param_type_id = try self.resolveType(param_ty, .direct);
                     const arg_result_id = self.spv.allocId();
@@ -3116,8 +3117,8 @@ const NavGen = struct {
     /// Convert representation from indirect (in memory) to direct (in 'register')
     /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
     fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
-        const mod = self.pt.zcu;
-        switch (ty.scalarType(mod).zigTypeTag(mod)) {
+        const zcu = self.pt.zcu;
+        switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
             .Bool => {
                 const false_id = try self.constBool(false, .indirect);
                 // The operation below requires inputs in direct representation, but the operand
@@ -3142,8 +3143,8 @@ const NavGen = struct {
     /// Convert representation from direct (in 'register') to indirect (in memory)
     /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
     fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
-        const mod = self.pt.zcu;
-        switch (ty.scalarType(mod).zigTypeTag(mod)) {
+        const zcu = self.pt.zcu;
+        switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
             .Bool => {
                 const result = try self.intFromBool(Temporary.init(ty, operand_id));
                 return try result.materialize(self);
@@ -3219,8 +3220,8 @@ const NavGen = struct {
     }
 
     fn genInst(self: *NavGen, inst: Air.Inst.Index) !void {
-        const mod = self.pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = self.pt.zcu;
+        const ip = &zcu.intern_pool;
         if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             return;
 
@@ -3399,7 +3400,7 @@ const NavGen = struct {
     }
 
     fn airShift(self: *NavGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
         const base = try self.temporary(bin_op.lhs);
@@ -3420,7 +3421,7 @@ const NavGen = struct {
         // Note: The sign may differ here between the shift and the base type, in case
         // of an arithmetic right shift. SPIR-V still expects the same type,
         // so in that case we have to convert the shift amount to signed.
-        const casted_shift = try self.buildIntConvert(base.ty.scalarType(mod), shift);
+        const casted_shift = try self.buildIntConvert(base.ty.scalarType(zcu), shift);
 
         const shifted = switch (info.signedness) {
             .unsigned => try self.buildBinary(unsigned, base, casted_shift),
@@ -3477,7 +3478,7 @@ const NavGen = struct {
     /// All other values are returned unmodified (this makes strange integer
     /// wrapping easier to use in generic operations).
     fn normalize(self: *NavGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ty = value.ty;
         switch (info.class) {
             .integer, .bool, .float => return value,
@@ -3485,13 +3486,13 @@ const NavGen = struct {
             .strange_integer => switch (info.signedness) {
                 .unsigned => {
                     const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1;
-                    const mask_id = try self.constInt(ty.scalarType(mod), mask_value, .direct);
-                    return try self.buildBinary(.bit_and, value, Temporary.init(ty.scalarType(mod), mask_id));
+                    const mask_id = try self.constInt(ty.scalarType(zcu), mask_value, .direct);
+                    return try self.buildBinary(.bit_and, value, Temporary.init(ty.scalarType(zcu), mask_id));
                 },
                 .signed => {
                     // Shift left and right so that we can copy the sign bit that way.
-                    const shift_amt_id = try self.constInt(ty.scalarType(mod), info.backing_bits - info.bits, .direct);
-                    const shift_amt = Temporary.init(ty.scalarType(mod), shift_amt_id);
+                    const shift_amt_id = try self.constInt(ty.scalarType(zcu), info.backing_bits - info.bits, .direct);
+                    const shift_amt = Temporary.init(ty.scalarType(zcu), shift_amt_id);
                     const left = try self.buildBinary(.sll, value, shift_amt);
                     return try self.buildBinary(.sra, left, shift_amt);
                 },
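Both normalization paths as a standalone sketch over a 64-bit backing register, assuming `1 <= bits <= 64` (hypothetical helpers):

    /// Unsigned: mask away the bits above the strange int's width.
    fn normalizeUnsigned(value: u64, bits: u7) u64 {
        if (bits >= 64) return value;
        const mask = (@as(u64, 1) << @as(u6, @intCast(bits))) - 1;
        return value & mask;
    }

    /// Signed: shift the strange int's sign bit up to bit 63, then
    /// arithmetic-shift it back down so it fills the upper bits.
    fn normalizeSigned(value: u64, bits: u7) i64 {
        const shift: u6 = @intCast(64 - bits);
        return @as(i64, @bitCast(value << shift)) >> shift;
    }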
@@ -3897,7 +3898,7 @@ const NavGen = struct {
     }
 
     fn airShlOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
 
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3916,7 +3917,7 @@ const NavGen = struct {
 
         // Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that,
         // so just manually upcast it if required.
-        const casted_shift = try self.buildIntConvert(base.ty.scalarType(mod), shift);
+        const casted_shift = try self.buildIntConvert(base.ty.scalarType(zcu), shift);
 
         const left = try self.buildBinary(.sll, base, casted_shift);
         const result = try self.normalize(left, info);
@@ -3955,12 +3956,12 @@ const NavGen = struct {
     fn airClzCtz(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;
 
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const target = self.getTarget();
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand = try self.temporary(ty_op.operand);
 
-        const scalar_result_ty = self.typeOfIndex(inst).scalarType(mod);
+        const scalar_result_ty = self.typeOfIndex(inst).scalarType(zcu);
 
         const info = self.arithmeticTypeInfo(operand.ty);
         switch (info.class) {
@@ -4004,16 +4005,16 @@ const NavGen = struct {
     }
 
     fn airReduce(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
         const operand = try self.resolve(reduce.operand);
         const operand_ty = self.typeOf(reduce.operand);
-        const scalar_ty = operand_ty.scalarType(mod);
+        const scalar_ty = operand_ty.scalarType(zcu);
         const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
 
         const info = self.arithmeticTypeInfo(operand_ty);
 
-        const len = operand_ty.vectorLen(mod);
+        const len = operand_ty.vectorLen(zcu);
 
         const first = try self.extractVectorComponent(scalar_ty, operand, 0);
 
@@ -4080,7 +4081,7 @@ const NavGen = struct {
 
     fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
         const a = try self.resolve(extra.a);
@@ -4092,7 +4093,7 @@ const NavGen = struct {
         const a_ty = self.typeOf(extra.a);
         const b_ty = self.typeOf(extra.b);
 
-        const scalar_ty = result_ty.scalarType(mod);
+        const scalar_ty = result_ty.scalarType(zcu);
         const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
 
         // If all of the types are SPIR-V vectors, we can use OpVectorShuffle.
@@ -4100,20 +4101,20 @@ const NavGen = struct {
             // The SPIR-V shuffle instruction is similar to the Air instruction, except that the elements are
             // numbered consecutively instead of using negatives.
 
-            const components = try self.gpa.alloc(Word, result_ty.vectorLen(mod));
+            const components = try self.gpa.alloc(Word, result_ty.vectorLen(zcu));
             defer self.gpa.free(components);
 
-            const a_len = a_ty.vectorLen(mod);
+            const a_len = a_ty.vectorLen(zcu);
 
             for (components, 0..) |*component, i| {
                 const elem = try mask.elemValue(pt, i);
-                if (elem.isUndef(mod)) {
+                if (elem.isUndef(zcu)) {
                     // This is explicitly valid for OpVectorShuffle; it indicates an undefined component.
                     component.* = 0xFFFF_FFFF;
                     continue;
                 }
 
-                const index = elem.toSignedInt(pt);
+                const index = elem.toSignedInt(zcu);
                 if (index >= 0) {
                     component.* = @intCast(index);
                 } else {
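The index translation performed by this loop and the fallback below, in isolation (a sketch; the negative-index encoding for `b` is inferred from the surrounding arithmetic and is an assumption):

    /// Air indexes `a` with non-negative mask values and `b` with negative
    /// ones; OpVectorShuffle numbers `b`'s elements directly after `a`'s,
    /// and 0xFFFF_FFFF marks an undefined component.
    fn toSpvComponent(mask_elem: ?i64, a_len: u32) u32 {
        const elem = mask_elem orelse return 0xFFFF_FFFF;
        if (elem >= 0) return @intCast(elem);
        return a_len + @as(u32, @intCast(-elem - 1));
    }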
@@ -4134,17 +4135,17 @@ const NavGen = struct {
 
         // Fall back to manually extracting and inserting components.
 
-        const components = try self.gpa.alloc(IdRef, result_ty.vectorLen(mod));
+        const components = try self.gpa.alloc(IdRef, result_ty.vectorLen(zcu));
         defer self.gpa.free(components);
 
         for (components, 0..) |*id, i| {
             const elem = try mask.elemValue(pt, i);
-            if (elem.isUndef(mod)) {
+            if (elem.isUndef(zcu)) {
                 id.* = try self.spv.constUndef(scalar_ty_id);
                 continue;
             }
 
-            const index = elem.toSignedInt(pt);
+            const index = elem.toSignedInt(zcu);
             if (index >= 0) {
                 id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index));
             } else {
@@ -4218,10 +4219,10 @@ const NavGen = struct {
     }
 
     fn ptrAdd(self: *NavGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const result_ty_id = try self.resolveType(result_ty, .direct);
 
-        switch (ptr_ty.ptrSize(mod)) {
+        switch (ptr_ty.ptrSize(zcu)) {
             .One => {
                 // Pointer to array
                 // TODO: Is this correct?
@@ -4275,15 +4276,15 @@ const NavGen = struct {
         rhs: Temporary,
     ) !Temporary {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const scalar_ty = lhs.ty.scalarType(mod);
-        const is_vector = lhs.ty.isVector(mod);
+        const zcu = pt.zcu;
+        const scalar_ty = lhs.ty.scalarType(zcu);
+        const is_vector = lhs.ty.isVector(zcu);
 
-        switch (scalar_ty.zigTypeTag(mod)) {
+        switch (scalar_ty.zigTypeTag(zcu)) {
             .Int, .Bool, .Float => {},
             .Enum => {
                 assert(!is_vector);
-                const ty = lhs.ty.intTagType(mod);
+                const ty = lhs.ty.intTagType(zcu);
                 return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
             },
             .ErrorSet => {
@@ -4321,10 +4322,10 @@ const NavGen = struct {
 
                 const ty = lhs.ty;
 
-                const payload_ty = ty.optionalChild(mod);
-                if (ty.optionalReprIsPayload(mod)) {
-                    assert(payload_ty.hasRuntimeBitsIgnoreComptime(pt));
-                    assert(!payload_ty.isSlice(mod));
+                const payload_ty = ty.optionalChild(zcu);
+                if (ty.optionalReprIsPayload(zcu)) {
+                    assert(payload_ty.hasRuntimeBitsIgnoreComptime(zcu));
+                    assert(!payload_ty.isSlice(zcu));
 
                     return try self.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty));
                 }
@@ -4332,12 +4333,12 @@ const NavGen = struct {
                 const lhs_id = try lhs.materialize(self);
                 const rhs_id = try rhs.materialize(self);
 
-                const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+                const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
                     try self.extractField(Type.bool, lhs_id, 1)
                 else
                     try self.convertToDirect(Type.bool, lhs_id);
 
-                const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+                const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
                     try self.extractField(Type.bool, rhs_id, 1)
                 else
                     try self.convertToDirect(Type.bool, rhs_id);
@@ -4345,7 +4346,7 @@ const NavGen = struct {
                 const lhs_valid = Temporary.init(Type.bool, lhs_valid_id);
                 const rhs_valid = Temporary.init(Type.bool, rhs_valid_id);
 
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     return try self.cmp(op, lhs_valid, rhs_valid);
                 }
 
@@ -4465,7 +4466,7 @@ const NavGen = struct {
         src_ty: Type,
         src_id: IdRef,
     ) !IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const src_ty_id = try self.resolveType(src_ty, .direct);
         const dst_ty_id = try self.resolveType(dst_ty, .direct);
 
@@ -4477,7 +4478,7 @@ const NavGen = struct {
             // TODO: Some more cases are missing here
             //   See fn bitCast in llvm.zig
 
-            if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
+            if (src_ty.zigTypeTag(zcu) == .Int and dst_ty.isPtrAtRuntime(zcu)) {
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
                     .id_result_type = dst_ty_id,
@@ -4490,7 +4491,7 @@ const NavGen = struct {
             // We can only use OpBitcast for specific conversions: between numerical types, and
             // between pointers. If the resolved spir-v types fall into this category then emit OpBitcast,
             // otherwise use a temporary and perform a pointer cast.
-            const can_bitcast = (src_ty.isNumeric(mod) and dst_ty.isNumeric(mod)) or (src_ty.isPtrAtRuntime(mod) and dst_ty.isPtrAtRuntime(mod));
+            const can_bitcast = (src_ty.isNumeric(zcu) and dst_ty.isNumeric(zcu)) or (src_ty.isPtrAtRuntime(zcu) and dst_ty.isPtrAtRuntime(zcu));
             if (can_bitcast) {
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
@@ -4519,7 +4520,7 @@ const NavGen = struct {
         // the result here.
         // TODO: This detail could cause stuff like @as(*const i1, @ptrCast(&@as(u1, 1))) to break;
         // should we change the representation of strange integers?
-        if (dst_ty.zigTypeTag(mod) == .Int) {
+        if (dst_ty.zigTypeTag(zcu) == .Int) {
             const info = self.arithmeticTypeInfo(dst_ty);
             const result = try self.normalize(Temporary.init(dst_ty, result_id), info);
             return try result.materialize(self);
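The dispatch above, condensed into a hypothetical predicate (boolean inputs stand in for the type queries):

    const CastKind = enum { convert_u_to_ptr, bitcast, via_memory };

    fn castKind(
        src_is_int: bool,
        src_is_numeric: bool,
        src_is_ptr: bool,
        dst_is_numeric: bool,
        dst_is_ptr: bool,
    ) CastKind {
        if (src_is_int and dst_is_ptr) return .convert_u_to_ptr;
        if ((src_is_numeric and dst_is_numeric) or (src_is_ptr and dst_is_ptr)) return .bitcast;
        return .via_memory; // store to a temporary, then pointer-cast
    }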
@@ -4675,19 +4676,19 @@ const NavGen = struct {
 
     fn airArrayToSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const array_ptr_ty = self.typeOf(ty_op.operand);
-        const array_ty = array_ptr_ty.childType(mod);
+        const array_ty = array_ptr_ty.childType(zcu);
         const slice_ty = self.typeOfIndex(inst);
-        const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
+        const elem_ptr_ty = slice_ty.slicePtrFieldType(zcu);
 
         const elem_ptr_ty_id = try self.resolveType(elem_ptr_ty, .direct);
 
         const array_ptr_id = try self.resolve(ty_op.operand);
-        const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct);
+        const len_id = try self.constInt(Type.usize, array_ty.arrayLen(zcu), .direct);
 
-        const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(pt))
+        const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(zcu))
             // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
             try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
         else
@@ -4720,16 +4721,16 @@ const NavGen = struct {
 
     fn airAggregateInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const result_ty = self.typeOfIndex(inst);
-        const len: usize = @intCast(result_ty.arrayLen(mod));
+        const len: usize = @intCast(result_ty.arrayLen(zcu));
         const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
 
-        switch (result_ty.zigTypeTag(mod)) {
+        switch (result_ty.zigTypeTag(zcu)) {
             .Struct => {
-                if (mod.typeToPackedStruct(result_ty)) |struct_type| {
+                if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
                     _ = struct_type;
                     unreachable; // TODO
                 }
@@ -4744,7 +4745,7 @@ const NavGen = struct {
                     .anon_struct_type => |tuple| {
                         for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
                             if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
-                            assert(Type.fromInterned(field_ty).hasRuntimeBits(pt));
+                            assert(Type.fromInterned(field_ty).hasRuntimeBits(zcu));
 
                             const id = try self.resolve(element);
                             types[index] = Type.fromInterned(field_ty);
@@ -4759,7 +4760,7 @@ const NavGen = struct {
                             const field_index = it.next().?;
                             if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
                             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                            assert(field_ty.hasRuntimeBitsIgnoreComptime(pt));
+                            assert(field_ty.hasRuntimeBitsIgnoreComptime(zcu));
 
                             const id = try self.resolve(element);
                             types[index] = field_ty;
@@ -4777,7 +4778,7 @@ const NavGen = struct {
                 );
             },
             .Vector => {
-                const n_elems = result_ty.vectorLen(mod);
+                const n_elems = result_ty.vectorLen(zcu);
                 const elem_ids = try self.gpa.alloc(IdRef, n_elems);
                 defer self.gpa.free(elem_ids);
 
@@ -4788,8 +4789,8 @@ const NavGen = struct {
                 return try self.constructVector(result_ty, elem_ids);
             },
             .Array => {
-                const array_info = result_ty.arrayInfo(mod);
-                const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(mod));
+                const array_info = result_ty.arrayInfo(zcu);
+                const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(zcu));
                 const elem_ids = try self.gpa.alloc(IdRef, n_elems);
                 defer self.gpa.free(elem_ids);
 
@@ -4810,14 +4811,14 @@ const NavGen = struct {
 
     fn sliceOrArrayLen(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
-        switch (ty.ptrSize(mod)) {
+        const zcu = pt.zcu;
+        switch (ty.ptrSize(zcu)) {
             .Slice => return self.extractField(Type.usize, operand_id, 1),
             .One => {
-                const array_ty = ty.childType(mod);
-                const elem_ty = array_ty.childType(mod);
-                const abi_size = elem_ty.abiSize(pt);
-                const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size;
+                const array_ty = ty.childType(zcu);
+                const elem_ty = array_ty.childType(zcu);
+                const abi_size = elem_ty.abiSize(zcu);
+                const size = array_ty.arrayLenIncludingSentinel(zcu) * abi_size;
                 return try self.constInt(Type.usize, size, .direct);
             },
             .Many, .C => unreachable,
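The `.One` case computes the byte size of the pointed-to array, sentinel included; in isolation (illustrative only):

    const std = @import("std");

    fn arrayByteSize(len_including_sentinel: u64, elem_abi_size: u64) u64 {
        return len_including_sentinel * elem_abi_size;
    }

    test arrayByteSize {
        // e.g. a *[4:0]u16 operand: (4 + 1) elements of 2 bytes each.
        try std.testing.expectEqual(@as(u64, 10), arrayByteSize(4 + 1, @sizeOf(u16)));
    }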
@@ -4825,9 +4826,9 @@ const NavGen = struct {
     }
 
     fn sliceOrArrayPtr(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef {
-        const mod = self.pt.zcu;
-        if (ty.isSlice(mod)) {
-            const ptr_ty = ty.slicePtrFieldType(mod);
+        const zcu = self.pt.zcu;
+        if (ty.isSlice(zcu)) {
+            const ptr_ty = ty.slicePtrFieldType(zcu);
             return self.extractField(ptr_ty, operand_id, 0);
         }
         return operand_id;
@@ -4857,11 +4858,11 @@ const NavGen = struct {
     }
 
     fn airSliceElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const slice_ty = self.typeOf(bin_op.lhs);
-        if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
+        if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null;
 
         const slice_id = try self.resolve(bin_op.lhs);
         const index_id = try self.resolve(bin_op.rhs);
@@ -4874,28 +4875,28 @@ const NavGen = struct {
     }
 
     fn airSliceElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const slice_ty = self.typeOf(bin_op.lhs);
-        if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
+        if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null;
 
         const slice_id = try self.resolve(bin_op.lhs);
         const index_id = try self.resolve(bin_op.rhs);
 
-        const ptr_ty = slice_ty.slicePtrFieldType(mod);
+        const ptr_ty = slice_ty.slicePtrFieldType(zcu);
         const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
 
         const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
         const elem_ptr = try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
-        return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
+        return try self.load(slice_ty.childType(zcu), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(zcu) });
     }
 
     fn ptrElemPtr(self: *NavGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         // Construct new pointer type for the resulting pointer
+        const elem_ty = ptr_ty.elemType2(zcu); // use elemType2() so that we get T for *[N]T.
-        const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
-        if (ptr_ty.isSinglePointer(mod)) {
+        const elem_ty = ptr_ty.elemType2(zcu); // use elemType() so that we get T for *[N]T.
+        const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(zcu)));
+        if (ptr_ty.isSinglePointer(zcu)) {
             // Pointer-to-array. In this case, the resulting pointer is not of the same type
             // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
             return try self.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id});
@@ -4907,14 +4908,14 @@ const NavGen = struct {
 
     fn airPtrElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const src_ptr_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = src_ptr_ty.childType(mod);
+        const elem_ty = src_ptr_ty.childType(zcu);
         const ptr_id = try self.resolve(bin_op.lhs);
 
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             const dst_ptr_ty = self.typeOfIndex(inst);
             return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id);
         }
@@ -4924,10 +4925,10 @@ const NavGen = struct {
     }
 
     fn airArrayElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const array_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = array_ty.childType(mod);
+        const elem_ty = array_ty.childType(zcu);
         const array_id = try self.resolve(bin_op.lhs);
         const index_id = try self.resolve(bin_op.rhs);
 
@@ -4946,7 +4947,7 @@ const NavGen = struct {
         // For now, just generate a temporary and use that.
         // TODO: This backend probably also should use isByRef from llvm...
 
-        const is_vector = array_ty.isVector(mod);
+        const is_vector = array_ty.isVector(zcu);
 
         const elem_repr: Repr = if (is_vector) .direct else .indirect;
         const ptr_array_ty_id = try self.ptrType2(array_ty, .Function, .direct);
@@ -4985,26 +4986,26 @@ const NavGen = struct {
     }
 
     fn airPtrElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = self.typeOfIndex(inst);
         const ptr_id = try self.resolve(bin_op.lhs);
         const index_id = try self.resolve(bin_op.rhs);
         const elem_ptr_id = try self.ptrElemPtr(ptr_ty, ptr_id, index_id);
-        return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+        return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
     }
 
     fn airVectorStoreElem(self: *NavGen, inst: Air.Inst.Index) !void {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
         const extra = self.air.extraData(Air.Bin, data.payload).data;
 
         const vector_ptr_ty = self.typeOf(data.vector_ptr);
-        const vector_ty = vector_ptr_ty.childType(mod);
-        const scalar_ty = vector_ty.scalarType(mod);
+        const vector_ty = vector_ptr_ty.childType(zcu);
+        const scalar_ty = vector_ty.scalarType(zcu);
 
-        const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(mod));
+        const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(zcu));
         const scalar_ptr_ty_id = try self.ptrType(scalar_ty, storage_class);
 
         const vector_ptr = try self.resolve(data.vector_ptr);
@@ -5013,30 +5014,30 @@ const NavGen = struct {
 
         const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
         try self.store(scalar_ty, elem_ptr_id, operand, .{
-            .is_volatile = vector_ptr_ty.isVolatilePtr(mod),
+            .is_volatile = vector_ptr_ty.isVolatilePtr(zcu),
         });
     }
 
     fn airSetUnionTag(self: *NavGen, inst: Air.Inst.Index) !void {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const un_ptr_ty = self.typeOf(bin_op.lhs);
-        const un_ty = un_ptr_ty.childType(mod);
+        const un_ty = un_ptr_ty.childType(zcu);
         const layout = self.unionLayout(un_ty);
 
         if (layout.tag_size == 0) return;
 
-        const tag_ty = un_ty.unionTagTypeSafety(mod).?;
-        const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
+        const tag_ty = un_ty.unionTagTypeSafety(zcu).?;
+        const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(zcu)));
 
         const union_ptr_id = try self.resolve(bin_op.lhs);
         const new_tag_id = try self.resolve(bin_op.rhs);
 
         if (!layout.has_payload) {
-            try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
+            try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) });
         } else {
             const ptr_id = try self.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index});
-            try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
+            try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) });
         }
     }
 
@@ -5044,14 +5045,14 @@ const NavGen = struct {
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const un_ty = self.typeOf(ty_op.operand);
 
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const layout = self.unionLayout(un_ty);
         if (layout.tag_size == 0) return null;
 
         const union_handle = try self.resolve(ty_op.operand);
         if (!layout.has_payload) return union_handle;
 
-        const tag_ty = un_ty.unionTagTypeSafety(mod).?;
+        const tag_ty = un_ty.unionTagTypeSafety(zcu).?;
         return try self.extractField(tag_ty, union_handle, layout.tag_index);
     }
 
@@ -5068,9 +5069,9 @@ const NavGen = struct {
         // Note: The result here is not cached, because it generates runtime code.
 
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
-        const union_ty = mod.typeToUnion(ty).?;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
+        const union_ty = zcu.typeToUnion(ty).?;
         const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
 
         if (union_ty.flagsUnordered(ip).layout == .@"packed") {
@@ -5082,7 +5083,7 @@ const NavGen = struct {
         const tag_int = if (layout.tag_size != 0) blk: {
             const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field);
             const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
-            break :blk tag_int_val.toUnsignedInt(pt);
+            break :blk tag_int_val.toUnsignedInt(zcu);
         } else 0;
 
         if (!layout.has_payload) {
@@ -5099,7 +5100,7 @@ const NavGen = struct {
         }
 
         const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
-        if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
             const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
             const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function);
@@ -5123,15 +5124,15 @@ const NavGen = struct {
 
     fn airUnionInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
-        const ip = &mod.intern_pool;
+        const zcu = pt.zcu;
+        const ip = &zcu.intern_pool;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
         const ty = self.typeOfIndex(inst);
 
-        const union_obj = mod.typeToUnion(ty).?;
+        const union_obj = zcu.typeToUnion(ty).?;
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
-        const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt))
+        const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
             try self.resolve(extra.init)
         else
             null;
@@ -5140,23 +5141,23 @@ const NavGen = struct {
 
     fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
 
         const object_ty = self.typeOf(struct_field.struct_operand);
         const object_id = try self.resolve(struct_field.struct_operand);
         const field_index = struct_field.field_index;
-        const field_ty = object_ty.structFieldType(field_index, mod);
+        const field_ty = object_ty.structFieldType(field_index, zcu);
 
-        if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return null;
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
 
-        switch (object_ty.zigTypeTag(mod)) {
-            .Struct => switch (object_ty.containerLayout(mod)) {
+        switch (object_ty.zigTypeTag(zcu)) {
+            .Struct => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => return try self.extractField(field_ty, object_id, field_index),
             },
-            .Union => switch (object_ty.containerLayout(mod)) {
+            .Union => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => {
                     // Store, ptr-elem-ptr, pointer-cast, load
@@ -5185,16 +5186,16 @@ const NavGen = struct {
 
     fn airFieldParentPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
-        const parent_ty = ty_pl.ty.toType().childType(mod);
+        const parent_ty = ty_pl.ty.toType().childType(zcu);
         const result_ty_id = try self.resolveType(ty_pl.ty.toType(), .indirect);
 
         const field_ptr = try self.resolve(extra.field_ptr);
         const field_ptr_int = try self.intFromPtr(field_ptr);
-        const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
+        const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
 
         const base_ptr_int = base_ptr_int: {
             if (field_offset == 0) break :base_ptr_int field_ptr_int;
@@ -5319,10 +5320,10 @@ const NavGen = struct {
     }
 
     fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ptr_ty = self.typeOfIndex(inst);
-        assert(ptr_ty.ptrAddressSpace(mod) == .generic);
-        const child_ty = ptr_ty.childType(mod);
+        assert(ptr_ty.ptrAddressSpace(zcu) == .generic);
+        const child_ty = ptr_ty.childType(zcu);
         return try self.alloc(child_ty, .{});
     }
 
@@ -5494,9 +5495,9 @@ const NavGen = struct {
         // ir.Block in a different SPIR-V block.
 
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty = self.typeOfIndex(inst);
-        const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(pt);
+        const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
 
         const cf = switch (self.control_flow) {
             .structured => |*cf| cf,
@@ -5570,7 +5571,7 @@ const NavGen = struct {
 
         const sblock = cf.block_stack.getLast();
 
-        if (ty.isNoReturn(mod)) {
+        if (ty.isNoReturn(zcu)) {
             // If this block is noreturn, this instruction is the last of a block,
             // and we must simply jump to the block's merge unconditionally.
             try self.structuredBreak(next_block);
@@ -5626,13 +5627,13 @@ const NavGen = struct {
     }
 
     fn airBr(self: *NavGen, inst: Air.Inst.Index) !void {
-        const pt = self.pt;
+        const zcu = self.pt.zcu;
         const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
         const operand_ty = self.typeOf(br.operand);
 
         switch (self.control_flow) {
             .structured => |*cf| {
-                if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+                if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
                     const operand_id = try self.resolve(br.operand);
                     const block_result_var_id = cf.block_results.get(br.block_inst).?;
                     try self.store(operand_ty, block_result_var_id, operand_id, .{});
@@ -5643,7 +5644,7 @@ const NavGen = struct {
             },
             .unstructured => |cf| {
                 const block = cf.blocks.get(br.block_inst).?;
-                if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+                if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
                     const operand_id = try self.resolve(br.operand);
                     // current_block_label should not be undefined here, since there
                     // is a br or br_void in the function's body.
@@ -5770,35 +5771,35 @@ const NavGen = struct {
     }
 
     fn airLoad(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const ptr_ty = self.typeOf(ty_op.operand);
         const elem_ty = self.typeOfIndex(inst);
         const operand = try self.resolve(ty_op.operand);
-        if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
+        if (!ptr_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null;
 
-        return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+        return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
     }
 
     fn airStore(self: *NavGen, inst: Air.Inst.Index) !void {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
         const ptr_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = ptr_ty.childType(mod);
+        const elem_ty = ptr_ty.childType(zcu);
         const ptr = try self.resolve(bin_op.lhs);
         const value = try self.resolve(bin_op.rhs);
 
-        try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+        try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
     }
 
     fn airRet(self: *NavGen, inst: Air.Inst.Index) !void {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const ret_ty = self.typeOf(operand);
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-            const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?;
-            if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+            const fn_info = zcu.typeToFunc(zcu.navValue(self.owner_nav).typeOf(zcu)).?;
+            if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
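The coercion this convention enables, as a user-level sketch (assuming the usual error-set-widening rule for function pointers):

    fn neverFails() error{}!void {}

    test "empty-error-set fn coerces to an anyerror-returning fn pointer" {
        const ptr: *const fn () anyerror!void = &neverFails;
        try ptr();
    }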
@@ -5815,14 +5816,14 @@ const NavGen = struct {
 
     fn airRetLoad(self: *NavGen, inst: Air.Inst.Index) !void {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const ptr_ty = self.typeOf(un_op);
-        const ret_ty = ptr_ty.childType(mod);
+        const ret_ty = ptr_ty.childType(zcu);
 
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-            const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?;
-            if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+            const fn_info = zcu.typeToFunc(zcu.navValue(self.owner_nav).typeOf(zcu)).?;
+            if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5834,14 +5835,14 @@ const NavGen = struct {
         }
 
         const ptr = try self.resolve(un_op);
-        const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+        const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
         try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{
             .value = value,
         });
     }
 
     fn airTry(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         const err_union_id = try self.resolve(pl_op.operand);
         const extra = self.air.extraData(Air.Try, pl_op.payload);
@@ -5854,7 +5855,7 @@ const NavGen = struct {
 
         const eu_layout = self.errorUnionLayout(payload_ty);
 
-        if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             const err_id = if (eu_layout.payload_has_bits)
                 try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex())
             else
@@ -5911,18 +5912,18 @@ const NavGen = struct {
     }
 
     fn airErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
         const err_union_ty = self.typeOf(ty_op.operand);
         const err_ty_id = try self.resolveType(Type.anyerror, .direct);
 
-        if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             // No error possible, so just return undefined.
             return try self.spv.constUndef(err_ty_id);
         }
 
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
         const eu_layout = self.errorUnionLayout(payload_ty);
 
         if (!eu_layout.payload_has_bits) {
@@ -5947,10 +5948,10 @@ const NavGen = struct {
     }
 
     fn airWrapErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const err_union_ty = self.typeOfIndex(inst);
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
         const operand_id = try self.resolve(ty_op.operand);
         const eu_layout = self.errorUnionLayout(payload_ty);
 
@@ -5995,28 +5996,28 @@ const NavGen = struct {
 
     fn airIsNull(self: *NavGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const operand_id = try self.resolve(un_op);
         const operand_ty = self.typeOf(un_op);
-        const optional_ty = if (is_pointer) operand_ty.childType(mod) else operand_ty;
-        const payload_ty = optional_ty.optionalChild(mod);
+        const optional_ty = if (is_pointer) operand_ty.childType(zcu) else operand_ty;
+        const payload_ty = optional_ty.optionalChild(zcu);
 
         const bool_ty_id = try self.resolveType(Type.bool, .direct);
 
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             // Pointer payload represents nullability: pointer or slice.
             const loaded_id = if (is_pointer)
                 try self.load(optional_ty, operand_id, .{})
             else
                 operand_id;
 
-            const ptr_ty = if (payload_ty.isSlice(mod))
-                payload_ty.slicePtrFieldType(mod)
+            const ptr_ty = if (payload_ty.isSlice(zcu))
+                payload_ty.slicePtrFieldType(zcu)
             else
                 payload_ty;
 
-            const ptr_id = if (payload_ty.isSlice(mod))
+            const ptr_id = if (payload_ty.isSlice(zcu))
                 try self.extractField(ptr_ty, loaded_id, 0)
             else
                 loaded_id;
@@ -6036,8 +6037,8 @@ const NavGen = struct {
 
         const is_non_null_id = blk: {
             if (is_pointer) {
-                if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-                    const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod));
+                if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+                    const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(zcu));
                     const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class);
                     const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1});
                     break :blk try self.load(Type.bool, tag_ptr_id, .{});
@@ -6046,7 +6047,7 @@ const NavGen = struct {
                 break :blk try self.load(Type.bool, operand_id, .{});
             }
 
-            break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+            break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
                 try self.extractField(Type.bool, operand_id, 1)
             else
                 // Optional representation is a bool indicating whether the optional is set
@@ -6071,16 +6072,16 @@ const NavGen = struct {
     }
 
     fn airIsErr(self: *NavGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
         const operand_id = try self.resolve(un_op);
         const err_union_ty = self.typeOf(un_op);
 
-        if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+        if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
             return try self.constBool(pred == .is_non_err, .direct);
         }
 
-        const payload_ty = err_union_ty.errorUnionPayload(mod);
+        const payload_ty = err_union_ty.errorUnionPayload(zcu);
         const eu_layout = self.errorUnionLayout(payload_ty);
         const bool_ty_id = try self.resolveType(Type.bool, .direct);
 
@@ -6105,15 +6106,15 @@ const NavGen = struct {
 
     fn airUnwrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
         const optional_ty = self.typeOf(ty_op.operand);
         const payload_ty = self.typeOfIndex(inst);
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return null;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
 
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             return operand_id;
         }
 
@@ -6122,22 +6123,22 @@ const NavGen = struct {
 
     fn airUnwrapOptionalPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
         const operand_ty = self.typeOf(ty_op.operand);
-        const optional_ty = operand_ty.childType(mod);
-        const payload_ty = optional_ty.optionalChild(mod);
+        const optional_ty = operand_ty.childType(zcu);
+        const payload_ty = optional_ty.optionalChild(zcu);
         const result_ty = self.typeOfIndex(inst);
         const result_ty_id = try self.resolveType(result_ty, .direct);
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             // There is no payload, but we still need to return a valid pointer.
             // We can just return anything here, so just return a pointer to the operand.
             return try self.bitCast(result_ty, operand_ty, operand_id);
         }
 
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             // They are the same value.
             return try self.bitCast(result_ty, operand_ty, operand_id);
         }
@@ -6147,18 +6148,18 @@ const NavGen = struct {
 
     fn airWrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const payload_ty = self.typeOf(ty_op.operand);
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             return try self.constBool(true, .indirect);
         }
 
         const operand_id = try self.resolve(ty_op.operand);
 
         const optional_ty = self.typeOfIndex(inst);
-        if (optional_ty.optionalReprIsPayload(mod)) {
+        if (optional_ty.optionalReprIsPayload(zcu)) {
             return operand_id;
         }
 
@@ -6170,7 +6171,7 @@ const NavGen = struct {
 
     fn airSwitchBr(self: *NavGen, inst: Air.Inst.Index) !void {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const target = self.getTarget();
         const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         const cond_ty = self.typeOf(pl_op.operand);
@@ -6178,18 +6179,18 @@ const NavGen = struct {
         var cond_indirect = try self.convertToIndirect(cond_ty, cond);
         const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
 
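         // OpSwitch case values are context-dependent literals whose width is
         // taken from the condition type, so compute how many 32-bit words each
         // case value occupies.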
-        const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
+        const cond_words: u32 = switch (cond_ty.zigTypeTag(zcu)) {
             .Bool, .ErrorSet => 1,
             .Int => blk: {
-                const bits = cond_ty.intInfo(mod).bits;
+                const bits = cond_ty.intInfo(zcu).bits;
                 const backing_bits = self.backingIntBits(bits) orelse {
                     return self.todo("implement composite int switch", .{});
                 };
                 break :blk if (backing_bits <= 32) 1 else 2;
             },
             .Enum => blk: {
-                const int_ty = cond_ty.intTagType(mod);
-                const int_info = int_ty.intInfo(mod);
+                const int_ty = cond_ty.intTagType(zcu);
+                const int_info = int_ty.intInfo(zcu);
                 const backing_bits = self.backingIntBits(int_info.bits) orelse {
                     return self.todo("implement composite int switch", .{});
                 };
@@ -6200,7 +6201,7 @@ const NavGen = struct {
                 break :blk target.ptrBitWidth() / 32;
             },
             // TODO: Figure out which other types can appear here, and work around them, since we can only switch on integers.
-            else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}),
+            else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(zcu))}),
         };
 
         const num_cases = switch_br.data.cases_len;
@@ -6255,14 +6256,14 @@ const NavGen = struct {
 
                 for (items) |item| {
                     const value = (try self.air.value(item, pt)) orelse unreachable;
-                    const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) {
-                        .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(pt)) else value.toUnsignedInt(pt),
+                    const int_val: u64 = switch (cond_ty.zigTypeTag(zcu)) {
+                        .Bool, .Int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
                         .Enum => blk: {
                             // TODO: figure out if cond_ty is correct (something with enum literals)
-                            break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(pt); // TODO: composite integer constants
+                            break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(zcu); // TODO: composite integer constants
                         },
-                        .ErrorSet => value.getErrorInt(mod),
-                        .Pointer => value.toUnsignedInt(pt),
+                        .ErrorSet => value.getErrorInt(zcu),
+                        .Pointer => value.toUnsignedInt(zcu),
                         else => unreachable,
                     };
                     const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -6343,9 +6344,9 @@ const NavGen = struct {
 
     fn airDbgStmt(self: *NavGen, inst: Air.Inst.Index) !void {
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
-        const path = mod.navFileScope(self.owner_nav).sub_file_path;
+        const path = zcu.navFileScope(self.owner_nav).sub_file_path;
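         // AIR debug lines are relative to the surrounding declaration and
         // zero-based, hence the `base_line` and `+ 1` adjustments below.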
         try self.func.body.emit(self.spv.gpa, .OpLine, .{
             .file = try self.spv.resolveString(path),
             .line = self.base_line + dbg_stmt.line + 1,
@@ -6354,12 +6355,12 @@ const NavGen = struct {
     }
 
     fn airDbgInlineBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const inst_datas = self.air.instructions.items(.data);
         const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
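         // Lines inside the inlined body are relative to the inlined function,
         // so switch `base_line` over for its duration and restore it afterwards.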
         const old_base_line = self.base_line;
         defer self.base_line = old_base_line;
-        self.base_line = mod.navSrcLine(mod.funcInfo(extra.data.func).owner_nav);
+        self.base_line = zcu.navSrcLine(zcu.funcInfo(extra.data.func).owner_nav);
         return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
     }
 
@@ -6371,7 +6372,7 @@ const NavGen = struct {
     }
 
     fn airAssembly(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const mod = self.pt.zcu;
+        const zcu = self.pt.zcu;
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Asm, ty_pl.payload);
 
@@ -6453,20 +6454,20 @@ const NavGen = struct {
                 // TODO: Translate proper error locations.
                 assert(as.errors.items.len != 0);
                 assert(self.error_msg == null);
-                const src_loc = mod.navSrcLoc(self.owner_nav);
-                self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
-                const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
+                const src_loc = zcu.navSrcLoc(self.owner_nav);
+                self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
+                const notes = try zcu.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
 
                 // Sub-scope to prevent `return error.CodegenFail` from running the errdefers.
                 {
-                    errdefer mod.gpa.free(notes);
+                    errdefer zcu.gpa.free(notes);
                     var i: usize = 0;
                     errdefer for (notes[0..i]) |*note| {
-                        note.deinit(mod.gpa);
+                        note.deinit(zcu.gpa);
                     };
 
                     while (i < as.errors.items.len) : (i += 1) {
-                        notes[i] = try Zcu.ErrorMsg.init(mod.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
+                        notes[i] = try Zcu.ErrorMsg.init(zcu.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
                     }
                 }
                 self.error_msg.?.notes = notes;
@@ -6503,17 +6504,17 @@ const NavGen = struct {
         _ = modifier;
 
         const pt = self.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
         const extra = self.air.extraData(Air.Call, pl_op.payload);
         const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
         const callee_ty = self.typeOf(pl_op.operand);
-        const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
+        const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
             .Fn => callee_ty,
             .Pointer => return self.fail("cannot call function pointers", .{}),
             else => unreachable,
         };
-        const fn_info = mod.typeToFunc(zig_fn_ty).?;
+        const fn_info = zcu.typeToFunc(zig_fn_ty).?;
         const return_type = fn_info.return_type;
 
         const result_type_id = try self.resolveFnReturnType(Type.fromInterned(return_type));
@@ -6529,7 +6530,7 @@ const NavGen = struct {
             // before starting to emit OpFunctionCall instructions. Hence the
             // temporary params buffer.
             const arg_ty = self.typeOf(arg);
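             // Zero-bit (including comptime-only) arguments have no runtime
             // representation, so they are not lowered to call operands.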
-            if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+            if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
             const arg_id = try self.resolve(arg);
 
             params[n_params] = arg_id;
@@ -6547,7 +6548,7 @@ const NavGen = struct {
             try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
         }
 
-        if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(pt)) {
+        if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(zcu)) {
             return null;
         }
 
@@ -6604,12 +6605,12 @@ const NavGen = struct {
     }
 
     fn typeOf(self: *NavGen, inst: Air.Inst.Ref) Type {
-        const mod = self.pt.zcu;
-        return self.air.typeOf(inst, &mod.intern_pool);
+        const zcu = self.pt.zcu;
+        return self.air.typeOf(inst, &zcu.intern_pool);
     }
 
     fn typeOfIndex(self: *NavGen, inst: Air.Inst.Index) Type {
-        const mod = self.pt.zcu;
-        return self.air.typeOfIndex(inst, &mod.intern_pool);
+        const zcu = self.pt.zcu;
+        return self.air.typeOfIndex(inst, &zcu.intern_pool);
     }
 };
src/link/Elf/ZigObject.zig
@@ -849,7 +849,7 @@ pub fn lowerUav(
     const gpa = zcu.gpa;
     const val = Value.fromInterned(uav);
     const uav_alignment = switch (explicit_alignment) {
-        .none => val.typeOf(zcu).abiAlignment(pt),
+        .none => val.typeOf(zcu).abiAlignment(zcu),
         else => explicit_alignment,
     };
     if (self.uavs.get(uav)) |metadata| {
src/link/MachO/ZigObject.zig
@@ -688,7 +688,7 @@ pub fn lowerUav(
     const gpa = zcu.gpa;
     const val = Value.fromInterned(uav);
     const uav_alignment = switch (explicit_alignment) {
-        .none => val.typeOf(zcu).abiAlignment(pt),
+        .none => val.typeOf(zcu).abiAlignment(zcu),
         else => explicit_alignment,
     };
     if (self.uavs.get(uav)) |metadata| {
src/link/Wasm/ZigObject.zig
@@ -487,9 +487,9 @@ fn lowerConst(
     src_loc: Zcu.LazySrcLoc,
 ) !LowerConstResult {
     const gpa = wasm_file.base.comp.gpa;
-    const mod = wasm_file.base.comp.module.?;
+    const zcu = wasm_file.base.comp.module.?;
 
-    const ty = val.typeOf(mod);
+    const ty = val.typeOf(zcu);
 
     // Create and initialize a new local symbol and atom
     const sym_index = try zig_object.allocateSymbol(gpa);
@@ -499,7 +499,7 @@ fn lowerConst(
 
     const code = code: {
         const atom = wasm_file.getAtomPtr(atom_index);
-        atom.alignment = ty.abiAlignment(pt);
+        atom.alignment = ty.abiAlignment(zcu);
         const segment_name = try std.mem.concat(gpa, u8, &.{ ".rodata.", name });
         errdefer gpa.free(segment_name);
         zig_object.symbol(sym_index).* = .{
@@ -509,7 +509,7 @@ fn lowerConst(
             .index = try zig_object.createDataSegment(
                 gpa,
                 segment_name,
-                ty.abiAlignment(pt),
+                ty.abiAlignment(zcu),
             ),
             .virtual_address = undefined,
         };
@@ -555,7 +555,7 @@ pub fn getErrorTableSymbol(zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.Per
     const atom_index = try wasm_file.createAtom(sym_index, zig_object.index);
     const atom = wasm_file.getAtomPtr(atom_index);
     const slice_ty = Type.slice_const_u8_sentinel_0;
-    atom.alignment = slice_ty.abiAlignment(pt);
+    atom.alignment = slice_ty.abiAlignment(pt.zcu);
 
     const sym_name = try zig_object.string_table.insert(gpa, "__zig_err_name_table");
     const segment_name = try gpa.dupe(u8, ".rodata.__zig_err_name_table");
@@ -611,7 +611,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
         // TODO: remove this unreachable entry
         try atom.code.appendNTimes(gpa, 0, 4);
         try atom.code.writer(gpa).writeInt(u32, 0, .little);
-        atom.size += @intCast(slice_ty.abiSize(pt));
+        atom.size += @intCast(slice_ty.abiSize(pt.zcu));
         addend += 1;
 
         try names_atom.code.append(gpa, 0);
@@ -632,7 +632,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
             .offset = offset,
             .addend = @intCast(addend),
         });
-        atom.size += @intCast(slice_ty.abiSize(pt));
+        atom.size += @intCast(slice_ty.abiSize(pt.zcu));
         addend += len;
 
         // Having updated the error name table, we now store the actual name within the names atom.
src/link/Coff.zig
@@ -1259,8 +1259,8 @@ fn updateLazySymbolAtom(
     atom_index: Atom.Index,
     section_index: u16,
 ) !void {
-    const mod = pt.zcu;
-    const gpa = mod.gpa;
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
 
     var required_alignment: InternPool.Alignment = .none;
     var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1275,7 +1275,7 @@ fn updateLazySymbolAtom(
     const atom = self.getAtomPtr(atom_index);
     const local_sym_index = atom.getSymbolIndex().?;
 
-    const src = Type.fromInterned(sym.ty).srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
+    const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
     const res = try codegen.generateLazySymbol(
         &self.base,
         pt,
@@ -1849,7 +1849,7 @@ pub fn lowerUav(
     const gpa = zcu.gpa;
     const val = Value.fromInterned(uav);
     const uav_alignment = switch (explicit_alignment) {
-        .none => val.typeOf(zcu).abiAlignment(pt),
+        .none => val.typeOf(zcu).abiAlignment(zcu),
         else => explicit_alignment,
     };
     if (self.uavs.get(uav)) |metadata| {
src/Sema/bitcast.zig
@@ -85,23 +85,23 @@ fn bitCastInner(
     assert(val_ty.hasWellDefinedLayout(zcu));
 
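     // With a packed host integer, `host_pad_bits` is the spare bits between the
     // value and its host integer, and `abi_pad_bits` is the padding from the
     // host integer up to the ABI size; otherwise all padding is ABI padding.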
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
+        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
     else
-        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
+        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
 
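     // On big-endian targets the requested range sits at the opposite end of
     // the value, so the skip is measured down from the top of the ABI size.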
     const skip_bits = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
     };
 
     var unpack: UnpackValueBits = .{
         .pt = sema.pt,
         .arena = sema.arena,
         .skip_bits = skip_bits,
-        .remaining_bits = dest_ty.bitSize(pt),
+        .remaining_bits = dest_ty.bitSize(zcu),
         .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
     };
     switch (endian) {
@@ -141,22 +141,22 @@ fn bitCastSpliceInner(
     try val_ty.resolveLayout(pt);
     try splice_val_ty.resolveLayout(pt);
 
-    const splice_bits = splice_val_ty.bitSize(pt);
+    const splice_bits = splice_val_ty.bitSize(zcu);
 
     const splice_offset = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
     };
 
-    assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);
+    assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
 
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
+        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
     else
-        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
+        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
 
     var unpack: UnpackValueBits = .{
         .pt = pt,
@@ -181,7 +181,7 @@ fn bitCastSpliceInner(
     try unpack.add(splice_val);
 
     unpack.skip_bits = splice_offset + splice_bits;
-    unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
+    unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
     switch (endian) {
         .little => {
             try unpack.add(val);
@@ -229,7 +229,7 @@ const UnpackValueBits = struct {
         }
 
         const ty = val.typeOf(zcu);
-        const bit_size = ty.bitSize(pt);
+        const bit_size = ty.bitSize(zcu);
 
         if (unpack.skip_bits >= bit_size) {
             unpack.skip_bits -= bit_size;
@@ -291,7 +291,7 @@ const UnpackValueBits = struct {
                     // The final element does not have trailing padding.
                     // Elements are reversed in packed memory on BE targets.
                     const elem_ty = ty.childType(zcu);
-                    const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
+                    const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
                     const len = ty.arrayLen(zcu);
                     const maybe_sent = ty.sentinel(zcu);
 
@@ -323,12 +323,12 @@ const UnpackValueBits = struct {
                             var cur_bit_off: u64 = 0;
                             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                             while (it.next()) |field_idx| {
-                                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
+                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                                 const pad_bits = want_bit_off - cur_bit_off;
                                 const field_val = try val.fieldValue(pt, field_idx);
                                 try unpack.padding(pad_bits);
                                 try unpack.add(field_val);
-                                cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
+                                cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
                             }
                             // Add trailing padding bits.
                             try unpack.padding(bit_size - cur_bit_off);
@@ -339,11 +339,11 @@ const UnpackValueBits = struct {
                             while (it.next()) |field_idx| {
                                 const field_val = try val.fieldValue(pt, field_idx);
                                 const field_ty = field_val.typeOf(zcu);
-                                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
+                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                                 const pad_bits = cur_bit_off - want_bit_off;
                                 try unpack.padding(pad_bits);
                                 try unpack.add(field_val);
-                                cur_bit_off = want_bit_off - field_ty.bitSize(pt);
+                                cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
                             }
                             assert(cur_bit_off == 0);
                         },
@@ -366,7 +366,7 @@ const UnpackValueBits = struct {
                 // This correctly handles the case where `tag == .none`, since the payload is then
                 // either an integer or a byte array, both of which we can unpack.
                 const payload_val = Value.fromInterned(un.val);
-                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
+                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
                 if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
                     try unpack.add(payload_val);
                     try unpack.padding(pad_bits);
@@ -398,13 +398,14 @@ const UnpackValueBits = struct {
 
     fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
         const pt = unpack.pt;
+        const zcu = pt.zcu;
 
         if (unpack.remaining_bits == 0) {
             return;
         }
 
         const ty = val.typeOf(pt.zcu);
-        const bit_size = ty.bitSize(pt);
+        const bit_size = ty.bitSize(zcu);
 
         // Note that this skips all zero-bit types.
         if (unpack.skip_bits >= bit_size) {
@@ -429,9 +430,10 @@ const UnpackValueBits = struct {
 
     fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
         const pt = unpack.pt;
+        const zcu = pt.zcu;
         const ty = val.typeOf(pt.zcu);
 
-        const val_bits = ty.bitSize(pt);
+        const val_bits = ty.bitSize(zcu);
         assert(bit_offset + bit_count <= val_bits);
 
         switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
@@ -499,12 +501,12 @@ const PackValueBits = struct {
                 const len = ty.arrayLen(zcu);
                 const elem_ty = ty.childType(zcu);
                 const maybe_sent = ty.sentinel(zcu);
-                const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
+                const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
                 const elems = try arena.alloc(InternPool.Index, @intCast(len));
 
                 if (endian == .big and maybe_sent != null) {
                     // TODO: validate sentinel was preserved!
-                    try pack.padding(elem_ty.bitSize(pt));
+                    try pack.padding(elem_ty.bitSize(zcu));
                     if (len != 0) try pack.padding(pad_bits);
                 }
 
@@ -520,7 +522,7 @@ const PackValueBits = struct {
                 if (endian == .little and maybe_sent != null) {
                     // TODO: validate sentinel was preserved!
                     if (len != 0) try pack.padding(pad_bits);
-                    try pack.padding(elem_ty.bitSize(pt));
+                    try pack.padding(elem_ty.bitSize(zcu));
                 }
 
                 return Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -538,23 +540,23 @@ const PackValueBits = struct {
                             var cur_bit_off: u64 = 0;
                             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                             while (it.next()) |field_idx| {
-                                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
+                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                                 try pack.padding(want_bit_off - cur_bit_off);
                                 const field_ty = ty.structFieldType(field_idx, zcu);
                                 elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                                cur_bit_off = want_bit_off + field_ty.bitSize(pt);
+                                cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
                             }
-                            try pack.padding(ty.bitSize(pt) - cur_bit_off);
+                            try pack.padding(ty.bitSize(zcu) - cur_bit_off);
                         },
                         .big => {
-                            var cur_bit_off: u64 = ty.bitSize(pt);
+                            var cur_bit_off: u64 = ty.bitSize(zcu);
                             var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
                             while (it.next()) |field_idx| {
                                 const field_ty = ty.structFieldType(field_idx, zcu);
-                                const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
+                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                                 try pack.padding(cur_bit_off - want_bit_off);
                                 elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                                cur_bit_off = want_bit_off - field_ty.bitSize(pt);
+                                cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
                             }
                             assert(cur_bit_off == 0);
                         },
@@ -622,16 +624,16 @@ const PackValueBits = struct {
                 for (field_order, 0..) |*f, i| f.* = @intCast(i);
                 // Sort `field_order` to put the fields with the largest bit sizes first.
                 const SizeSortCtx = struct {
-                    pt: Zcu.PerThread,
+                    zcu: *Zcu,
                     field_types: []const InternPool.Index,
                     fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
                         const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
                         const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
-                        return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
+                        return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
                     }
                 };
                 std.mem.sortUnstable(u32, field_order, SizeSortCtx{
-                    .pt = pt,
+                    .zcu = zcu,
                     .field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
                 }, SizeSortCtx.lessThan);
 
@@ -639,7 +641,7 @@ const PackValueBits = struct {
 
                 for (field_order) |field_idx| {
                     const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
-                    const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
+                    const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
                     if (!padding_after) try pack.padding(pad_bits);
                     const field_val = pack.get(field_ty) catch |err| switch (err) {
                         error.ReinterpretDeclRef => {
@@ -682,10 +684,11 @@ const PackValueBits = struct {
 
     fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
         const pt = pack.pt;
-        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));
+        const zcu = pt.zcu;
+        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
 
         for (vals) |val| {
-            if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
+            if (!Value.fromInterned(val).isUndef(zcu)) break;
         } else {
             // All bits of the value are `undefined`.
             return pt.undefValue(want_ty);
@@ -706,8 +709,8 @@ const PackValueBits = struct {
         ptr_cast: {
             if (vals.len != 1) break :ptr_cast;
             const val = Value.fromInterned(vals[0]);
-            if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
-            if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+            if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
+            if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
             return pt.getCoerced(val, want_ty);
         }
 
@@ -717,7 +720,7 @@ const PackValueBits = struct {
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
             const ty = val.typeOf(pt.zcu);
-            buf_bits += ty.bitSize(pt);
+            buf_bits += ty.bitSize(zcu);
         }
 
         const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
@@ -726,11 +729,11 @@ const PackValueBits = struct {
         var cur_bit_off: usize = 0;
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
-            const ty = val.typeOf(pt.zcu);
-            if (!val.isUndef(pt.zcu)) {
+            const ty = val.typeOf(zcu);
+            if (!val.isUndef(zcu)) {
                 try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
             }
-            cur_bit_off += @intCast(ty.bitSize(pt));
+            cur_bit_off += @intCast(ty.bitSize(zcu));
         }
 
         return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
@@ -740,11 +743,12 @@ const PackValueBits = struct {
         if (need_bits == 0) return .{ &.{}, 0 };
 
         const pt = pack.pt;
+        const zcu = pt.zcu;
 
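         // Walk the unpacked values until they cover `bit_offset + need_bits`
         // bits; the caller gets the covering slice and the bit offset of the
         // requested range within its first value.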
         var bits: u64 = 0;
         var len: usize = 0;
         while (bits < pack.bit_offset + need_bits) {
-            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
+            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(zcu);
             len += 1;
         }
 
@@ -757,7 +761,7 @@ const PackValueBits = struct {
             pack.bit_offset = 0;
         } else {
             pack.unpacked = pack.unpacked[len - 1 ..];
-            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
+            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(zcu) - extra_bits;
         }
 
         return .{ result_vals, result_offset };
src/Sema/comptime_ptr_access.zig
@@ -13,14 +13,15 @@ pub const ComptimeLoadResult = union(enum) {
 
 pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
     const pt = sema.pt;
+    const zcu = pt.zcu;
     const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
     };
     const bit_offset = if (host_bits != 0) bit_offset: {
-        const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
+        const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
         const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
             .none => 0,
             .runtime => return .runtime_load,
@@ -67,18 +68,18 @@ pub fn storeComptimePtr(
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
     };
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
         .runtime => return .runtime_store,
         else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
-            .little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
-            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
+            .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
+            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
         },
     };
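     // A store through a packed pointer operates within its host integer, so
     // first check that the stored bits actually fit inside it.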
     const pseudo_store_ty = if (host_bits > 0) t: {
-        const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
+        const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
         if (need_bits + bit_offset > host_bits) {
             return .exceeds_host_size;
         }
@@ -166,9 +167,9 @@ pub fn storeComptimePtr(
         .direct => |direct| .{ direct.val, 0 },
         .index => |index| .{
             index.val,
-            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
+            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
         },
-        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
+        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
         .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
         else => unreachable,
     };
@@ -347,8 +348,8 @@ fn loadComptimePtrInner(
         const load_one_ty, const load_count = load_ty.arrayBase(zcu);
 
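         // A nonzero byte offset can only be restructured if it is a whole
         // number of base elements; fold it into the element index, or give up.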
         const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
-            if (try sema.typeRequiresComptime(load_one_ty)) break :restructure_array;
-            const elem_len = try sema.typeAbiSize(load_one_ty);
+            if (try load_one_ty.comptimeOnlySema(pt)) break :restructure_array;
+            const elem_len = try load_one_ty.abiSizeSema(pt);
             if (ptr.byte_offset % elem_len != 0) break :restructure_array;
             break :idx @divExact(ptr.byte_offset, elem_len);
         };
@@ -394,12 +395,12 @@ fn loadComptimePtrInner(
     var cur_offset = ptr.byte_offset;
 
     if (load_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
-        cur_offset += try sema.typeAbiSize(load_ty.childType(zcu)) * array_offset;
+        cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
     }
 
-    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try sema.typeAbiSize(load_ty);
+    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try load_ty.abiSizeSema(pt);
 
-    if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
+    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
         return .{ .out_of_bounds = cur_val.typeOf(zcu) };
     }
 
@@ -434,7 +435,7 @@ fn loadComptimePtrInner(
             .Optional => break, // this can only be a pointer-like optional so is terminal
             .Array => {
                 const elem_ty = cur_ty.childType(zcu);
-                const elem_size = try sema.typeAbiSize(elem_ty);
+                const elem_size = try elem_ty.abiSizeSema(pt);
                 const elem_idx = cur_offset / elem_size;
                 const next_elem_off = elem_size * (elem_idx + 1);
                 if (cur_offset + need_bytes <= next_elem_off) {
@@ -449,8 +450,8 @@ fn loadComptimePtrInner(
                 .auto => unreachable, // ill-defined layout
                 .@"packed" => break, // let the bitcast logic handle this
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                    const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
+                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                         cur_val = try cur_val.getElem(sema.pt, field_idx);
                         cur_offset -= start_off;
@@ -477,7 +478,7 @@ fn loadComptimePtrInner(
                     };
                     // The payload always has offset 0. If it's big enough
                     // to represent the whole load type, we can use it.
-                    if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
+                    if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                         cur_val = payload;
                     } else {
                         break;
@@ -746,8 +747,8 @@ fn prepareComptimePtrStore(
 
         const store_one_ty, const store_count = store_ty.arrayBase(zcu);
         const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
-            if (try sema.typeRequiresComptime(store_one_ty)) break :restructure_array;
-            const elem_len = try sema.typeAbiSize(store_one_ty);
+            if (try store_one_ty.comptimeOnlySema(pt)) break :restructure_array;
+            const elem_len = try store_one_ty.abiSizeSema(pt);
             if (ptr.byte_offset % elem_len != 0) break :restructure_array;
             break :idx @divExact(ptr.byte_offset, elem_len);
         };
@@ -800,11 +801,11 @@ fn prepareComptimePtrStore(
     var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
         .direct => |direct| .{ direct.val, 0 },
         // It's okay to do `abiSize` - the comptime-only case will be caught below.
-        .index => |index| .{ index.val, index.elem_index * try sema.typeAbiSize(index.val.typeOf(zcu).childType(zcu)) },
+        .index => |index| .{ index.val, index.elem_index * try index.val.typeOf(zcu).childType(zcu).abiSizeSema(pt) },
         .flat_index => |flat_index| .{
             flat_index.val,
             // It's okay to do `abiSize` - the comptime-only case will be caught below.
-            flat_index.flat_elem_index * try sema.typeAbiSize(flat_index.val.typeOf(zcu).arrayBase(zcu)[0]),
+            flat_index.flat_elem_index * try flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSizeSema(pt),
         },
         .reinterpret => |r| .{ r.val, r.byte_offset },
         else => unreachable,
@@ -816,12 +817,12 @@ fn prepareComptimePtrStore(
     }
 
     if (store_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
-        cur_offset += try sema.typeAbiSize(store_ty.childType(zcu)) * array_offset;
+        cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
     }
 
-    const need_bytes = try sema.typeAbiSize(store_ty);
+    const need_bytes = try store_ty.abiSizeSema(pt);
 
-    if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
+    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
         return .{ .out_of_bounds = cur_val.typeOf(zcu) };
     }
 
@@ -856,7 +857,7 @@ fn prepareComptimePtrStore(
             .Optional => break, // this can only be a pointer-like optional so is terminal
             .Array => {
                 const elem_ty = cur_ty.childType(zcu);
-                const elem_size = try sema.typeAbiSize(elem_ty);
+                const elem_size = try elem_ty.abiSizeSema(pt);
                 const elem_idx = cur_offset / elem_size;
                 const next_elem_off = elem_size * (elem_idx + 1);
                 if (cur_offset + need_bytes <= next_elem_off) {
@@ -871,8 +872,8 @@ fn prepareComptimePtrStore(
                 .auto => unreachable, // ill-defined layout
                 .@"packed" => break, // let the bitcast logic handle this
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                    const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
+                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                         cur_val = try cur_val.elem(pt, sema.arena, field_idx);
                         cur_offset -= start_off;
@@ -895,7 +896,7 @@ fn prepareComptimePtrStore(
                     };
                     // The payload always has offset 0. If it's big enough
                     // to represent the whole store type, we can use it.
-                    if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
+                    if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                         cur_val = payload;
                     } else {
                         break;
src/Zcu/PerThread.zig
@@ -2756,7 +2756,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
     // pointee type needs to be resolved more; that needs to be done before calling
     // this ptr() function.
     if (info.flags.alignment != .none and
-        info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt))
+        info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt.zcu))
     {
         canon_info.flags.alignment = .none;
     }
@@ -2766,7 +2766,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
         // we change it to 0 here. If this causes an assertion trip, the pointee type
         // needs to be resolved before calling this ptr() function.
         .none => if (info.packed_offset.host_size != 0) {
-            const elem_bit_size = Type.fromInterned(info.child).bitSize(pt);
+            const elem_bit_size = Type.fromInterned(info.child).bitSize(pt.zcu);
             assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
             if (info.packed_offset.host_size * 8 == elem_bit_size) {
                 canon_info.packed_offset.host_size = 0;
@@ -2784,7 +2784,7 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
 /// In general, prefer this function during semantic analysis.
 pub fn ptrTypeSema(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Zcu.SemaError!Type {
     if (info.flags.alignment != .none) {
-        _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(pt, .sema);
+        _ = try Type.fromInterned(info.child).abiAlignmentSema(pt);
     }
     return pt.ptrType(info);
 }
@@ -2984,15 +2984,15 @@ pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type {
 /// `max`. Asserts that neither value is undef.
 /// TODO: if #3806 is implemented, this becomes trivial
 pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type {
-    const mod = pt.zcu;
-    assert(!min.isUndef(mod));
-    assert(!max.isUndef(mod));
+    const zcu = pt.zcu;
+    assert(!min.isUndef(zcu));
+    assert(!max.isUndef(zcu));
 
     if (std.debug.runtime_safety) {
-        assert(Value.order(min, max, pt).compare(.lte));
+        assert(Value.order(min, max, zcu).compare(.lte));
     }
 
-    const sign = min.orderAgainstZero(pt) == .lt;
+    const sign = min.orderAgainstZero(zcu) == .lt;
 
     const min_val_bits = pt.intBitsForValue(min, sign);
     const max_val_bits = pt.intBitsForValue(max, sign);
@@ -3032,120 +3032,30 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
             return @as(u16, @intCast(big.bitCountTwosComp()));
         },
         .lazy_align => |lazy_ty| {
-            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt).toByteUnits() orelse 0) + @intFromBool(sign);
+            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt.zcu).toByteUnits() orelse 0) + @intFromBool(sign);
         },
         .lazy_size => |lazy_ty| {
-            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt)) + @intFromBool(sign);
+            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt.zcu)) + @intFromBool(sign);
         },
     }
 }
 
-pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) Zcu.UnionLayout {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    assert(loaded_union.haveLayout(ip));
-    var most_aligned_field: u32 = undefined;
-    var most_aligned_field_size: u64 = undefined;
-    var biggest_field: u32 = undefined;
-    var payload_size: u64 = 0;
-    var payload_align: InternPool.Alignment = .@"1";
-    for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
-        if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
-
-        const explicit_align = loaded_union.fieldAlign(ip, field_index);
-        const field_align = if (explicit_align != .none)
-            explicit_align
-        else
-            Type.fromInterned(field_ty).abiAlignment(pt);
-        const field_size = Type.fromInterned(field_ty).abiSize(pt);
-        if (field_size > payload_size) {
-            payload_size = field_size;
-            biggest_field = @intCast(field_index);
-        }
-        if (field_align.compare(.gte, payload_align)) {
-            payload_align = field_align;
-            most_aligned_field = @intCast(field_index);
-            most_aligned_field_size = field_size;
-        }
-    }
-    const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag();
-    if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) {
-        return .{
-            .abi_size = payload_align.forward(payload_size),
-            .abi_align = payload_align,
-            .most_aligned_field = most_aligned_field,
-            .most_aligned_field_size = most_aligned_field_size,
-            .biggest_field = biggest_field,
-            .payload_size = payload_size,
-            .payload_align = payload_align,
-            .tag_align = .none,
-            .tag_size = 0,
-            .padding = 0,
-        };
-    }
-
-    const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt);
-    const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1");
-    return .{
-        .abi_size = loaded_union.sizeUnordered(ip),
-        .abi_align = tag_align.max(payload_align),
-        .most_aligned_field = most_aligned_field,
-        .most_aligned_field_size = most_aligned_field_size,
-        .biggest_field = biggest_field,
-        .payload_size = payload_size,
-        .payload_align = payload_align,
-        .tag_align = tag_align,
-        .tag_size = tag_size,
-        .padding = loaded_union.paddingUnordered(ip),
-    };
-}
-
-pub fn unionAbiSize(mod: *Module, loaded_union: InternPool.LoadedUnionType) u64 {
-    return mod.getUnionLayout(loaded_union).abi_size;
-}
-
 /// Returns 0 if the union is represented with 0 bits at runtime.
 pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
     var max_align: InternPool.Alignment = .none;
-    if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt);
+    if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(zcu);
     for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
-        if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+        if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
 
-        const field_align = mod.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
+        const field_align = zcu.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
         max_align = max_align.max(field_align);
     }
     return max_align;
 }
 
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-pub fn unionFieldNormalAlignment(
-    pt: Zcu.PerThread,
-    loaded_union: InternPool.LoadedUnionType,
-    field_index: u32,
-) InternPool.Alignment {
-    return pt.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
-}
-
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn unionFieldNormalAlignmentAdvanced(
-    pt: Zcu.PerThread,
-    loaded_union: InternPool.LoadedUnionType,
-    field_index: u32,
-    comptime strat: Type.ResolveStrat,
-) Zcu.SemaError!InternPool.Alignment {
-    const ip = &pt.zcu.intern_pool;
-    assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
-    const field_align = loaded_union.fieldAlign(ip, field_index);
-    if (field_align != .none) return field_align;
-    const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
-    if (field_ty.isNoReturn(pt.zcu)) return .none;
-    return (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
-}
-
 /// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
 pub fn structFieldAlignment(
     pt: Zcu.PerThread,
@@ -3153,31 +3063,13 @@ pub fn structFieldAlignment(
     field_ty: Type,
     layout: std.builtin.Type.ContainerLayout,
 ) InternPool.Alignment {
-    return pt.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
-}
-
-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn structFieldAlignmentAdvanced(
-    pt: Zcu.PerThread,
-    explicit_alignment: InternPool.Alignment,
-    field_ty: Type,
-    layout: std.builtin.Type.ContainerLayout,
-    comptime strat: Type.ResolveStrat,
-) Zcu.SemaError!InternPool.Alignment {
-    assert(layout != .@"packed");
-    if (explicit_alignment != .none) return explicit_alignment;
-    const ty_abi_align = (try field_ty.abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
-    switch (layout) {
-        .@"packed" => unreachable,
-        .auto => if (pt.zcu.getTarget().ofmt != .c) return ty_abi_align,
-        .@"extern" => {},
-    }
-    // extern
-    if (field_ty.isAbiInt(pt.zcu) and field_ty.intInfo(pt.zcu).bits >= 128) {
-        return ty_abi_align.maxStrict(.@"16");
-    }
-    return ty_abi_align;
+    return field_ty.structFieldAlignmentAdvanced(
+        explicit_alignment,
+        layout,
+        .normal,
+        pt.zcu,
+        {},
+    ) catch unreachable;
 }
 
 /// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
@@ -3189,8 +3081,8 @@ pub fn structPackedFieldBitOffset(
     struct_type: InternPool.LoadedStructType,
     field_index: u32,
 ) u16 {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     assert(struct_type.layout == .@"packed");
     assert(struct_type.haveLayout(ip));
     var bit_sum: u64 = 0;
@@ -3199,7 +3091,7 @@ pub fn structPackedFieldBitOffset(
             return @intCast(bit_sum);
         }
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-        bit_sum += field_ty.bitSize(pt);
+        bit_sum += field_ty.bitSize(zcu);
     }
     unreachable; // index out of bounds
 }
@@ -3244,7 +3136,7 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.
     return pt.ptrType(.{
         .child = ty.toIntern(),
         .flags = .{
-            .alignment = if (r.alignment == ty.abiAlignment(pt))
+            .alignment = if (r.alignment == ty.abiAlignment(zcu))
                 .none
             else
                 r.alignment,
@@ -3274,7 +3166,7 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo
     const zcu = pt.zcu;
     const r = zcu.intern_pool.getNav(nav_index).status.resolved;
     if (r.alignment != .none) return r.alignment;
-    return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt);
+    return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(zcu);
 }
 
 /// Given a container type requiring resolution, ensures that it is up-to-date.
src/codegen.zig
@@ -198,17 +198,17 @@ pub fn generateSymbol(
     const tracy = trace(@src());
     defer tracy.end();
 
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const ty = val.typeOf(mod);
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const ty = val.typeOf(zcu);
 
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
     const endian = target.cpu.arch.endian();
 
     log.debug("generateSymbol: val = {}", .{val.fmtValue(pt)});
 
-    if (val.isUndefDeep(mod)) {
-        const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
+    if (val.isUndefDeep(zcu)) {
+        const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
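         // Undefined values are lowered as a fill of 0xaa bytes.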
         try code.appendNTimes(0xaa, abi_size);
         return .ok;
     }
@@ -254,9 +254,9 @@ pub fn generateSymbol(
         .empty_enum_value,
         => unreachable, // non-runtime values
         .int => {
-            const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
+            const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
             var space: Value.BigIntSpace = undefined;
-            const int_val = val.toBigInt(&space, pt);
+            const int_val = val.toBigInt(&space, zcu);
             int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
         },
         .err => |err| {
@@ -264,20 +264,20 @@ pub fn generateSymbol(
             try code.writer().writeInt(u16, @intCast(int), endian);
         },
         .error_union => |error_union| {
-            const payload_ty = ty.errorUnionPayload(mod);
+            const payload_ty = ty.errorUnionPayload(zcu);
             const err_val: u16 = switch (error_union.val) {
                 .err_name => |err_name| @intCast(try pt.getErrorValue(err_name)),
                 .payload => 0,
             };
 
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                 try code.writer().writeInt(u16, err_val, endian);
                 return .ok;
             }
 
-            const payload_align = payload_ty.abiAlignment(pt);
-            const error_align = Type.anyerror.abiAlignment(pt);
-            const abi_align = ty.abiAlignment(pt);
+            const payload_align = payload_ty.abiAlignment(zcu);
+            const error_align = Type.anyerror.abiAlignment(zcu);
+            const abi_align = ty.abiAlignment(zcu);
 
             // error value first when its alignment is greater than the payload's
             if (error_align.order(payload_align) == .gt) {
@@ -317,7 +317,7 @@ pub fn generateSymbol(
             }
         },
         .enum_tag => |enum_tag| {
-            const int_tag_ty = ty.intTagType(mod);
+            const int_tag_ty = ty.intTagType(zcu);
             switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) {
                 .ok => {},
                 .fail => |em| return .{ .fail = em },
@@ -329,7 +329,7 @@ pub fn generateSymbol(
             .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
             .f80 => |f80_val| {
                 writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
-                const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
+                const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
                 try code.appendNTimes(0, abi_size - 10);
             },
             .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
@@ -349,11 +349,11 @@ pub fn generateSymbol(
             }
         },
         .opt => {
-            const payload_type = ty.optionalChild(mod);
-            const payload_val = val.optionalValue(mod);
-            const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
+            const payload_type = ty.optionalChild(zcu);
+            const payload_val = val.optionalValue(zcu);
+            const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
 
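             // A pointer-like optional is represented by the payload alone;
             // null is encoded as all-zero bytes.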
-            if (ty.optionalReprIsPayload(mod)) {
+            if (ty.optionalReprIsPayload(zcu)) {
                 if (payload_val) |value| {
                     switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) {
                         .ok => {},
@@ -363,8 +363,8 @@ pub fn generateSymbol(
                     try code.appendNTimes(0, abi_size);
                 }
             } else {
-                const padding = abi_size - (math.cast(usize, payload_type.abiSize(pt)) orelse return error.Overflow) - 1;
-                if (payload_type.hasRuntimeBits(pt)) {
+                const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
+                if (payload_type.hasRuntimeBits(zcu)) {
                     const value = payload_val orelse Value.fromInterned(try pt.intern(.{
                         .undef = payload_type.toIntern(),
                     }));
@@ -398,7 +398,7 @@ pub fn generateSymbol(
                 },
             },
             .vector_type => |vector_type| {
-                const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
+                const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
                 if (vector_type.child == .bool_type) {
                     const bytes = try code.addManyAsSlice(abi_size);
                     @memset(bytes, 0xaa);
@@ -458,7 +458,7 @@ pub fn generateSymbol(
                     }
 
                     const padding = abi_size -
-                        (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(pt) * vector_type.len) orelse
+                        (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
                         return error.Overflow);
                     if (padding > 0) try code.appendNTimes(0, padding);
                 }
@@ -471,7 +471,7 @@ pub fn generateSymbol(
                     0..,
                 ) |field_ty, comptime_val, index| {
                     if (comptime_val != .none) continue;
-                    if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+                    if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
 
                     const field_val = switch (aggregate.storage) {
                         .bytes => |bytes| try pt.intern(.{ .int = .{
@@ -489,7 +489,7 @@ pub fn generateSymbol(
                     const unpadded_field_end = code.items.len - struct_begin;
 
                     // Pad struct members if required
-                    const padded_field_end = ty.structFieldOffset(index + 1, pt);
+                    const padded_field_end = ty.structFieldOffset(index + 1, zcu);
                     const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
                         return error.Overflow;
 
@@ -502,7 +502,7 @@ pub fn generateSymbol(
                 const struct_type = ip.loadStructType(ty.toIntern());
                 switch (struct_type.layout) {
                     .@"packed" => {
-                        const abi_size = math.cast(usize, ty.abiSize(pt)) orelse return error.Overflow;
+                        const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
                         const current_pos = code.items.len;
                         try code.appendNTimes(0, abi_size);
                         var bits: u16 = 0;
@@ -519,8 +519,8 @@ pub fn generateSymbol(
 
                             // A pointer may point to a decl which must be marked used,
                             // but it can also result in a relocation, so we handle pointers separately.
-                            if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
-                                const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(pt)) orelse
+                            if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .Pointer) {
+                                const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse
                                     return error.Overflow;
                                 var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
                                 defer tmp_list.deinit();
@@ -531,7 +531,7 @@ pub fn generateSymbol(
                             } else {
                                 Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
                             }
-                            bits += @intCast(Type.fromInterned(field_ty).bitSize(pt));
+                            bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
                         }
                     },
                     .auto, .@"extern" => {
@@ -542,7 +542,7 @@ pub fn generateSymbol(
                         var it = struct_type.iterateRuntimeOrder(ip);
                         while (it.next()) |field_index| {
                             const field_ty = field_types[field_index];
-                            if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+                            if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
 
                             const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
                                 .bytes => |bytes| try pt.intern(.{ .int = .{
@@ -580,7 +580,7 @@ pub fn generateSymbol(
             else => unreachable,
         },
         .un => |un| {
-            const layout = ty.unionGetLayout(pt);
+            const layout = ty.unionGetLayout(zcu);
 
             if (layout.payload_size == 0) {
                 return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info);
@@ -594,11 +594,11 @@ pub fn generateSymbol(
                 }
             }
 
-            const union_obj = mod.typeToUnion(ty).?;
+            const union_obj = zcu.typeToUnion(ty).?;
             if (un.tag != .none) {
-                const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
+                const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-                if (!field_ty.hasRuntimeBits(pt)) {
+                if (!field_ty.hasRuntimeBits(zcu)) {
                     try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
                 } else {
                     switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) {
@@ -606,7 +606,7 @@ pub fn generateSymbol(
                         .fail => |em| return Result{ .fail = em },
                     }
 
-                    const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(pt)) orelse return error.Overflow;
+                    const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
                     if (padding > 0) {
                         try code.appendNTimes(0, padding);
                     }
@@ -661,7 +661,7 @@ fn lowerPtr(
             reloc_info,
             offset + errUnionPayloadOffset(
                 Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
-                pt,
+                zcu,
             ),
         ),
         .opt_payload => |opt_ptr| try lowerPtr(
@@ -687,7 +687,7 @@ fn lowerPtr(
                     };
                 },
                 .Struct, .Union => switch (base_ty.containerLayout(zcu)) {
-                    .auto => base_ty.structFieldOffset(@intCast(field.index), pt),
+                    .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
                     .@"extern", .@"packed" => unreachable,
                 },
                 else => unreachable,
@@ -713,15 +713,16 @@ fn lowerUavRef(
     offset: u64,
 ) CodeGenError!Result {
     _ = debug_output;
-    const ip = &pt.zcu.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const target = lf.comp.root_mod.resolved_target.result;
 
     const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
     const uav_val = uav.val;
     const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
     log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
-    const is_fn_body = uav_ty.zigTypeTag(pt.zcu) == .Fn;
-    if (!is_fn_body and !uav_ty.hasRuntimeBits(pt)) {
+    const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
+    if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
         try code.appendNTimes(0xaa, ptr_width_bytes);
         return Result.ok;
     }
@@ -768,7 +769,7 @@ fn lowerNavRef(
     const ptr_width = target.ptrBitWidth();
     const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
     const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
-    if (!is_fn_body and !nav_ty.hasRuntimeBits(pt)) {
+    if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
         try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
         return Result.ok;
     }
@@ -860,7 +861,7 @@ fn genNavRef(
     const ty = val.typeOf(zcu);
     log.debug("genNavRef: val = {}", .{val.fmtValue(pt)});
 
-    if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+    if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
         const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) {
             1 => 0xaa,
             2 => 0xaaaa,
@@ -994,8 +995,8 @@ pub fn genTypedValue(
             const info = ty.intInfo(zcu);
             if (info.bits <= target.ptrBitWidth()) {
                 const unsigned: u64 = switch (info.signedness) {
-                    .signed => @bitCast(val.toSignedInt(pt)),
-                    .unsigned => val.toUnsignedInt(pt),
+                    .signed => @bitCast(val.toSignedInt(zcu)),
+                    .unsigned => val.toUnsignedInt(zcu),
                 };
                 return .{ .mcv = .{ .immediate = unsigned } };
             }
@@ -1012,7 +1013,7 @@ pub fn genTypedValue(
                     val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } },
                     target,
                 );
-            } else if (ty.abiSize(pt) == 1) {
+            } else if (ty.abiSize(zcu) == 1) {
                 return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
             }
         },
@@ -1034,7 +1035,7 @@ pub fn genTypedValue(
         .ErrorUnion => {
             const err_type = ty.errorUnionSet(zcu);
             const payload_type = ty.errorUnionPayload(zcu);
-            if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
+            if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
                 // We use the error type directly as the type.
                 const err_int_ty = try pt.errorIntType();
                 switch (ip.indexToKey(val.toIntern()).error_union.val) {
@@ -1074,23 +1075,23 @@ pub fn genTypedValue(
     return lf.lowerUav(pt, val.toIntern(), .none, src_loc);
 }
 
-pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
-    const payload_align = payload_ty.abiAlignment(pt);
-    const error_align = Type.anyerror.abiAlignment(pt);
-    if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
+    const payload_align = payload_ty.abiAlignment(zcu);
+    const error_align = Type.anyerror.abiAlignment(zcu);
+    if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         return 0;
     } else {
-        return payload_align.forward(Type.anyerror.abiSize(pt));
+        return payload_align.forward(Type.anyerror.abiSize(zcu));
     }
 }
 
-pub fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) u64 {
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return 0;
-    const payload_align = payload_ty.abiAlignment(pt);
-    const error_align = Type.anyerror.abiAlignment(pt);
-    if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
-        return error_align.forward(payload_ty.abiSize(pt));
+pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return 0;
+    const payload_align = payload_ty.abiAlignment(zcu);
+    const error_align = Type.anyerror.abiAlignment(zcu);
+    if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+        return error_align.forward(payload_ty.abiSize(zcu));
     } else {
         return 0;
     }
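
A minimal call-site sketch for the reworked offset helpers, assuming (hypothetically) a `pt: Zcu.PerThread` and an error-union type `eu_ty` in scope:

    const zcu = pt.zcu;
    const payload_ty = eu_ty.errorUnionPayload(zcu);
    const payload_off = errUnionPayloadOffset(payload_ty, zcu);
    const error_off = errUnionErrorOffset(payload_ty, zcu);
    // The more strictly aligned field is laid out first, so at most one of
    // the two offsets is nonzero.
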
src/InternPool.zig
@@ -3483,7 +3483,7 @@ pub const LoadedStructType = struct {
         return s.field_aligns.get(ip)[i];
     }
 
-    pub fn fieldInit(s: LoadedStructType, ip: *InternPool, i: usize) Index {
+    pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index {
         if (s.field_inits.len == 0) return .none;
         assert(s.haveFieldInits(ip));
         return s.field_inits.get(ip)[i];
@@ -11066,7 +11066,7 @@ pub fn destroyNamespace(
     local.mutate.namespaces.free_list = @intFromEnum(namespace_index);
 }
 
-pub fn filePtr(ip: *InternPool, file_index: FileIndex) *Zcu.File {
+pub fn filePtr(ip: *const InternPool, file_index: FileIndex) *Zcu.File {
     const file_index_unwrapped = file_index.unwrap(ip);
     const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
     return files.view().items(.file)[file_index_unwrapped.index];
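
Both accessors are pure lookups, so a read-only pool handle suffices. A sketch, assuming a `zcu: *const Zcu`, a `LoadedStructType` named `struct_type`, and valid `file_index`/`field_index` values:

    const ip: *const InternPool = &zcu.intern_pool;
    const file: *Zcu.File = ip.filePtr(file_index);
    const field_init: InternPool.Index = struct_type.fieldInit(ip, field_index);
    // Neither call mutates the pool; the `*const` parameter documents that.
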
src/mutable_value.zig
@@ -369,7 +369,7 @@ pub const MutableValue = union(enum) {
             .bytes => |b| {
                 assert(is_trivial_int);
                 assert(field_val.typeOf(zcu).toIntern() == .u8_type);
-                b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
+                b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
             },
             .repeated => |r| {
                 if (field_val.eqlTrivial(r.child.*)) return;
@@ -382,9 +382,9 @@ pub const MutableValue = union(enum) {
                 {
                     // We can use the `bytes` representation.
                     const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
-                    const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(pt);
+                    const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
                     @memset(bytes, @intCast(repeated_byte));
-                    bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(pt));
+                    bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
                     mv.* = .{ .bytes = .{
                         .ty = r.ty,
                         .data = bytes,
@@ -431,7 +431,7 @@ pub const MutableValue = union(enum) {
                     } else {
                         const bytes = try arena.alloc(u8, a.elems.len);
                         for (a.elems, bytes) |elem_val, *b| {
-                            b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(pt));
+                            b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
                         }
                         mv.* = .{ .bytes = .{
                             .ty = a.ty,
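
The byte conversions above all follow the same pattern. A sketch, assuming `zcu: *Zcu` and an interned `u8` value `elem`:

    // toUnsignedInt no longer needs per-thread state, only the Zcu.
    const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu));
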
src/print_value.zig
@@ -95,11 +95,11 @@ pub fn print(
         .int => |int| switch (int.storage) {
             inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
             .lazy_align => |ty| if (have_sema) {
-                const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, .sema)).scalar;
+                const a = try Type.fromInterned(ty).abiAlignmentSema(pt);
                 try writer.print("{}", .{a.toByteUnits() orelse 0});
             } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(pt)}),
             .lazy_size => |ty| if (have_sema) {
-                const s = (try Type.fromInterned(ty).abiSizeAdvanced(pt, .sema)).scalar;
+                const s = try Type.fromInterned(ty).abiSizeSema(pt);
                 try writer.print("{}", .{s});
             } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(pt)}),
         },
@@ -245,7 +245,7 @@ fn printAggregate(
                     if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
                     const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
                     if (elem_val.isUndef(zcu)) break :one_byte_str;
-                    const byte = elem_val.toUnsignedInt(pt);
+                    const byte = elem_val.toUnsignedInt(zcu);
                     try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
                     if (!is_ref) try writer.writeAll(".*");
                     return;
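
Printing a lazy `@alignOf`/`@sizeOf` value during semantic analysis now goes through the dedicated `abiAlignmentSema`/`abiSizeSema` wrappers. A sketch, assuming `pt: Zcu.PerThread` and an interned type index `ty`:

    const t = Type.fromInterned(ty);
    const alignment = try t.abiAlignmentSema(pt); // may resolve the type's layout
    const size = try t.abiSizeSema(pt);
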
src/RangeSet.zig
@@ -9,7 +9,7 @@ const Zcu = @import("Zcu.zig");
 const RangeSet = @This();
 const LazySrcLoc = Zcu.LazySrcLoc;
 
-pt: Zcu.PerThread,
+zcu: *Zcu,
 ranges: std.ArrayList(Range),
 
 pub const Range = struct {
@@ -18,9 +18,9 @@ pub const Range = struct {
     src: LazySrcLoc,
 };
 
-pub fn init(allocator: std.mem.Allocator, pt: Zcu.PerThread) RangeSet {
+pub fn init(allocator: std.mem.Allocator, zcu: *Zcu) RangeSet {
     return .{
-        .pt = pt,
+        .zcu = zcu,
         .ranges = std.ArrayList(Range).init(allocator),
     };
 }
@@ -35,8 +35,8 @@ pub fn add(
     last: InternPool.Index,
     src: LazySrcLoc,
 ) !?LazySrcLoc {
-    const pt = self.pt;
-    const ip = &pt.zcu.intern_pool;
+    const zcu = self.zcu;
+    const ip = &zcu.intern_pool;
 
     const ty = ip.typeOf(first);
     assert(ty == ip.typeOf(last));
@@ -45,8 +45,8 @@ pub fn add(
         assert(ty == ip.typeOf(range.first));
         assert(ty == ip.typeOf(range.last));
 
-        if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), pt) and
-            Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), pt))
+        if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), zcu) and
+            Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), zcu))
         {
             return range.src; // They overlap.
         }
@@ -61,20 +61,20 @@ pub fn add(
 }
 
 /// Assumes a and b do not overlap
-fn lessThan(pt: Zcu.PerThread, a: Range, b: Range) bool {
-    const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(a.first));
-    return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, pt);
+fn lessThan(zcu: *Zcu, a: Range, b: Range) bool {
+    const ty = Type.fromInterned(zcu.intern_pool.typeOf(a.first));
+    return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, zcu);
 }
 
 pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
-    const pt = self.pt;
-    const ip = &pt.zcu.intern_pool;
+    const zcu = self.zcu;
+    const ip = &zcu.intern_pool;
     assert(ip.typeOf(first) == ip.typeOf(last));
 
     if (self.ranges.items.len == 0)
         return false;
 
-    std.mem.sort(Range, self.ranges.items, pt, lessThan);
+    std.mem.sort(Range, self.ranges.items, zcu, lessThan);
 
     if (self.ranges.items[0].first != first or
         self.ranges.items[self.ranges.items.len - 1].last != last)
@@ -93,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !
         const prev = self.ranges.items[i];
 
         // prev.last + 1 == cur.first
-        try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, pt));
+        try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, zcu));
         try counter.addScalar(&counter, 1);
 
-        const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, pt);
+        const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, zcu);
         if (!cur_start_int.eql(counter.toConst())) {
             return false;
         }
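
A usage sketch under the new field, assuming `gpa: std.mem.Allocator`, `zcu: *Zcu` (e.g. `sema.pt.zcu`), and interned `first`/`last` bounds with a source location `src`:

    var range_set = RangeSet.init(gpa, zcu);
    defer range_set.ranges.deinit();
    if (try range_set.add(first, last, src)) |prev_src| {
        // Overlap: `prev_src` is the source location of the earlier range.
        _ = prev_src;
    }
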
src/Sema.zig
@@ -6,7 +6,7 @@
 //! This is the heart of the Zig compiler.
 
 pt: Zcu.PerThread,
-/// Alias to `mod.gpa`.
+/// Alias to `zcu.gpa`.
 gpa: Allocator,
 /// Points to the temporary arena allocator of the Sema.
 /// This arena will be cleared when the sema is destroyed.
@@ -67,7 +67,7 @@ generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
 /// breaking from a block.
 post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
 /// Populated with the last compile error created.
-err: ?*Module.ErrorMsg = null,
+err: ?*Zcu.ErrorMsg = null,
 /// Set to true when analyzing a func type instruction so that nested generic
 /// function types will emit generic poison instead of a partial type.
 no_partial_func_ty: bool = false,
@@ -172,11 +172,10 @@ const Type = @import("Type.zig");
 const Air = @import("Air.zig");
 const Zir = std.zig.Zir;
 const Zcu = @import("Zcu.zig");
-const Module = Zcu;
 const trace = @import("tracy.zig").trace;
-const Namespace = Module.Namespace;
-const CompileError = Module.CompileError;
-const SemaError = Module.SemaError;
+const Namespace = Zcu.Namespace;
+const CompileError = Zcu.CompileError;
+const SemaError = Zcu.SemaError;
 const LazySrcLoc = Zcu.LazySrcLoc;
 const RangeSet = @import("RangeSet.zig");
 const target_util = @import("target.zig");
@@ -431,7 +430,7 @@ pub const Block = struct {
             return_ty: Type,
         },
 
-        fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
+        fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Zcu.ErrorMsg) !void {
             const parent = msg orelse return;
             const pt = sema.pt;
             const prefix = "expression is evaluated at comptime because ";
@@ -733,12 +732,12 @@ pub const Block = struct {
     fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
         const sema = block.sema;
         const pt = sema.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         return block.addInst(.{
             .tag = if (block.float_mode == .optimized) .cmp_vector_optimized else .cmp_vector,
             .data = .{ .ty_pl = .{
                 .ty = Air.internedToRef((try pt.vectorType(.{
-                    .len = sema.typeOf(lhs).vectorLen(mod),
+                    .len = sema.typeOf(lhs).vectorLen(zcu),
                     .child = .bool_type,
                 })).toIntern()),
                 .payload = try sema.addExtra(Air.VectorCmp{
@@ -852,7 +851,7 @@ const LabeledBlock = struct {
 /// The value stored in the inferred allocation. This will go into
 /// peer type resolution. This is stored in a separate list so that
 /// the items are contiguous in memory and thus can be passed to
-/// `Module.resolvePeerTypes`.
+/// `Zcu.resolvePeerTypes`.
 const InferredAlloc = struct {
     /// The placeholder `store` instructions used before the result pointer type
     /// is known. These should be rewritten to perform any required coercions
@@ -1950,7 +1949,7 @@ fn resolveDestType(
     builtin_name: []const u8,
 ) !Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const remove_eu = switch (strat) {
         .remove_eu_opt, .remove_eu => true,
         .remove_opt => false,
@@ -1980,15 +1979,15 @@ fn resolveDestType(
         else => |e| return e,
     };
 
-    if (remove_eu and raw_ty.zigTypeTag(mod) == .ErrorUnion) {
-        const eu_child = raw_ty.errorUnionPayload(mod);
-        if (remove_opt and eu_child.zigTypeTag(mod) == .Optional) {
-            return eu_child.childType(mod);
+    if (remove_eu and raw_ty.zigTypeTag(zcu) == .ErrorUnion) {
+        const eu_child = raw_ty.errorUnionPayload(zcu);
+        if (remove_opt and eu_child.zigTypeTag(zcu) == .Optional) {
+            return eu_child.childType(zcu);
         }
         return eu_child;
     }
-    if (remove_opt and raw_ty.zigTypeTag(mod) == .Optional) {
-        return raw_ty.childType(mod);
+    if (remove_opt and raw_ty.zigTypeTag(zcu) == .Optional) {
+        return raw_ty.childType(zcu);
     }
     return raw_ty;
 }
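
The strat handling above is what lets a builtin's result type see through error unions and optionals. A self-contained sketch of the user-facing effect (hedged; exact builtin coverage depends on each call site's `strat`):

    const std = @import("std");

    test "result type sees through error union and optional" {
        const big: u64 = 7;
        // The dest type for @intCast resolves to `u32` despite the `anyerror!?` wrapper.
        const x: anyerror!?u32 = @intCast(big);
        try std.testing.expectEqual(@as(u32, 7), (try x).?);
    }
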
@@ -2068,10 +2067,10 @@ fn analyzeAsType(
 
 pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const comp = mod.comp;
+    const zcu = pt.zcu;
+    const comp = zcu.comp;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     if (!comp.config.any_error_tracing) return;
 
     assert(!block.is_comptime);
@@ -2140,9 +2139,9 @@ fn resolveDefinedValue(
     air_ref: Air.Inst.Ref,
 ) CompileError!?Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const val = try sema.resolveValue(air_ref) orelse return null;
-    if (val.isUndef(mod)) {
+    if (val.isUndef(zcu)) {
         return sema.failWithUseOfUndef(block, src);
     }
     return val;
@@ -2340,12 +2339,12 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty:
 
 fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const msg = msg: {
         const msg = try sema.errMsg(init_src, "value stored in comptime field does not match the default value of the field", .{});
         errdefer msg.destroy(sema.gpa);
 
-        const struct_type = mod.typeToStruct(container_ty) orelse break :msg msg;
+        const struct_type = zcu.typeToStruct(container_ty) orelse break :msg msg;
         try sema.errNote(.{
             .base_node_inst = struct_type.zir_index.unwrap().?,
             .offset = .{ .container_field_value = @intCast(field_index) },
@@ -2372,12 +2371,12 @@ fn failWithInvalidFieldAccess(
     field_name: InternPool.NullTerminatedString,
 ) CompileError {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;
+    const zcu = pt.zcu;
+    const inner_ty = if (object_ty.isSinglePointer(zcu)) object_ty.childType(zcu) else object_ty;
 
-    if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
-        const child_ty = inner_ty.optionalChild(mod);
-        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
+    if (inner_ty.zigTypeTag(zcu) == .Optional) opt: {
+        const child_ty = inner_ty.optionalChild(zcu);
+        if (!typeSupportsFieldAccess(zcu, child_ty, field_name)) break :opt;
         const msg = msg: {
             const msg = try sema.errMsg(src, "optional type '{}' does not support field access", .{object_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -2385,9 +2384,9 @@ fn failWithInvalidFieldAccess(
             break :msg msg;
         };
         return sema.failWithOwnedErrorMsg(block, msg);
-    } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: {
-        const child_ty = inner_ty.errorUnionPayload(mod);
-        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
+    } else if (inner_ty.zigTypeTag(zcu) == .ErrorUnion) err: {
+        const child_ty = inner_ty.errorUnionPayload(zcu);
+        if (!typeSupportsFieldAccess(zcu, child_ty, field_name)) break :err;
         const msg = msg: {
             const msg = try sema.errMsg(src, "error union type '{}' does not support field access", .{object_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -2399,15 +2398,15 @@ fn failWithInvalidFieldAccess(
     return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(pt)});
 }
 
-fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool {
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
+fn typeSupportsFieldAccess(zcu: *const Zcu, ty: Type, field_name: InternPool.NullTerminatedString) bool {
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Array => return field_name.eqlSlice("len", ip),
         .Pointer => {
-            const ptr_info = ty.ptrInfo(mod);
+            const ptr_info = ty.ptrInfo(zcu);
             if (ptr_info.flags.size == .Slice) {
                 return field_name.eqlSlice("ptr", ip) or field_name.eqlSlice("len", ip);
-            } else if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
+            } else if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Array) {
                 return field_name.eqlSlice("len", ip);
             } else return false;
         },
@@ -2423,9 +2422,9 @@ fn failWithComptimeErrorRetTrace(
     name: InternPool.NullTerminatedString,
 ) CompileError {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const msg = msg: {
-        const msg = try sema.errMsg(src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)});
+        const msg = try sema.errMsg(src, "caught unexpected error '{}'", .{name.fmt(&zcu.intern_pool)});
         errdefer msg.destroy(sema.gpa);
 
         for (sema.comptime_err_ret_trace.items) |src_loc| {
@@ -2451,7 +2450,7 @@ fn failWithInvalidPtrArithmetic(sema: *Sema, block: *Block, src: LazySrcLoc, ari
 pub fn errNote(
     sema: *Sema,
     src: LazySrcLoc,
-    parent: *Module.ErrorMsg,
+    parent: *Zcu.ErrorMsg,
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
@@ -2462,7 +2461,7 @@ fn addFieldErrNote(
     sema: *Sema,
     container_ty: Type,
     field_index: usize,
-    parent: *Module.ErrorMsg,
+    parent: *Zcu.ErrorMsg,
     comptime format: []const u8,
     args: anytype,
 ) !void {
@@ -2480,9 +2479,9 @@ pub fn errMsg(
     src: LazySrcLoc,
     comptime format: []const u8,
     args: anytype,
-) Allocator.Error!*Module.ErrorMsg {
+) Allocator.Error!*Zcu.ErrorMsg {
     assert(src.offset != .unneeded);
-    return Module.ErrorMsg.create(sema.gpa, src, format, args);
+    return Zcu.ErrorMsg.create(sema.gpa, src, format, args);
 }
 
 pub fn fail(
@@ -2501,16 +2500,16 @@ pub fn fail(
     return sema.failWithOwnedErrorMsg(block, err_msg);
 }
 
-pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
+pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
     @setCold(true);
     const gpa = sema.gpa;
-    const mod = sema.pt.zcu;
+    const zcu = sema.pt.zcu;
 
-    if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
+    if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) {
         var all_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?Zcu.ResolvedReference) = null;
         var wip_errors: std.zig.ErrorBundle.Wip = undefined;
         wip_errors.init(gpa) catch @panic("out of memory");
-        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory");
+        Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory");
         std.debug.print("compile error during Sema:\n", .{});
         var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
         error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
@@ -2530,12 +2529,12 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
         }
     }
 
-    const use_ref_trace = if (mod.comp.reference_trace) |n| n > 0 else mod.failed_analysis.count() == 0;
+    const use_ref_trace = if (zcu.comp.reference_trace) |n| n > 0 else zcu.failed_analysis.count() == 0;
     if (use_ref_trace) {
         err_msg.reference_trace_root = sema.owner.toOptional();
     }
 
-    const gop = try mod.failed_analysis.getOrPut(gpa, sema.owner);
+    const gop = try zcu.failed_analysis.getOrPut(gpa, sema.owner);
     if (gop.found_existing) {
         // If there are multiple errors for the same Decl, prefer the first one added.
         sema.err = null;
@@ -2554,7 +2553,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
 fn reparentOwnedErrorMsg(
     sema: *Sema,
     src: LazySrcLoc,
-    msg: *Module.ErrorMsg,
+    msg: *Zcu.ErrorMsg,
     comptime format: []const u8,
     args: anytype,
 ) !void {
@@ -2562,7 +2561,7 @@ fn reparentOwnedErrorMsg(
 
     const orig_notes = msg.notes.len;
     msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
-    std.mem.copyBackwards(Module.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
+    std.mem.copyBackwards(Zcu.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
     msg.notes[0] = .{
         .src_loc = msg.src_loc,
         .msg = msg.msg,
@@ -2644,7 +2643,7 @@ fn analyzeAsInt(
 ) !u64 {
     const coerced = try sema.coerce(block, dest_ty, air_ref, src);
     const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
-    return (try val.getUnsignedIntAdvanced(sema.pt, .sema)).?;
+    return try val.toUnsignedIntSema(sema.pt);
 }
 
 /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
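
`analyzeAsInt` above now uses the `Sema`-flavored value helper directly. A sketch of the rename, assuming `val: Value` holds a resolved non-negative integer:

    // before: (try val.getUnsignedIntAdvanced(sema.pt, .sema)).?
    const n: u64 = try val.toUnsignedIntSema(sema.pt);
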
@@ -2722,9 +2721,9 @@ fn zirStructDecl(
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
     const extra = sema.code.extraData(Zir.Inst.StructDecl, extended.operand);
 
@@ -2786,7 +2785,7 @@ fn zirStructDecl(
 
             // Make sure we update the namespace if the declaration is re-analyzed, to pick
             // up on e.g. changed comptime decls.
-            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
+            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(zcu));
 
             try sema.declareDependency(.{ .interned = new_ty });
             try sema.addTypeReferenceEntry(src, new_ty);
@@ -2807,8 +2806,8 @@ fn zirStructDecl(
     const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
     errdefer pt.destroyNamespace(new_namespace_index);
 
@@ -2825,11 +2824,11 @@ fn zirStructDecl(
     const decls = sema.code.bodySlice(extra_index, decls_len);
     try pt.scanNamespace(new_namespace_index, decls);
 
-    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
+    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -2938,9 +2937,9 @@ fn zirEnumDecl(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
     const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand);
     var extra_index: usize = extra.end;
@@ -3015,7 +3014,7 @@ fn zirEnumDecl(
 
             // Make sure we update the namespace if the declaration is re-analyzed, to pick
             // up on e.g. changed comptime decls.
-            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
+            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(zcu));
 
             try sema.declareDependency(.{ .interned = new_ty });
             try sema.addTypeReferenceEntry(src, new_ty);
@@ -3042,8 +3041,8 @@ fn zirEnumDecl(
     const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
     errdefer if (!done) pt.destroyNamespace(new_namespace_index);
 
@@ -3077,9 +3076,9 @@ fn zirEnumDecl(
     );
 
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     return Air.internedToRef(wip_ty.index);
 }
@@ -3094,9 +3093,9 @@ fn zirUnionDecl(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
     const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand);
     var extra_index: usize = extra.end;
@@ -3159,7 +3158,7 @@ fn zirUnionDecl(
 
             // Make sure we update the namespace if the declaration is re-analyzed, to pick
             // up on e.g. changed comptime decls.
-            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
+            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(zcu));
 
             try sema.declareDependency(.{ .interned = new_ty });
             try sema.addTypeReferenceEntry(src, new_ty);
@@ -3180,15 +3179,15 @@ fn zirUnionDecl(
     const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
     errdefer pt.destroyNamespace(new_namespace_index);
 
     const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
 
     if (pt.zcu.comp.incremental) {
-        try mod.intern_pool.addDependency(
+        try zcu.intern_pool.addDependency(
             gpa,
             AnalUnit.wrap(.{ .cau = new_cau_index }),
             .{ .src_hash = tracked_inst },
@@ -3198,11 +3197,11 @@ fn zirUnionDecl(
     const decls = sema.code.bodySlice(extra_index, decls_len);
     try pt.scanNamespace(new_namespace_index, decls);
 
-    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
+    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -3219,9 +3218,9 @@ fn zirOpaqueDecl(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
     const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
@@ -3255,7 +3254,7 @@ fn zirOpaqueDecl(
         .existing => |ty| {
             // Make sure we update the namespace if the declaration is re-analyzed, to pick
             // up on e.g. changed comptime decls.
-            try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(mod));
+            try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(zcu));
 
             try sema.declareDependency(.{ .interned = ty });
             try sema.addTypeReferenceEntry(src, ty);
@@ -3276,8 +3275,8 @@ fn zirOpaqueDecl(
     const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
     errdefer pt.destroyNamespace(new_namespace_index);
 
@@ -3285,9 +3284,9 @@ fn zirOpaqueDecl(
     try pt.scanNamespace(new_namespace_index, decls);
 
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     try sema.addTypeReferenceEntry(src, wip_ty.index);
     return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
@@ -3301,7 +3300,7 @@ fn zirErrorSetDecl(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
@@ -3314,7 +3313,7 @@ fn zirErrorSetDecl(
     while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string
         const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
         const name = sema.code.nullTerminatedString(name_index);
-        const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
+        const name_ip = try zcu.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
         _ = try pt.getErrorValue(name_ip);
         const result = names.getOrPutAssumeCapacity(name_ip);
         assert(!result.found_existing); // verified in AstGen
@@ -3329,7 +3328,7 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
 
     const pt = sema.pt;
 
-    if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
+    if (block.is_comptime or try sema.fn_ret_ty.comptimeOnlySema(pt)) {
         try sema.fn_ret_ty.resolveFields(pt);
         return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
     }
@@ -3377,8 +3376,8 @@ fn ensureResultUsed(
     src: LazySrcLoc,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Void, .NoReturn => return,
         .ErrorSet => return sema.fail(block, src, "error set is ignored", .{}),
         .ErrorUnion => {
@@ -3408,12 +3407,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const operand = try sema.resolveInst(inst_data.operand);
     const src = block.nodeOffset(inst_data.src_node);
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .ErrorSet => return sema.fail(block, src, "error set is discarded", .{}),
         .ErrorUnion => {
             const msg = msg: {
@@ -3433,17 +3432,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
-        operand_ty.childType(mod)
+    const err_union_ty = if (operand_ty.zigTypeTag(zcu) == .Pointer)
+        operand_ty.childType(zcu)
     else
         operand_ty;
-    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
-    const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod);
+    if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) return;
+    const payload_ty = err_union_ty.errorUnionPayload(zcu).zigTypeTag(zcu);
     if (payload_ty != .Void and payload_ty != .NoReturn) {
         const msg = msg: {
             const msg = try sema.errMsg(src, "error union payload is ignored", .{});
@@ -3473,12 +3472,12 @@ fn indexablePtrLen(
     object: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const object_ty = sema.typeOf(object);
-    const is_pointer_to = object_ty.isSinglePointer(mod);
-    const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty;
+    const is_pointer_to = object_ty.isSinglePointer(zcu);
+    const indexable_ty = if (is_pointer_to) object_ty.childType(zcu) else object_ty;
     try checkIndexable(sema, block, src, indexable_ty);
-    const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
+    const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
     return sema.fieldVal(block, src, object, field_name, src);
 }
 
@@ -3489,11 +3488,11 @@ fn indexablePtrLenOrNone(
     operand: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
     try checkMemOperand(sema, block, src, operand_ty);
-    if (operand_ty.ptrSize(mod) == .Many) return .none;
-    const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
+    if (operand_ty.ptrSize(zcu) == .Many) return .none;
+    const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
     return sema.fieldVal(block, src, operand, field_name, src);
 }
 
@@ -3592,11 +3591,11 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
 
 fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const alloc = try sema.resolveInst(inst_data.operand);
     const alloc_ty = sema.typeOf(alloc);
-    const ptr_info = alloc_ty.ptrInfo(mod);
+    const ptr_info = alloc_ty.ptrInfo(zcu);
     const elem_ty = Type.fromInterned(ptr_info.child);
 
     // If the alloc was created in a comptime scope, we already created a comptime alloc for it.
@@ -3607,7 +3606,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 
         // If this was a comptime inferred alloc, then `storeToInferredAllocComptime`
         // might have already done our job and created an anon decl ref.
-        switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
+        switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
             .ptr => |ptr| switch (ptr.base_addr) {
                 .uav => {
                     // The comptime-ification was already done for us.
@@ -3620,12 +3619,12 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
         }
 
         if (!sema.isComptimeMutablePtr(ptr_val)) break :already_ct;
-        const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
+        const ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
         assert(ptr.byte_offset == 0);
         const alloc_index = ptr.base_addr.comptime_alloc;
         const ct_alloc = sema.getComptimeAlloc(alloc_index);
         const interned = try ct_alloc.val.intern(pt, sema.arena);
-        if (interned.canMutateComptimeVarState(mod)) {
+        if (interned.canMutateComptimeVarState(zcu)) {
             // Preserve the comptime alloc, just make the pointer const.
             ct_alloc.val = .{ .interned = interned.toIntern() };
             ct_alloc.is_const = true;
@@ -3649,7 +3648,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
         return sema.makePtrConst(block, Air.internedToRef(ptr_val));
     }
 
-    if (try sema.typeRequiresComptime(elem_ty)) {
+    if (try elem_ty.comptimeOnlySema(pt)) {
         // The value was initialized through RLS, so we didn't detect the runtime condition earlier.
         // TODO: source location of runtime control flow
         const init_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
@@ -3918,7 +3917,7 @@ fn finishResolveComptimeKnownAllocPtr(
 
     if (Value.fromInterned(result_val).canMutateComptimeVarState(zcu)) {
         const alloc_index = existing_comptime_alloc orelse a: {
-            const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(pt));
+            const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(zcu));
             const alloc = sema.getComptimeAlloc(idx);
             alloc.val = .{ .interned = result_val };
             break :a idx;
@@ -4072,14 +4071,14 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
     const ptr = try sema.resolveInst(inst_data.operand);
     const ptr_inst = ptr.toIndex().?;
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
 
     switch (sema.air_instructions.items(.tag)[@intFromEnum(ptr_inst)]) {
         .inferred_alloc_comptime => {
@@ -4093,7 +4092,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                 sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = undefined, .data = undefined });
             }
 
-            const val = switch (mod.intern_pool.indexToKey(resolved_ptr).ptr.base_addr) {
+            const val = switch (zcu.intern_pool.indexToKey(resolved_ptr).ptr.base_addr) {
                 .uav => |a| a.val,
                 .comptime_alloc => |i| val: {
                     const alloc = sema.getComptimeAlloc(i);
@@ -4101,11 +4100,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                 },
                 else => unreachable,
             };
-            if (mod.intern_pool.isFuncBody(val)) {
-                const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
-                if (try sema.fnHasRuntimeBits(ty)) {
+            if (zcu.intern_pool.isFuncBody(val)) {
+                const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
+                if (try ty.fnHasRuntimeBitsSema(pt)) {
                     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = val }));
-                    try mod.ensureFuncBodyAnalysisQueued(val);
+                    try zcu.ensureFuncBodyAnalysisQueued(val);
                 }
             }
 
@@ -4148,7 +4147,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                 return;
             }
 
-            if (try sema.typeRequiresComptime(final_elem_ty)) {
+            if (try final_elem_ty.comptimeOnlySema(pt)) {
                 // The alloc wasn't comptime-known per the above logic, so the
                 // type cannot be comptime-only.
                 // TODO: source location of runtime control flow
@@ -4213,9 +4212,9 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
 
 fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
     const args = sema.code.refSlice(extra.end, extra.data.operands_len);
@@ -4238,7 +4237,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         const object_ty = sema.typeOf(object);
         // Each arg could be an indexable, or a range, in which case the length
         // is passed directly as an integer.
-        const is_int = switch (object_ty.zigTypeTag(mod)) {
+        const is_int = switch (object_ty.zigTypeTag(zcu)) {
             .Int, .ComptimeInt => true,
             else => false,
         };
@@ -4247,14 +4246,14 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             .input_index = i,
         } });
         const arg_len_uncoerced = if (is_int) object else l: {
-            if (!object_ty.isIndexable(mod)) {
+            if (!object_ty.isIndexable(zcu)) {
                 // Instead of using checkIndexable we customize this error.
                 const msg = msg: {
                     const msg = try sema.errMsg(arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(pt)});
                     errdefer msg.destroy(sema.gpa);
                     try sema.errNote(arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{});
 
-                    if (object_ty.zigTypeTag(mod) == .ErrorUnion) {
+                    if (object_ty.zigTypeTag(zcu) == .ErrorUnion) {
                         try sema.errNote(arg_src, msg, "consider using 'try', 'catch', or 'if'", .{});
                     }
 
@@ -4262,7 +4261,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 };
                 return sema.failWithOwnedErrorMsg(block, msg);
             }
-            if (!object_ty.indexableHasLen(mod)) continue;
+            if (!object_ty.indexableHasLen(zcu)) continue;
 
             break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), arg_src);
         };
@@ -4313,7 +4312,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 const object_ty = sema.typeOf(object);
                 // Each arg could be an indexable, or a range, in which case the length
                 // is passed directly as an integer.
-                switch (object_ty.zigTypeTag(mod)) {
+                switch (object_ty.zigTypeTag(zcu)) {
                     .Int, .ComptimeInt => continue,
                     else => {},
                 }
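
The two passes above accept either an indexable or a bare integer length per operand, mirroring the language's multi-object `for`. A self-contained sketch of the semantics being checked:

    const std = @import("std");

    test "for over an indexable and a counter range" {
        const items = [_]u8{ 1, 2, 3 };
        var sum: usize = 0;
        for (items, 0..) |x, i| sum += x + i;
        try std.testing.expectEqual(@as(usize, 9), sum);
    }
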
@@ -4349,9 +4348,9 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 /// May invalidate already-stored payload data.
 fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcLoc) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     var base_ptr = ptr;
-    while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
+    while (true) switch (sema.typeOf(base_ptr).childType(zcu).zigTypeTag(zcu)) {
         .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
         .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
         else => break,
@@ -4368,7 +4367,7 @@ fn zirOptEuBasePtrInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
 
 fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(pl_node.src_node);
     const extra = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
@@ -4377,13 +4376,13 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
         error.GenericPoison => return uncoerced_val,
         else => |e| return e,
     };
-    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod);
-    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
-    const elem_ty = ptr_ty.childType(mod);
-    switch (ptr_ty.ptrSize(mod)) {
+    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(zcu);
+    assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+    const elem_ty = ptr_ty.childType(zcu);
+    switch (ptr_ty.ptrSize(zcu)) {
         .One => {
             const uncoerced_ty = sema.typeOf(uncoerced_val);
-            if (elem_ty.zigTypeTag(mod) == .Array and elem_ty.childType(mod).toIntern() == uncoerced_ty.toIntern()) {
+            if (elem_ty.zigTypeTag(zcu) == .Array and elem_ty.childType(zcu).toIntern() == uncoerced_ty.toIntern()) {
                 // We're trying to initialize a *[1]T with a reference to a T - don't perform any coercion.
                 return uncoerced_val;
             }
@@ -4397,16 +4396,16 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
         .Slice, .Many => {
             // Our goal is to coerce `uncoerced_val` to an array of `elem_ty`.
             const val_ty = sema.typeOf(uncoerced_val);
-            switch (val_ty.zigTypeTag(mod)) {
+            switch (val_ty.zigTypeTag(zcu)) {
                 .Array, .Vector => {},
-                else => if (!val_ty.isTuple(mod)) {
+                else => if (!val_ty.isTuple(zcu)) {
                     return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(pt), val_ty.fmt(pt) });
                 },
             }
             const want_ty = try pt.arrayType(.{
-                .len = val_ty.arrayLen(mod),
+                .len = val_ty.arrayLen(zcu),
                 .child = elem_ty.toIntern(),
-                .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
+                .sentinel = if (ptr_ty.sentinel(zcu)) |s| s.toIntern() else .none,
             });
             return sema.coerce(block, want_ty, uncoerced_val, src);
         },
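
The split is visible inside a single expression above: reading the length and sentinel off existing types is a query on *Zcu, but pt.arrayType may intern a brand-new type, which is a mutation, so construction stays on the per-thread handle. Reduced to the essentials (sentinel simplified away):

    const len = val_ty.arrayLen(zcu); // read: *Zcu
    const want_ty = try pt.arrayType(.{ // may intern a new type: Zcu.PerThread
        .len = len,
        .child = elem_ty.toIntern(),
        .sentinel = .none, // simplified for the sketch
    });
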
@@ -4420,7 +4419,7 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
 
 fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
     const src = block.tokenOffset(un_tok.src_tok);
     // In case of GenericPoison, we don't actually have a type, so this will be
@@ -4434,7 +4433,7 @@ fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
         else => |e| return e,
     };
     if (ty_operand.isGenericPoison()) return;
-    if (ty_operand.optEuBaseType(mod).zigTypeTag(mod) != .Pointer) {
+    if (ty_operand.optEuBaseType(zcu).zigTypeTag(zcu) != .Pointer) {
         return sema.failWithOwnedErrorMsg(block, msg: {
             const msg = try sema.errMsg(src, "expected type '{}', found pointer", .{ty_operand.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -4450,7 +4449,7 @@ fn zirValidateArrayInitRefTy(
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(pl_node.src_node);
     const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, pl_node.payload_index).data;
@@ -4458,16 +4457,16 @@ fn zirValidateArrayInitRefTy(
         error.GenericPoison => return .generic_poison_type,
         else => |e| return e,
     };
-    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod);
-    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
-    switch (mod.intern_pool.indexToKey(ptr_ty.toIntern())) {
+    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(zcu);
+    assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+    switch (zcu.intern_pool.indexToKey(ptr_ty.toIntern())) {
         .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
             .Slice, .Many => {
                 // Use array of correct length
                 const arr_ty = try pt.arrayType(.{
                     .len = extra.elem_count,
-                    .child = ptr_ty.childType(mod).toIntern(),
-                    .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
+                    .child = ptr_ty.childType(zcu).toIntern(),
+                    .sentinel = if (ptr_ty.sentinel(zcu)) |s| s.toIntern() else .none,
                 });
                 return Air.internedToRef(arr_ty.toIntern());
             },
@@ -4476,12 +4475,12 @@ fn zirValidateArrayInitRefTy(
         else => {},
     }
     // Otherwise, we just want the pointer child type
-    const ret_ty = ptr_ty.childType(mod);
+    const ret_ty = ptr_ty.childType(zcu);
     if (ret_ty.toIntern() == .anyopaque_type) {
         // The actual array type is unknown, which we represent with a generic poison.
         return .generic_poison_type;
     }
-    const arr_ty = ret_ty.optEuBaseType(mod);
+    const arr_ty = ret_ty.optEuBaseType(zcu);
     try sema.validateArrayInitTy(block, src, src, extra.elem_count, arr_ty);
     return Air.internedToRef(ret_ty.toIntern());
 }
@@ -4493,7 +4492,7 @@ fn zirValidateArrayInitTy(
     is_result_ty: bool,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty_src: LazySrcLoc = if (is_result_ty) src else block.src(.{ .node_offset_init_ty = inst_data.src_node });
@@ -4503,7 +4502,7 @@ fn zirValidateArrayInitTy(
         error.GenericPoison => return,
         else => |e| return e,
     };
-    const arr_ty = if (is_result_ty) ty.optEuBaseType(mod) else ty;
+    const arr_ty = if (is_result_ty) ty.optEuBaseType(zcu) else ty;
     return sema.validateArrayInitTy(block, src, ty_src, extra.init_count, arr_ty);
 }
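
optEuBaseType keeps appearing because result types can arrive wrapped in optionals and error unions; it peels those layers off before the pointer and array checks, mirroring the pointer unwrapping loop in optEuBasePtrInit. Illustrative use (result_ty and the example type are hypothetical):

    const base_ty = result_ty.optEuBaseType(zcu); // e.g. anyerror!?*[3]u8 -> *[3]u8
    assert(base_ty.zigTypeTag(zcu) == .Pointer);
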
 
@@ -4516,10 +4515,10 @@ fn validateArrayInitTy(
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Array => {
-            const array_len = ty.arrayLen(mod);
+            const array_len = ty.arrayLen(zcu);
             if (init_count != array_len) {
                 return sema.fail(block, src, "expected {d} array elements; found {d}", .{
                     array_len, init_count,
@@ -4528,7 +4527,7 @@ fn validateArrayInitTy(
             return;
         },
         .Vector => {
-            const array_len = ty.arrayLen(mod);
+            const array_len = ty.arrayLen(zcu);
             if (init_count != array_len) {
                 return sema.fail(block, src, "expected {d} vector elements; found {d}", .{
                     array_len, init_count,
@@ -4536,9 +4535,9 @@ fn validateArrayInitTy(
             }
             return;
         },
-        .Struct => if (ty.isTuple(mod)) {
+        .Struct => if (ty.isTuple(zcu)) {
             try ty.resolveFields(pt);
-            const array_len = ty.arrayLen(mod);
+            const array_len = ty.arrayLen(zcu);
             if (init_count > array_len) {
                 return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
                     array_len, init_count,
@@ -4558,7 +4557,7 @@ fn zirValidateStructInitTy(
     is_result_ty: bool,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) {
@@ -4566,9 +4565,9 @@ fn zirValidateStructInitTy(
         error.GenericPoison => return,
         else => |e| return e,
     };
-    const struct_ty = if (is_result_ty) ty.optEuBaseType(mod) else ty;
+    const struct_ty = if (is_result_ty) ty.optEuBaseType(zcu) else ty;
 
-    switch (struct_ty.zigTypeTag(mod)) {
+    switch (struct_ty.zigTypeTag(zcu)) {
         .Struct, .Union => return,
         else => {},
     }
@@ -4584,7 +4583,7 @@ fn zirValidatePtrStructInit(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const init_src = block.nodeOffset(validate_inst.src_node);
     const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -4592,8 +4591,8 @@ fn zirValidatePtrStructInit(
     const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(instrs[0])].pl_node;
     const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
     const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
-    const agg_ty = sema.typeOf(object_ptr).childType(mod).optEuBaseType(mod);
-    switch (agg_ty.zigTypeTag(mod)) {
+    const agg_ty = sema.typeOf(object_ptr).childType(zcu).optEuBaseType(zcu);
+    switch (agg_ty.zigTypeTag(zcu)) {
         .Struct => return sema.validateStructInit(
             block,
             agg_ty,
@@ -4620,7 +4619,7 @@ fn validateUnionInit(
     union_ptr: Air.Inst.Ref,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
 
     if (instrs.len != 1) {
@@ -4654,7 +4653,7 @@ fn validateUnionInit(
     const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node;
     const field_src = block.src(.{ .node_offset_initializer = field_ptr_data.src_node });
     const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
-    const field_name = try mod.intern_pool.getOrPutString(
+    const field_name = try zcu.intern_pool.getOrPutString(
         gpa,
         pt.tid,
         sema.code.nullTerminatedString(field_ptr_extra.field_name_start),
@@ -4718,9 +4717,9 @@ fn validateUnionInit(
         break;
     }
 
-    const tag_ty = union_ty.unionTagTypeHypothetical(mod);
+    const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
     const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
-    const field_type = union_ty.unionFieldType(tag_val, mod).?;
+    const field_type = union_ty.unionFieldType(tag_val, zcu).?;
 
     if (try sema.typeHasOnePossibleValue(field_type)) |field_only_value| {
         init_val = field_only_value;
@@ -4761,7 +4760,7 @@ fn validateUnionInit(
         const union_init = Air.internedToRef(union_val);
         try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
         return;
-    } else if (try sema.typeRequiresComptime(union_ty)) {
+    } else if (try union_ty.comptimeOnlySema(pt)) {
         return sema.failWithNeededComptime(block, block.nodeOffset(field_ptr_data.src_node), .{
             .needed_comptime_reason = "initializer of comptime only union must be comptime-known",
         });
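
This hunk also shows the commit's second theme: sema.typeRequiresComptime(ty) becomes ty.comptimeOnlySema(pt). The query moves from Sema onto Type, and the Sema suffix marks methods that may perform semantic analysis (resolving the type), which is why it still takes pt rather than zcu. Side by side (the error name is hypothetical):

    // before: if (try sema.typeRequiresComptime(union_ty)) return error.NeedsComptime;
    if (try union_ty.comptimeOnlySema(pt)) return error.NeedsComptime;
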
@@ -4781,15 +4780,15 @@ fn validateStructInit(
     instrs: []const Zir.Inst.Index,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     const field_indices = try gpa.alloc(u32, instrs.len);
     defer gpa.free(field_indices);
 
     // Maps field index to field_ptr index of where it was already initialized.
-    const found_fields = try gpa.alloc(Zir.Inst.OptionalIndex, struct_ty.structFieldCount(mod));
+    const found_fields = try gpa.alloc(Zir.Inst.OptionalIndex, struct_ty.structFieldCount(zcu));
     defer gpa.free(found_fields);
     @memset(found_fields, .none);
 
@@ -4806,7 +4805,7 @@ fn validateStructInit(
             sema.code.nullTerminatedString(field_ptr_extra.field_name_start),
             .no_embedded_nulls,
         );
-        field_index.* = if (struct_ty.isTuple(mod))
+        field_index.* = if (struct_ty.isTuple(zcu))
             try sema.tupleFieldIndex(block, struct_ty, field_name, field_src)
         else
             try sema.structFieldIndex(block, struct_ty, field_name, field_src);
@@ -4814,7 +4813,7 @@ fn validateStructInit(
         found_fields[field_index.*] = field_ptr.toOptional();
     }
 
-    var root_msg: ?*Module.ErrorMsg = null;
+    var root_msg: ?*Zcu.ErrorMsg = null;
     errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
     const struct_ptr = try sema.resolveInst(struct_ptr_zir_ref);
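
The ?*Module.ErrorMsg to ?*Zcu.ErrorMsg change above is the naming half of the same cleanup: Module is the legacy name for what is now Zcu (the Zig compilation unit), and this commit moves call sites to the new spelling. Both identifiers necessarily resolve in this file for the diff to compile, roughly:

    const Zcu = @import("Zcu.zig");
    const Module = Zcu; // legacy alias; new code writes Zcu.ErrorMsg, Zcu.Export.Options, ...
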
@@ -4830,9 +4829,9 @@ fn validateStructInit(
             if (field_ptr != .none) continue;
 
             try struct_ty.resolveStructFieldInits(pt);
-            const default_val = struct_ty.structFieldDefaultValue(i, mod);
+            const default_val = struct_ty.structFieldDefaultValue(i, zcu);
             if (default_val.toIntern() == .unreachable_value) {
-                const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
+                const field_name = struct_ty.structFieldName(i, zcu).unwrap() orelse {
                     const template = "missing tuple field with index {d}";
                     if (root_msg) |msg| {
                         try sema.errNote(init_src, msg, template, .{i});
@@ -4852,7 +4851,7 @@ fn validateStructInit(
             }
 
             const field_src = init_src; // TODO better source location
-            const default_field_ptr = if (struct_ty.isTuple(mod))
+            const default_field_ptr = if (struct_ty.isTuple(zcu))
                 try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true)
             else
                 try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true);
@@ -4874,7 +4873,7 @@ fn validateStructInit(
     var struct_is_comptime = true;
     var first_block_index = block.instructions.items.len;
 
-    const require_comptime = try sema.typeRequiresComptime(struct_ty);
+    const require_comptime = try struct_ty.comptimeOnlySema(pt);
     const air_tags = sema.air_instructions.items(.tag);
     const air_datas = sema.air_instructions.items(.data);
 
@@ -4882,13 +4881,13 @@ fn validateStructInit(
 
     // We collect the comptime field values in case the struct initialization
     // ends up being comptime-known.
-    const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod));
+    const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(zcu));
 
     field: for (found_fields, 0..) |opt_field_ptr, i_usize| {
         const i: u32 = @intCast(i_usize);
         if (opt_field_ptr.unwrap()) |field_ptr| {
             // Determine whether the value stored to this pointer is comptime-known.
-            const field_ty = struct_ty.structFieldType(i, mod);
+            const field_ty = struct_ty.structFieldType(i, zcu);
             if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
                 field_values[i] = opv.toIntern();
                 continue;
@@ -4958,9 +4957,9 @@ fn validateStructInit(
             continue :field;
         }
 
-        const default_val = struct_ty.structFieldDefaultValue(i, mod);
+        const default_val = struct_ty.structFieldDefaultValue(i, zcu);
         if (default_val.toIntern() == .unreachable_value) {
-            const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
+            const field_name = struct_ty.structFieldName(i, zcu).unwrap() orelse {
                 const template = "missing tuple field with index {d}";
                 if (root_msg) |msg| {
                     try sema.errNote(init_src, msg, template, .{i});
@@ -5000,7 +4999,7 @@ fn validateStructInit(
         var block_index = first_block_index;
         for (block.instructions.items[first_block_index..]) |cur_inst| {
             while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) {
-                const field_ty = struct_ty.structFieldType(field_indices[init_index], mod);
+                const field_ty = struct_ty.structFieldType(field_indices[init_index], zcu);
                 if (try field_ty.onePossibleValue(pt)) |_| continue;
                 field_ptr_ref = sema.inst_map.get(instrs[init_index]).?;
             }
@@ -5044,7 +5043,7 @@ fn validateStructInit(
         if (field_ptr != .none) continue;
 
         const field_src = init_src; // TODO better source location
-        const default_field_ptr = if (struct_ty.isTuple(mod))
+        const default_field_ptr = if (struct_ty.isTuple(zcu))
             try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true)
         else
             try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true);
@@ -5060,7 +5059,7 @@ fn zirValidatePtrArrayInit(
     inst: Zir.Inst.Index,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const init_src = block.nodeOffset(validate_inst.src_node);
     const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -5068,8 +5067,8 @@ fn zirValidatePtrArrayInit(
     const first_elem_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(instrs[0])].pl_node;
     const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data;
     const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr);
-    const array_ty = sema.typeOf(array_ptr).childType(mod).optEuBaseType(mod);
-    const array_len = array_ty.arrayLen(mod);
+    const array_ty = sema.typeOf(array_ptr).childType(zcu).optEuBaseType(zcu);
+    const array_len = array_ty.arrayLen(zcu);
 
     // Collect the comptime element values in case the array literal ends up
     // being comptime-known.
@@ -5078,15 +5077,15 @@ fn zirValidatePtrArrayInit(
         try sema.usizeCast(block, init_src, array_len),
     );
 
-    if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
+    if (instrs.len != array_len) switch (array_ty.zigTypeTag(zcu)) {
         .Struct => {
-            var root_msg: ?*Module.ErrorMsg = null;
+            var root_msg: ?*Zcu.ErrorMsg = null;
             errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
             try array_ty.resolveStructFieldInits(pt);
             var i = instrs.len;
             while (i < array_len) : (i += 1) {
-                const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
+                const default_val = array_ty.structFieldDefaultValue(i, zcu).toIntern();
                 if (default_val == .unreachable_value) {
                     const template = "missing tuple field with index {d}";
                     if (root_msg) |msg| {
@@ -5125,7 +5124,7 @@ fn zirValidatePtrArrayInit(
         // at comptime so we have almost nothing to do here. However, in case of a
         // sentinel-terminated array, the sentinel will not have been populated by
         // any ZIR instructions at comptime; we need to do that here.
-        if (array_ty.sentinel(mod)) |sentinel_val| {
+        if (array_ty.sentinel(zcu)) |sentinel_val| {
             const array_len_ref = try pt.intRef(Type.usize, array_len);
             const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
             const sentinel = Air.internedToRef(sentinel_val.toIntern());
@@ -5150,8 +5149,8 @@ fn zirValidatePtrArrayInit(
     outer: for (instrs, 0..) |elem_ptr, i| {
         // Determine whether the value stored to this pointer is comptime-known.
 
-        if (array_ty.isTuple(mod)) {
-            if (array_ty.structFieldIsComptime(i, mod))
+        if (array_ty.isTuple(zcu)) {
+            if (array_ty.structFieldIsComptime(i, zcu))
                 try array_ty.resolveStructFieldInits(pt);
             if (try array_ty.structFieldValueComptime(pt, i)) |opv| {
                 element_vals[i] = opv.toIntern();
@@ -5216,7 +5215,7 @@ fn zirValidatePtrArrayInit(
 
     if (array_is_comptime) {
         if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
-            switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
+            switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
                 .ptr => |ptr| switch (ptr.base_addr) {
                     .comptime_field => return, // This store was validated by the individual elem ptrs.
                     else => {},
@@ -5232,7 +5231,7 @@ fn zirValidatePtrArrayInit(
         var block_index = first_block_index;
         for (block.instructions.items[first_block_index..]) |cur_inst| {
             while (elem_ptr_ref == .none and elem_index < instrs.len) : (elem_index += 1) {
-                if (array_ty.isTuple(mod) and array_ty.structFieldIsComptime(elem_index, mod)) continue;
+                if (array_ty.isTuple(zcu) and array_ty.structFieldIsComptime(elem_index, zcu)) continue;
                 elem_ptr_ref = sema.inst_map.get(instrs[elem_index]).?;
             }
             switch (air_tags[@intFromEnum(cur_inst)]) {
@@ -5266,31 +5265,31 @@ fn zirValidatePtrArrayInit(
 
 fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
 
-    if (operand_ty.zigTypeTag(mod) != .Pointer) {
+    if (operand_ty.zigTypeTag(zcu) != .Pointer) {
         return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(pt)});
-    } else switch (operand_ty.ptrSize(mod)) {
+    } else switch (operand_ty.ptrSize(zcu)) {
         .One, .C => {},
         .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(pt)}),
         .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(pt)}),
     }
 
-    if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) {
+    if ((try sema.typeHasOnePossibleValue(operand_ty.childType(zcu))) != null) {
         // No need to validate the actual pointer value, we don't need it!
         return;
     }
 
-    const elem_ty = operand_ty.elemType2(mod);
+    const elem_ty = operand_ty.elemType2(zcu);
     if (try sema.resolveValue(operand)) |val| {
-        if (val.isUndef(mod)) {
+        if (val.isUndef(zcu)) {
             return sema.fail(block, src, "cannot dereference undefined value", .{});
         }
-    } else if (try sema.typeRequiresComptime(elem_ty)) {
+    } else if (try elem_ty.comptimeOnlySema(pt)) {
         const msg = msg: {
             const msg = try sema.errMsg(
                 src,
@@ -5308,7 +5307,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
 
 fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.ValidateDestructure, inst_data.payload_index).data;
     const src = block.nodeOffset(inst_data.src_node);
@@ -5316,9 +5315,9 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
     const operand = try sema.resolveInst(extra.operand);
     const operand_ty = sema.typeOf(operand);
 
-    const can_destructure = switch (operand_ty.zigTypeTag(mod)) {
+    const can_destructure = switch (operand_ty.zigTypeTag(zcu)) {
         .Array, .Vector => true,
-        .Struct => operand_ty.isTuple(mod),
+        .Struct => operand_ty.isTuple(zcu),
         else => false,
     };
 
@@ -5331,11 +5330,11 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
         });
     }
 
-    if (operand_ty.arrayLen(mod) != extra.expect_len) {
+    if (operand_ty.arrayLen(zcu) != extra.expect_len) {
         return sema.failWithOwnedErrorMsg(block, msg: {
             const msg = try sema.errMsg(src, "expected {} elements for destructure, found {}", .{
                 extra.expect_len,
-                operand_ty.arrayLen(mod),
+                operand_ty.arrayLen(zcu),
             });
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(destructure_src, msg, "result destructured here", .{});
@@ -5423,7 +5422,7 @@ fn failWithBadUnionFieldAccess(
     return sema.failWithOwnedErrorMsg(block, msg);
 }
 
-fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
+fn addDeclaredHereNote(sema: *Sema, parent: *Zcu.ErrorMsg, decl_ty: Type) !void {
     const zcu = sema.pt.zcu;
     const src_loc = decl_ty.srcLocOrNull(zcu) orelse return;
     const category = switch (decl_ty.zigTypeTag(zcu)) {
@@ -5537,7 +5536,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const zir_tags = sema.code.instructions.items(.tag);
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[@intFromEnum(inst)].pl_node;
@@ -5556,7 +5555,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
     //   %b = store(%a, %c)
     // Where %c is an error union or error set. In such a case we need to add
     // to the current function's inferred error set, if any.
-    if (is_ret and sema.fn_ret_ty_ies != null) switch (sema.typeOf(operand).zigTypeTag(mod)) {
+    if (is_ret and sema.fn_ret_ty_ies != null) switch (sema.typeOf(operand).zigTypeTag(zcu)) {
         .ErrorUnion, .ErrorSet => try sema.addToInferredErrorSet(operand),
         else => {},
     };
@@ -5688,9 +5687,9 @@ fn zirCompileLog(
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    var managed = mod.compile_log_text.toManaged(sema.gpa);
+    var managed = zcu.compile_log_text.toManaged(sema.gpa);
     defer pt.zcu.compile_log_text = managed.moveToUnmanaged();
     const writer = managed.writer();
 
@@ -5713,7 +5712,7 @@ fn zirCompileLog(
     }
     try writer.print("\n", .{});
 
-    const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.owner);
+    const gop = try zcu.compile_log_sources.getOrPut(sema.gpa, sema.owner);
     if (!gop.found_existing) gop.value_ptr.* = .{
         .base_node_inst = block.src_base_inst,
         .node_offset = src_node,
@@ -5749,7 +5748,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = parent_block.nodeOffset(inst_data.src_node);
     const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
@@ -5800,7 +5799,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError
     try sema.analyzeBodyInner(&loop_block, body);
 
     const loop_block_len = loop_block.instructions.items.len;
-    if (loop_block_len > 0 and sema.typeOf(loop_block.instructions.items[loop_block_len - 1].toRef()).isNoReturn(mod)) {
+    if (loop_block_len > 0 and sema.typeOf(loop_block.instructions.items[loop_block_len - 1].toRef()).isNoReturn(zcu)) {
         // If the loop ended with a noreturn terminator, then there is no way for it to loop,
         // so we can just use the block instead.
         try child_block.instructions.appendSlice(gpa, loop_block.instructions.items);
@@ -6069,11 +6068,11 @@ fn resolveAnalyzedBlock(
 
     const gpa = sema.gpa;
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     // Blocks must terminate with noreturn instruction.
     assert(child_block.instructions.items.len != 0);
-    assert(sema.typeOf(child_block.instructions.items[child_block.instructions.items.len - 1].toRef()).isNoReturn(mod));
+    assert(sema.typeOf(child_block.instructions.items[child_block.instructions.items.len - 1].toRef()).isNoReturn(zcu));
 
     const block_tag = sema.air_instructions.items(.tag)[@intFromEnum(merges.block_inst)];
     switch (block_tag) {
@@ -6178,7 +6177,7 @@ fn resolveAnalyzedBlock(
     // TODO add note "missing else causes void value"
 
     const type_src = src; // TODO: better source location
-    if (try sema.typeRequiresComptime(resolved_ty)) {
+    if (try resolved_ty.comptimeOnlySema(pt)) {
         const msg = msg: {
             const msg = try sema.errMsg(type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -6227,7 +6226,7 @@ fn resolveAnalyzedBlock(
         const br_operand = sema.air_instructions.items(.data)[@intFromEnum(br)].br.operand;
         const br_operand_src = src;
         const br_operand_ty = sema.typeOf(br_operand);
-        if (br_operand_ty.eql(resolved_ty, mod)) {
+        if (br_operand_ty.eql(resolved_ty, zcu)) {
             // No type coercion needed.
             continue;
         }
@@ -6354,7 +6353,7 @@ pub fn analyzeExport(
     sema: *Sema,
     block: *Block,
     src: LazySrcLoc,
-    options: Module.Export.Options,
+    options: Zcu.Export.Options,
     exported_nav_index: InternPool.Nav.Index,
 ) !void {
     const gpa = sema.gpa;
@@ -6427,8 +6426,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
 
 fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const operand_src = block.builtinCallArgSrc(extra.node, 0);
     const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, .{
@@ -6446,8 +6445,8 @@ fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
 
 fn zirDisableInstrumentation(sema: *Sema) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const func = switch (sema.owner.unwrap()) {
         .func => |func| func,
         .cau => return, // does nothing outside a function
@@ -6572,17 +6571,17 @@ fn addDbgVar(
     if (block.is_comptime or block.ownerModule().strip) return;
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
     const val_ty = switch (air_tag) {
-        .dbg_var_ptr => operand_ty.childType(mod),
+        .dbg_var_ptr => operand_ty.childType(zcu),
         .dbg_var_val, .dbg_arg_inline => operand_ty,
         else => unreachable,
     };
-    if (try sema.typeRequiresComptime(val_ty)) return;
-    if (!(try sema.typeHasRuntimeBits(val_ty))) return;
+    if (try val_ty.comptimeOnlySema(pt)) return;
+    if (!(try val_ty.hasRuntimeBitsSema(pt))) return;
     if (try sema.resolveValue(operand)) |operand_val| {
-        if (operand_val.canMutateComptimeVarState(mod)) return;
+        if (operand_val.canMutateComptimeVarState(zcu)) return;
     }
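
addDbgVar collects both Sema-suffixed queries in one place: a debug variable is only emitted when the value has a runtime representation, and both checks may need to resolve the type first, so both take pt. The guard pair, isolated:

    if (try val_ty.comptimeOnlySema(pt)) return; // comptime-only: no runtime location
    if (!(try val_ty.hasRuntimeBitsSema(pt))) return; // zero-bit: nothing to describe
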
 
     // To ensure the lexical scoping is known to backends, this alloc must be
@@ -6619,10 +6618,10 @@ pub fn appendAirString(sema: *Sema, str: []const u8) Allocator.Error!Air.NullTer
 
 fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
     const src = block.tokenOffset(inst_data.src_tok);
-    const decl_name = try mod.intern_pool.getOrPutString(
+    const decl_name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         inst_data.get(sema.code),
@@ -6634,10 +6633,10 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 
 fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
     const src = block.tokenOffset(inst_data.src_tok);
-    const decl_name = try mod.intern_pool.getOrPutString(
+    const decl_name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         inst_data.get(sema.code),
@@ -6649,14 +6648,14 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 
 fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.Nav.Index {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     var namespace = block.namespace;
     while (true) {
         if (try sema.lookupInNamespace(block, src, namespace, name, false)) |lookup| {
             assert(lookup.accessible);
             return lookup.nav;
         }
-        namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break;
+        namespace = zcu.namespacePtr(namespace).parent.unwrap() orelse break;
     }
     unreachable; // AstGen detects use of undeclared identifiers.
 }
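
Even with zcu in hand, string interning stays thread-aware: the pool is reached through the shared *Zcu, but the per-thread id still comes from pt. The shape used by zirDeclRef and zirDeclVal above:

    const decl_name = try zcu.intern_pool.getOrPutString(
        sema.gpa,
        pt.tid, // per-thread id travels alongside the shared pool
        inst_data.get(sema.code),
        .no_embedded_nulls,
    );
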
@@ -6801,7 +6800,7 @@ fn funcDeclSrcInst(sema: *Sema, func_inst: Air.Inst.Ref) !?InternPool.TrackedIns
 
 pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
 
     if (block.is_comptime or block.is_typeof) {
@@ -6813,7 +6812,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
 
     const stack_trace_ty = try pt.getBuiltinType("StackTrace");
     try stack_trace_ty.resolveFields(pt);
-    const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+    const field_name = try zcu.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
     const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) {
         error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"),
         error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
@@ -6839,7 +6838,7 @@ fn popErrorReturnTrace(
     saved_error_trace_index: Air.Inst.Ref,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     var is_non_error: ?bool = null;
     var is_non_error_inst: Air.Inst.Ref = undefined;
@@ -6857,7 +6856,7 @@ fn popErrorReturnTrace(
         try stack_trace_ty.resolveFields(pt);
         const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
-        const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+        const field_name = try zcu.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
         const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true);
         try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store);
     } else if (is_non_error == null) {
@@ -6883,7 +6882,7 @@ fn popErrorReturnTrace(
         try stack_trace_ty.resolveFields(pt);
         const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
-        const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+        const field_name = try zcu.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
         const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true);
         try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store);
         _ = try then_block.addBr(cond_block_inst, .void_value);
@@ -6923,7 +6922,7 @@ fn zirCall(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const callee_src = block.src(.{ .node_offset_call_func = inst_data.src_node });
     const call_src = block.nodeOffset(inst_data.src_node);
@@ -6942,7 +6941,7 @@ fn zirCall(
         .direct => .{ .direct = try sema.resolveInst(extra.data.callee) },
         .field => blk: {
             const object_ptr = try sema.resolveInst(extra.data.obj_ptr);
-            const field_name = try mod.intern_pool.getOrPutString(
+            const field_name = try zcu.intern_pool.getOrPutString(
                 sema.gpa,
                 pt.tid,
                 sema.code.nullTerminatedString(extra.data.field_name_start),
@@ -6987,7 +6986,7 @@ fn zirCall(
 
     switch (sema.owner.unwrap()) {
         .cau => input_is_error = false,
-        .func => |owner_func| if (!mod.intern_pool.funcAnalysisUnordered(owner_func).calls_or_awaits_errorable_fn) {
+        .func => |owner_func| if (!zcu.intern_pool.funcAnalysisUnordered(owner_func).calls_or_awaits_errorable_fn) {
             // No errorable fn actually called; we have no error return trace
             input_is_error = false;
         },
@@ -6997,7 +6996,7 @@ fn zirCall(
         !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace))
     {
         const return_ty = sema.typeOf(call_inst);
-        if (modifier != .always_tail and return_ty.isNoReturn(mod))
+        if (modifier != .always_tail and return_ty.isNoReturn(zcu))
             return call_inst; // call to "fn (...) noreturn", don't pop
 
         // TODO: we don't fix up the error trace for always_tail correctly, we should be doing it
@@ -7008,10 +7007,10 @@ fn zirCall(
 
         // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
         // need to clean-up our own trace if we were passed to a non-error-handling expression.
-        if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
+        if (input_is_error or (pop_error_return_trace and return_ty.isError(zcu))) {
             const stack_trace_ty = try pt.getBuiltinType("StackTrace");
             try stack_trace_ty.resolveFields(pt);
-            const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls);
+            const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls);
             const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);
 
             // Insert a save instruction before the arg resolution + call instructions we just generated
@@ -7044,20 +7043,20 @@ fn checkCallArgumentCount(
     member_fn: bool,
 ) !Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const func_ty = func_ty: {
-        switch (callee_ty.zigTypeTag(mod)) {
+        switch (callee_ty.zigTypeTag(zcu)) {
             .Fn => break :func_ty callee_ty,
             .Pointer => {
-                const ptr_info = callee_ty.ptrInfo(mod);
-                if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) {
+                const ptr_info = callee_ty.ptrInfo(zcu);
+                if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Fn) {
                     break :func_ty Type.fromInterned(ptr_info.child);
                 }
             },
             .Optional => {
-                const opt_child = callee_ty.optionalChild(mod);
-                if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and
-                    opt_child.childType(mod).zigTypeTag(mod) == .Fn))
+                const opt_child = callee_ty.optionalChild(zcu);
+                if (opt_child.zigTypeTag(zcu) == .Fn or (opt_child.isSinglePointer(zcu) and
+                    opt_child.childType(zcu).zigTypeTag(zcu) == .Fn))
                 {
                     const msg = msg: {
                         const msg = try sema.errMsg(func_src, "cannot call optional type '{}'", .{
@@ -7075,7 +7074,7 @@ fn checkCallArgumentCount(
         return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(pt)});
     };
 
-    const func_ty_info = mod.typeToFunc(func_ty).?;
+    const func_ty_info = zcu.typeToFunc(func_ty).?;
     const fn_params_len = func_ty_info.param_types.len;
     const args_len = total_args - @intFromBool(member_fn);
     if (func_ty_info.is_var_args) {
@@ -7122,14 +7121,14 @@ fn callBuiltin(
     operation: CallOperation,
 ) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const callee_ty = sema.typeOf(builtin_fn);
     const func_ty = func_ty: {
-        switch (callee_ty.zigTypeTag(mod)) {
+        switch (callee_ty.zigTypeTag(zcu)) {
             .Fn => break :func_ty callee_ty,
             .Pointer => {
-                const ptr_info = callee_ty.ptrInfo(mod);
-                if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) {
+                const ptr_info = callee_ty.ptrInfo(zcu);
+                if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Fn) {
                     break :func_ty Type.fromInterned(ptr_info.child);
                 }
             },
@@ -7138,7 +7137,7 @@ fn callBuiltin(
         std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(pt)});
     };
 
-    const func_ty_info = mod.typeToFunc(func_ty).?;
+    const func_ty_info = zcu.typeToFunc(func_ty).?;
     const fn_params_len = func_ty_info.param_types.len;
     if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) {
         std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len });
@@ -7242,7 +7241,7 @@ const CallArgsInfo = union(enum) {
         func_inst: Air.Inst.Ref,
     ) CompileError!Air.Inst.Ref {
         const pt = sema.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const param_count = func_ty_info.param_types.len;
         const uncoerced_arg: Air.Inst.Ref = switch (cai) {
             inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
@@ -7277,13 +7276,13 @@ const CallArgsInfo = union(enum) {
                 // Resolve the arg!
                 const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst);
 
-                if (sema.typeOf(uncoerced_arg).zigTypeTag(mod) == .NoReturn) {
+                if (sema.typeOf(uncoerced_arg).zigTypeTag(zcu) == .NoReturn) {
                     // This terminates resolution of arguments. The caller should
                     // propagate this.
                     return uncoerced_arg;
                 }
 
-                if (sema.typeOf(uncoerced_arg).isError(mod)) {
+                if (sema.typeOf(uncoerced_arg).isError(zcu)) {
                     zir_call.any_arg_is_error.* = true;
                 }
 
@@ -7476,7 +7475,7 @@ fn analyzeCall(
     var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline;
     var comptime_reason: ?*const Block.ComptimeReason = null;
     if (!is_inline_call and !is_comptime_call) {
-        if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
+        if (try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
             is_comptime_call = true;
             is_inline_call = true;
             comptime_reason = &.{ .comptime_ret_ty = .{
@@ -7968,8 +7967,8 @@ fn analyzeInlineCallArg(
     func_ty_info: InternPool.Key.FuncType,
     func_inst: Air.Inst.Ref,
 ) !?Air.Inst.Ref {
-    const mod = ics.sema.pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = ics.sema.pt.zcu;
+    const ip = &zcu.intern_pool;
     const zir_tags = ics.callee().code.instructions.items(.tag);
     switch (zir_tags[@intFromEnum(inst)]) {
         .param_comptime, .param_anytype_comptime => param_block.inlining.?.has_comptime_args = true,
@@ -7992,11 +7991,11 @@ fn analyzeInlineCallArg(
             };
             new_param_types[arg_i.*] = param_ty;
             const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.fromInterned(param_ty), func_ty_info, func_inst);
-            if (ics.caller().typeOf(casted_arg).zigTypeTag(mod) == .NoReturn) {
+            if (ics.caller().typeOf(casted_arg).zigTypeTag(zcu) == .NoReturn) {
                 return casted_arg;
             }
             const arg_src = args_info.argSrc(arg_block, arg_i.*);
-            if (try ics.callee().typeRequiresComptime(Type.fromInterned(param_ty))) {
+            if (try Type.fromInterned(param_ty).comptimeOnlySema(ics.callee().pt)) {
                 _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{
                     .needed_comptime_reason = "argument to parameter with comptime-only type must be comptime-known",
                     .block_comptime_reason = param_block.comptime_reason,
@@ -8025,7 +8024,7 @@ fn analyzeInlineCallArg(
                 // assertion due to type not being resolved
                 // when the hash function is called.
                 const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
-                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
+                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(zcu);
                 memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern();
             } else {
                 ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
@@ -8040,7 +8039,7 @@ fn analyzeInlineCallArg(
         .param_anytype, .param_anytype_comptime => {
             // No coercion needed.
             const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst);
-            if (ics.caller().typeOf(uncasted_arg).zigTypeTag(mod) == .NoReturn) {
+            if (ics.caller().typeOf(uncasted_arg).zigTypeTag(zcu) == .NoReturn) {
                 return uncasted_arg;
             }
             const arg_src = args_info.argSrc(arg_block, arg_i.*);
@@ -8064,7 +8063,7 @@ fn analyzeInlineCallArg(
                 // assertion due to type not being resolved
                 // when the hash function is called.
                 const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
-                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
+                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(zcu);
                 memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern();
             } else {
                 if (zir_tags[@intFromEnum(inst)] == .param_anytype_comptime) {
@@ -8236,7 +8235,7 @@ fn instantiateGenericCall(
 
         const arg_is_comptime = switch (param_tag) {
             .param_comptime, .param_anytype_comptime => true,
-            .param, .param_anytype => try sema.typeRequiresComptime(arg_ty),
+            .param, .param_anytype => try arg_ty.comptimeOnlySema(pt),
             else => unreachable,
         };
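
Generic instantiation decides per-argument comptime-ness with the same migrated query; note that in the inline-call splitter a few hunks up, the equivalent check runs on the callee's handle (ics.callee().pt), since it is the callee's parameter type being resolved. The deciding switch, with the two forcing conditions annotated:

    const arg_is_comptime = switch (param_tag) {
        .param_comptime, .param_anytype_comptime => true, // forced by the ZIR tag
        .param, .param_anytype => try arg_ty.comptimeOnlySema(pt), // forced by the type
        else => unreachable,
    };
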
 
@@ -8325,7 +8324,7 @@ fn instantiateGenericCall(
 
     // If the call evaluated to a return type that requires comptime, never mind
     // our generic instantiation. Instead we need to perform a comptime call.
-    if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
+    if (try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
         return error.ComptimeReturn;
     }
     // Similarly, if the call evaluated to a generic type we need to instead
@@ -8376,8 +8375,8 @@ fn instantiateGenericCall(
 
 fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const tuple = switch (ip.indexToKey(ty.toIntern())) {
         .anon_struct_type => |tuple| tuple,
         else => return,
@@ -8401,13 +8400,13 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
     const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
-    if (child_type.zigTypeTag(mod) == .Opaque) {
+    if (child_type.zigTypeTag(zcu) == .Opaque) {
         return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(pt)});
-    } else if (child_type.zigTypeTag(mod) == .Null) {
+    } else if (child_type.zigTypeTag(zcu) == .Null) {
         return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(pt)});
     }
     const opt_type = try pt.optionalType(child_type.toIntern());
@@ -8417,7 +8416,7 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 
 fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
     const maybe_wrapped_indexable_ty = sema.resolveType(block, LazySrcLoc.unneeded, bin.lhs) catch |err| switch (err) {
         // Since this is a ZIR instruction that returns a type, encountering
@@ -8427,40 +8426,40 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
         error.GenericPoison => return .generic_poison_type,
         else => |e| return e,
     };
-    const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod);
+    const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(zcu);
     try indexable_ty.resolveFields(pt);
-    assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
-    if (indexable_ty.zigTypeTag(mod) == .Struct) {
-        const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod);
+    assert(indexable_ty.isIndexable(zcu)); // validated by a previous instruction
+    if (indexable_ty.zigTypeTag(zcu) == .Struct) {
+        const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), zcu);
         return Air.internedToRef(elem_type.toIntern());
     } else {
-        const elem_type = indexable_ty.elemType2(mod);
+        const elem_type = indexable_ty.elemType2(zcu);
         return Air.internedToRef(elem_type.toIntern());
     }
 }
 
 fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const maybe_wrapped_ptr_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) {
         error.GenericPoison => return .generic_poison_type,
         else => |e| return e,
     };
-    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod);
-    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
-    const elem_ty = ptr_ty.childType(mod);
+    const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(zcu);
+    assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+    const elem_ty = ptr_ty.childType(zcu);
     if (elem_ty.toIntern() == .anyopaque_type) {
         // The pointer's actual child type is effectively unknown, so it makes
         // sense to represent it with a generic poison.
         return .generic_poison_type;
     }
-    return Air.internedToRef(ptr_ty.childType(mod).toIntern());
+    return Air.internedToRef(ptr_ty.childType(zcu).toIntern());
 }
 
 fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(un_node.src_node);
     const ptr_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) {
@@ -8468,16 +8467,16 @@ fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         else => |e| return e,
     };
     try sema.checkMemOperand(block, src, ptr_ty);
-    const elem_ty = switch (ptr_ty.ptrSize(mod)) {
-        .Slice, .Many, .C => ptr_ty.childType(mod),
-        .One => ptr_ty.childType(mod).childType(mod),
+    const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
+        .Slice, .Many, .C => ptr_ty.childType(zcu),
+        .One => ptr_ty.childType(zcu).childType(zcu),
     };
     return Air.internedToRef(elem_ty.toIntern());
 }
 
 fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const vec_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) {
         // Since this is a ZIR instruction that returns a type, encountering
@@ -8487,10 +8486,10 @@ fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         error.GenericPoison => return .generic_poison_type,
         else => |e| return e,
     };
-    if (!vec_ty.isVector(mod)) {
+    if (!vec_ty.isVector(zcu)) {
         return sema.fail(block, block.nodeOffset(un_node.src_node), "expected vector type, found '{}'", .{vec_ty.fmt(pt)});
     }
-    return Air.internedToRef(vec_ty.childType(mod).toIntern());
+    return Air.internedToRef(vec_ty.childType(zcu).toIntern());
 }
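
The element-type handlers from zirArrayInitElemType through zirVectorElemType now thread zcu for every child and element query. The pointer-size dispatch in zirIndexablePtrElemType is the interesting case: slices, many-pointers, and C pointers yield their child directly, while a single-item pointer must point at an array, so the element is the array's child:

    const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
        .Slice, .Many, .C => ptr_ty.childType(zcu), // []T, [*]T, [*c]T -> T
        .One => ptr_ty.childType(zcu).childType(zcu), // *[N]T -> T
    };
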
 
 fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8561,10 +8560,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
 
 fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (elem_type.zigTypeTag(mod) == .Opaque) {
+    const zcu = pt.zcu;
+    if (elem_type.zigTypeTag(zcu) == .Opaque) {
         return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(pt)});
-    } else if (elem_type.zigTypeTag(mod) == .NoReturn) {
+    } else if (elem_type.zigTypeTag(zcu) == .NoReturn) {
         return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
     }
 }
@@ -8577,10 +8576,10 @@ fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     if (true) {
         return sema.failWithUseOfAsync(block, block.nodeOffset(inst_data.src_node));
     }
-    const mod = sema.mod;
+    const zcu = sema.zcu;
     const operand_src = block.src(.{ .node_offset_anyframe_type = inst_data.src_node });
     const return_type = try sema.resolveType(block, operand_src, inst_data.operand);
-    const anyframe_type = try mod.anyframeType(return_type);
+    const anyframe_type = try zcu.anyframeType(return_type);
 
     return Air.internedToRef(anyframe_type.toIntern());
 }
@@ -8590,7 +8589,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -8598,7 +8597,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
     const payload = try sema.resolveType(block, rhs_src, extra.rhs);
 
-    if (error_set.zigTypeTag(mod) != .ErrorSet) {
+    if (error_set.zigTypeTag(zcu) != .ErrorSet) {
         return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
             error_set.fmt(pt),
         });
@@ -8610,12 +8609,12 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
 
 fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (payload_ty.zigTypeTag(mod) == .Opaque) {
+    const zcu = pt.zcu;
+    if (payload_ty.zigTypeTag(zcu) == .Opaque) {
         return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
             payload_ty.fmt(pt),
         });
-    } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) {
+    } else if (payload_ty.zigTypeTag(zcu) == .ErrorSet) {
         return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
             payload_ty.fmt(pt),
         });
@@ -8646,8 +8645,8 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = block.nodeOffset(extra.node);
     const operand_src = block.builtinCallArgSrc(extra.node, 0);
@@ -8656,7 +8655,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const err_int_ty = try pt.errorIntType();
 
     if (try sema.resolveValue(operand)) |val| {
-        if (val.isUndef(mod)) {
+        if (val.isUndef(zcu)) {
             return pt.undefRef(err_int_ty);
         }
         const err_name = ip.indexToKey(val.toIntern()).err.name;
@@ -8688,8 +8687,8 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = block.nodeOffset(extra.node);
     const operand_src = block.builtinCallArgSrc(extra.node, 0);
@@ -8733,8 +8732,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
@@ -8742,7 +8741,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
     const lhs = try sema.resolveInst(extra.lhs);
     const rhs = try sema.resolveInst(extra.rhs);
-    if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) {
+    if (sema.typeOf(lhs).zigTypeTag(zcu) == .Bool and sema.typeOf(rhs).zigTypeTag(zcu) == .Bool) {
         const msg = msg: {
             const msg = try sema.errMsg(lhs_src, "expected error set type, found 'bool'", .{});
             errdefer msg.destroy(sema.gpa);
@@ -8753,9 +8752,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     }
     const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
     const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
-    if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
+    if (lhs_ty.zigTypeTag(zcu) != .ErrorSet)
         return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(pt)});
-    if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
+    if (rhs_ty.zigTypeTag(zcu) != .ErrorSet)
         return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(pt)});
 
     // Anything merged with anyerror is anyerror.
@@ -8790,28 +8789,28 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
     const name = inst_data.get(sema.code);
     return Air.internedToRef((try pt.intern(.{
-        .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls),
+        .enum_literal = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls),
     })));
 }
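
`zirEnumLiteral` interns the literal's name through the pool, now via `zcu.intern_pool` with the per-thread `tid` passed explicitly. The user construct it services, for reference:

    const Color = enum { red, green };
    const c: Color = .red; // `.red` starts life as an enum_literal value whose name
                           // is interned via getOrPutString, then coerces to Color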
 
 fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
 
-    const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
+    const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(zcu)) {
         .Enum => operand,
         .Union => blk: {
             try operand_ty.resolveFields(pt);
-            const tag_ty = operand_ty.unionTagType(mod) orelse {
+            const tag_ty = operand_ty.unionTagType(zcu) orelse {
                 return sema.fail(
                     block,
                     operand_src,
@@ -8829,11 +8828,11 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         },
     };
     const enum_tag_ty = sema.typeOf(enum_tag);
-    const int_tag_ty = enum_tag_ty.intTagType(mod);
+    const int_tag_ty = enum_tag_ty.intTagType(zcu);
 
     // TODO: use correct solution
     // https://github.com/ziglang/zig/issues/15909
-    if (enum_tag_ty.enumFieldCount(mod) == 0 and !enum_tag_ty.isNonexhaustiveEnum(mod)) {
+    if (enum_tag_ty.enumFieldCount(zcu) == 0 and !enum_tag_ty.isNonexhaustiveEnum(zcu)) {
         return sema.fail(block, operand_src, "cannot use @intFromEnum on empty enum '{}'", .{
             enum_tag_ty.fmt(pt),
         });
@@ -8844,7 +8843,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     }
 
     if (try sema.resolveValue(enum_tag)) |enum_tag_val| {
-        if (enum_tag_val.isUndef(mod)) {
+        if (enum_tag_val.isUndef(zcu)) {
             return pt.undefRef(int_tag_ty);
         }
 
@@ -8858,7 +8857,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 
 fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = block.nodeOffset(inst_data.src_node);
@@ -8866,14 +8865,14 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@enumFromInt");
     const operand = try sema.resolveInst(extra.rhs);
 
-    if (dest_ty.zigTypeTag(mod) != .Enum) {
+    if (dest_ty.zigTypeTag(zcu) != .Enum) {
         return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(pt)});
     }
     _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
 
     if (try sema.resolveValue(operand)) |int_val| {
-        if (dest_ty.isNonexhaustiveEnum(mod)) {
-            const int_tag_ty = dest_ty.intTagType(mod);
+        if (dest_ty.isNonexhaustiveEnum(zcu)) {
+            const int_tag_ty = dest_ty.intTagType(zcu);
             if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
                 return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern());
             }
@@ -8881,7 +8880,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 int_val.fmtValueSema(pt, sema), dest_ty.fmt(pt),
             });
         }
-        if (int_val.isUndef(mod)) {
+        if (int_val.isUndef(zcu)) {
             return sema.failWithUseOfUndef(block, operand_src);
         }
         if (!(try sema.enumHasInt(dest_ty, int_val))) {
@@ -8892,7 +8891,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern());
     }
 
-    if (dest_ty.intTagType(mod).zigTypeTag(mod) == .ComptimeInt) {
+    if (dest_ty.intTagType(zcu).zigTypeTag(zcu) == .ComptimeInt) {
         return sema.failWithNeededComptime(block, operand_src, .{
             .needed_comptime_reason = "value being casted to enum with 'comptime_int' tag type must be comptime-known",
         });
@@ -8909,8 +8908,8 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 
     try sema.requireRuntimeBlock(block, src, operand_src);
     const result = try block.addTyOp(.intcast, dest_ty, operand);
-    if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and
-        mod.backendSupportsFeature(.is_named_enum_value))
+    if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(zcu) and
+        zcu.backendSupportsFeature(.is_named_enum_value))
     {
         const ok = try block.addUnOp(.is_named_enum_value, result);
         try sema.addSafetyCheck(block, src, ok, .invalid_enum_value);
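
Taken together, `zirIntFromEnum` and `zirEnumFromInt` implement the two cast builtins; a small sketch of the semantics enforced above (values illustrative):

    const E = enum(u8) { a = 1, b = 2 };
    const n: u8 = @intFromEnum(E.b); // n == 2, folded at comptime when the operand is known
    const e: E = @enumFromInt(2);    // ok: matches E.b
    // const bad: E = @enumFromInt(3); // compile error for a comptime operand; a runtime
    //                                 // operand fires the .is_named_enum_value safety check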
@@ -9014,20 +9013,20 @@ fn zirOptionalPayload(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    const result_ty = switch (operand_ty.zigTypeTag(mod)) {
-        .Optional => operand_ty.optionalChild(mod),
+    const result_ty = switch (operand_ty.zigTypeTag(zcu)) {
+        .Optional => operand_ty.optionalChild(zcu),
         .Pointer => t: {
-            if (operand_ty.ptrSize(mod) != .C) {
+            if (operand_ty.ptrSize(zcu) != .C) {
                 return sema.failWithExpectedOptionalType(block, src, operand_ty);
             }
             // TODO https://github.com/ziglang/zig/issues/6597
             if (true) break :t operand_ty;
-            const ptr_info = operand_ty.ptrInfo(mod);
+            const ptr_info = operand_ty.ptrInfo(zcu);
             break :t try pt.ptrTypeSema(.{
                 .child = ptr_info.child,
                 .flags = .{
@@ -9043,7 +9042,7 @@ fn zirOptionalPayload(
     };
 
     if (try sema.resolveDefinedValue(block, src, operand)) |val| {
-        return if (val.optionalValue(mod)) |payload|
+        return if (val.optionalValue(zcu)) |payload|
             Air.internedToRef(payload.toIntern())
         else
             sema.fail(block, src, "unable to unwrap null", .{});
@@ -9067,13 +9066,13 @@ fn zirErrUnionPayload(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_src = src;
     const err_union_ty = sema.typeOf(operand);
-    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
+    if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
         return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
             err_union_ty.fmt(pt),
         });
@@ -9091,20 +9090,20 @@ fn analyzeErrUnionPayload(
     safety_check: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const payload_ty = err_union_ty.errorUnionPayload(mod);
+    const zcu = pt.zcu;
+    const payload_ty = err_union_ty.errorUnionPayload(zcu);
     if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
-        if (val.getErrorName(mod).unwrap()) |name| {
+        if (val.getErrorName(zcu).unwrap()) |name| {
             return sema.failWithComptimeErrorRetTrace(block, src, name);
         }
-        return Air.internedToRef(mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload);
+        return Air.internedToRef(zcu.intern_pool.indexToKey(val.toIntern()).error_union.val.payload);
     }
 
     try sema.requireRuntimeBlock(block, src, null);
 
     // If the error set has no fields then no safety check is needed.
     if (safety_check and block.wantSafety() and
-        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
+        !err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu))
     {
         try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err, .is_non_err);
     }
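
The emptiness test above means unwrapping an error union whose error set has no members costs nothing at runtime; for instance:

    const Never = error{};
    fn five() Never!u32 {
        return 5;
    }
    // `five() catch unreachable` (or a `try`) needs no runtime check here:
    // the error set is provably empty, so the safety panic above is skipped.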
@@ -9215,20 +9214,20 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 
 fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
-    if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
+    if (operand_ty.zigTypeTag(zcu) != .ErrorUnion) {
         return sema.fail(block, src, "expected error union type, found '{}'", .{
             operand_ty.fmt(pt),
         });
     }
 
-    const result_ty = operand_ty.errorUnionSet(mod);
+    const result_ty = operand_ty.errorUnionSet(zcu);
 
     if (try sema.resolveDefinedValue(block, src, operand)) |val| {
         return Air.internedToRef((try pt.intern(.{ .err = .{
             .ty = result_ty.toIntern(),
-            .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
+            .name = zcu.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
         } })));
     }
 
@@ -9249,24 +9248,24 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
 
 fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
-    assert(operand_ty.zigTypeTag(mod) == .Pointer);
+    assert(operand_ty.zigTypeTag(zcu) == .Pointer);
 
-    if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
+    if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) {
         return sema.fail(block, src, "expected error union type, found '{}'", .{
-            operand_ty.childType(mod).fmt(pt),
+            operand_ty.childType(zcu).fmt(pt),
         });
     }
 
-    const result_ty = operand_ty.childType(mod).errorUnionSet(mod);
+    const result_ty = operand_ty.childType(zcu).errorUnionSet(zcu);
 
     if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
         if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
-            assert(val.getErrorName(mod) != .none);
+            assert(val.getErrorName(zcu) != .none);
             return Air.internedToRef((try pt.intern(.{ .err = .{
                 .ty = result_ty.toIntern(),
-                .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
+                .name = zcu.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
             } })));
         }
     }
@@ -9412,7 +9411,7 @@ fn resolveGenericBody(
 /// and puts it there if it doesn't exist.
 /// It also dupes the library name which can then be saved as part of the
 /// respective `Decl` (either `ExternFn` or `Var`).
-/// The liveness of the duped library name is tied to liveness of `Module`.
+/// The liveness of the duped library name is tied to liveness of `Zcu`.
 /// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`).
 fn handleExternLibName(
     sema: *Sema,
@@ -9422,9 +9421,9 @@ fn handleExternLibName(
 ) CompileError!void {
     blk: {
         const pt = sema.pt;
-        const mod = pt.zcu;
-        const comp = mod.comp;
-        const target = mod.getTarget();
+        const zcu = pt.zcu;
+        const comp = zcu.comp;
+        const target = zcu.getTarget();
         log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
         if (target.is_libc_lib_name(lib_name)) {
             if (!comp.config.link_libc) {
@@ -9575,7 +9574,7 @@ fn funcCommon(
             .fn_proto_node_offset = src_node_offset,
             .param_index = @intCast(i),
         } });
-        const requires_comptime = try sema.typeRequiresComptime(param_ty);
+        const requires_comptime = try param_ty.comptimeOnlySema(pt);
         if (param_is_comptime or requires_comptime) {
             comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error
         }
@@ -9631,7 +9630,7 @@ fn funcCommon(
                 const err_code_size = target.ptrBitWidth();
                 switch (i) {
                     0 => if (param_ty.zigTypeTag(zcu) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
-                    1 => if (param_ty.bitSize(pt) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
+                    1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
                     else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}),
                 }
             } else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}),
@@ -9640,7 +9639,7 @@ fn funcCommon(
         }
     }
 
-    const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type);
+    const ret_ty_requires_comptime = try bare_return_type.comptimeOnlySema(pt);
     const ret_poison = bare_return_type.isGenericPoison();
     const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
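 
 `sema.typeRequiresComptime(ty)` becomes `ty.comptimeOnlySema(pt)`: the query moves onto `Type`, but it still takes `pt`, since deciding comptime-only-ness may require resolving the type. What the predicate answers, as a sketch:
 
     const RuntimeOk = struct { x: u32 };     // not comptime-only
     const ComptimeOnly = struct { T: type }; // a `type` field makes this comptime-only,
                                              // so a parameter of this type is forced
                                              // comptime (see comptime_bits above)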
 
@@ -9881,18 +9880,18 @@ fn finishFunc(
     final_is_generic: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const gpa = sema.gpa;
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
 
     const return_type: Type = if (opt_func_index == .none or ret_poison)
         bare_return_type
     else
         Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
 
-    if (!return_type.isValidReturnType(mod)) {
-        const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
+    if (!return_type.isValidReturnType(zcu)) {
+        const opaque_str = if (return_type.zigTypeTag(zcu) == .Opaque) "opaque " else "";
         return sema.fail(block, ret_ty_src, "{s}return type '{}' not allowed", .{
             opaque_str, return_type.fmt(pt),
         });
@@ -9954,7 +9953,7 @@ fn finishFunc(
     }
 
     switch (cc_resolved) {
-        .Interrupt, .Signal => if (return_type.zigTypeTag(mod) != .Void and return_type.zigTypeTag(mod) != .NoReturn) {
+        .Interrupt, .Signal => if (return_type.zigTypeTag(zcu) != .Void and return_type.zigTypeTag(zcu) != .NoReturn) {
             return sema.fail(block, ret_ty_src, "function with calling convention '{s}' must return 'void' or 'noreturn'", .{@tagName(cc_resolved)});
         },
         .Inline => if (is_noinline) {
@@ -10070,7 +10069,7 @@ fn zirParam(
         }
     };
 
-    const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax;
+    const is_comptime = try param_ty.comptimeOnlySema(sema.pt) or comptime_syntax;
 
     try block.params.append(sema.arena, .{
         .ty = param_ty.toIntern(),
@@ -10141,7 +10140,7 @@ fn analyzeAs(
     no_cast_to_comptime_int: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand = try sema.resolveInst(zir_operand);
     const operand_air_inst = sema.resolveInst(zir_dest_type) catch |err| switch (err) {
         error.GenericPoison => return operand,
@@ -10151,7 +10150,7 @@ fn analyzeAs(
         error.GenericPoison => return operand,
         else => |e| return e,
     };
-    const dest_ty_tag = dest_ty.zigTypeTagOrPoison(mod) catch |err| switch (err) {
+    const dest_ty_tag = dest_ty.zigTypeTagOrPoison(zcu) catch |err| switch (err) {
         error.GenericPoison => return operand,
     };
 
@@ -10189,7 +10188,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)});
     }
     const pointee_ty = ptr_ty.childType(zcu);
-    if (try sema.typeRequiresComptime(ptr_ty)) {
+    if (try ptr_ty.comptimeOnlySema(pt)) {
         const msg = msg: {
             const msg = try sema.errMsg(ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -10205,7 +10204,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             }
             return Air.internedToRef((try pt.intValue(
                 Type.usize,
-                (try operand_val.getUnsignedIntAdvanced(pt, .sema)).?,
+                (try operand_val.toUnsignedIntSema(pt)),
             )).toIntern());
         }
         const len = operand_ty.vectorLen(zcu);
@@ -10217,7 +10216,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
                 new_elem.* = (try pt.undefValue(Type.usize)).toIntern();
                 continue;
             }
-            const addr = try ptr_val.getUnsignedIntAdvanced(pt, .sema) orelse {
+            const addr = try ptr_val.getUnsignedIntSema(pt) orelse {
                 // A vector element wasn't an integer pointer. This is a runtime operation.
                 break :ct;
             };
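
`zirIntFromPtr` at the source level, including the comptime-only rejection above (sketch):

    var global: u32 = 0;
    fn addr() usize {
        return @intFromPtr(&global); // plain pointers are fine; the loop above also
                                     // handles vectors of pointers elementwise
    }
    // A pointer to a comptime-only type is rejected:
    // "comptime-only type '...' has no pointer address"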
@@ -10252,12 +10251,12 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node });
     const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
-    const field_name = try mod.intern_pool.getOrPutString(
+    const field_name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         sema.code.nullTerminatedString(extra.field_name_start),
@@ -10272,12 +10271,12 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node });
     const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
-    const field_name = try mod.intern_pool.getOrPutString(
+    const field_name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         sema.code.nullTerminatedString(extra.field_name_start),
@@ -10292,20 +10291,20 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const field_name_src = block.src(.{ .node_offset_field_name_init = inst_data.src_node });
     const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
-    const field_name = try mod.intern_pool.getOrPutString(
+    const field_name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         sema.code.nullTerminatedString(extra.field_name_start),
         .no_embedded_nulls,
     );
     const object_ptr = try sema.resolveInst(extra.lhs);
-    const struct_ty = sema.typeOf(object_ptr).childType(mod);
-    switch (struct_ty.zigTypeTag(mod)) {
+    const struct_ty = sema.typeOf(object_ptr).childType(zcu);
+    switch (struct_ty.zigTypeTag(zcu)) {
         .Struct, .Union => {
             return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, true);
         },
@@ -10371,25 +10370,25 @@ fn intCast(
     runtime_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
     const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
     const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
 
     if (try sema.isComptimeKnown(operand)) {
         return sema.coerce(block, dest_ty, operand, operand_src);
-    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
+    } else if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
         return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
     }
 
     try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
-    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
+    const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
 
     if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
         // requirement: intCast(u0, input) iff input == 0
         if (runtime_safety and block.wantSafety()) {
             try sema.requireRuntimeBlock(block, src, operand_src);
-            const wanted_info = dest_scalar_ty.intInfo(mod);
+            const wanted_info = dest_scalar_ty.intInfo(zcu);
             const wanted_bits = wanted_info.bits;
 
             if (wanted_bits == 0) {
@@ -10416,8 +10415,8 @@ fn intCast(
 
     try sema.requireRuntimeBlock(block, src, operand_src);
     if (runtime_safety and block.wantSafety()) {
-        const actual_info = operand_scalar_ty.intInfo(mod);
-        const wanted_info = dest_scalar_ty.intInfo(mod);
+        const actual_info = operand_scalar_ty.intInfo(zcu);
+        const wanted_info = dest_scalar_ty.intInfo(zcu);
         const actual_bits = actual_info.bits;
         const wanted_bits = wanted_info.bits;
         const actual_value_bits = actual_bits - @intFromBool(actual_info.signedness == .signed);
@@ -10437,7 +10436,7 @@ fn intCast(
                 // negative differences (`operand` > `dest_max`) appear too big.
                 const unsigned_scalar_operand_ty = try pt.intType(.unsigned, actual_bits);
                 const unsigned_operand_ty = if (is_vector) try pt.vectorType(.{
-                    .len = dest_ty.vectorLen(mod),
+                    .len = dest_ty.vectorLen(zcu),
                     .child = unsigned_scalar_operand_ty.toIntern(),
                 }) else unsigned_scalar_operand_ty;
                 const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff);
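
In source terms, the runtime checks built above amount to (safe build modes assumed):

    fn narrow(x: u16) u8 {
        return @intCast(x); // checked in safe builds: panics if x does not fit in a u8
    }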
@@ -10520,7 +10519,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -10529,7 +10528,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@bitCast");
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    switch (dest_ty.zigTypeTag(mod)) {
+    switch (dest_ty.zigTypeTag(zcu)) {
         .AnyFrame,
         .ComptimeFloat,
         .ComptimeInt,
@@ -10551,7 +10550,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)});
                 errdefer msg.destroy(sema.gpa);
-                switch (operand_ty.zigTypeTag(mod)) {
+                switch (operand_ty.zigTypeTag(zcu)) {
                     .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
                     else => {},
                 }
@@ -10565,7 +10564,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)});
                 errdefer msg.destroy(sema.gpa);
-                switch (operand_ty.zigTypeTag(mod)) {
+                switch (operand_ty.zigTypeTag(zcu)) {
                     .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
                     .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(pt)}),
                     else => {},
@@ -10575,8 +10574,8 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         },
-        .Struct, .Union => if (dest_ty.containerLayout(mod) == .auto) {
-            const container = switch (dest_ty.zigTypeTag(mod)) {
+        .Struct, .Union => if (dest_ty.containerLayout(zcu) == .auto) {
+            const container = switch (dest_ty.zigTypeTag(zcu)) {
                 .Struct => "struct",
                 .Union => "union",
                 else => unreachable,
@@ -10593,7 +10592,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         .Vector,
         => {},
     }
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .AnyFrame,
         .ComptimeFloat,
         .ComptimeInt,
@@ -10615,7 +10614,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)});
                 errdefer msg.destroy(sema.gpa);
-                switch (dest_ty.zigTypeTag(mod)) {
+                switch (dest_ty.zigTypeTag(zcu)) {
                     .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(pt)}),
                     else => {},
                 }
@@ -10628,7 +10627,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)});
                 errdefer msg.destroy(sema.gpa);
-                switch (dest_ty.zigTypeTag(mod)) {
+                switch (dest_ty.zigTypeTag(zcu)) {
                     .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(pt)}),
                     .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(pt)}),
                     else => {},
@@ -10638,8 +10637,8 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         },
-        .Struct, .Union => if (operand_ty.containerLayout(mod) == .auto) {
-            const container = switch (operand_ty.zigTypeTag(mod)) {
+        .Struct, .Union => if (operand_ty.containerLayout(zcu) == .auto) {
+            const container = switch (operand_ty.zigTypeTag(zcu)) {
                 .Struct => "struct",
                 .Union => "union",
                 else => unreachable,
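
The error notes above steer each rejected `@bitCast` toward the dedicated builtin; for example:

    const f: f32 = 1.0;
    const bits: u32 = @bitCast(f);       // ok: plain data of equal bit size
    const E = enum(u32) { one = 0x3f800000 };
    // const bad: E = @bitCast(bits);    // error: cannot @bitCast to 'E';
    const good: E = @enumFromInt(bits);  //   the note suggests @enumFromInt instead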
@@ -10664,24 +10663,24 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
 
     const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@floatCast");
-    const dest_scalar_ty = dest_ty.scalarType(mod);
+    const dest_scalar_ty = dest_ty.scalarType(zcu);
 
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    const operand_scalar_ty = operand_ty.scalarType(mod);
+    const operand_scalar_ty = operand_ty.scalarType(zcu);
 
     try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
-    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
+    const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
 
-    const target = mod.getTarget();
-    const dest_is_comptime_float = switch (dest_scalar_ty.zigTypeTag(mod)) {
+    const target = zcu.getTarget();
+    const dest_is_comptime_float = switch (dest_scalar_ty.zigTypeTag(zcu)) {
         .ComptimeFloat => true,
         .Float => false,
         else => return sema.fail(
@@ -10692,7 +10691,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         ),
     };
 
-    switch (operand_scalar_ty.zigTypeTag(mod)) {
+    switch (operand_scalar_ty.zigTypeTag(zcu)) {
         .ComptimeFloat, .Float, .ComptimeInt => {},
         else => return sema.fail(
             block,
@@ -10706,7 +10705,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (!is_vector) {
             return Air.internedToRef((try operand_val.floatCast(dest_ty, pt)).toIntern());
         }
-        const vec_len = operand_ty.vectorLen(mod);
+        const vec_len = operand_ty.vectorLen(zcu);
         const new_elems = try sema.arena.alloc(InternPool.Index, vec_len);
         for (new_elems, 0..) |*new_elem, i| {
             const old_elem = try operand_val.elemValue(pt, i);
@@ -10730,7 +10729,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     if (!is_vector) {
         return block.addTyOp(.fptrunc, dest_ty, operand);
     }
-    const vec_len = operand_ty.vectorLen(mod);
+    const vec_len = operand_ty.vectorLen(zcu);
     const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len);
     for (new_elems, 0..) |*new_elem, i| {
         const idx_ref = try pt.intRef(Type.usize, i);
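
`@floatCast` handles vectors by converting lane by lane: comptime operands go through `floatCast` per element, and the runtime path emits one `fptrunc` per lane, reassembled afterwards. Sketch:

    const wide: @Vector(4, f64) = @splat(1.5);
    const narrow: @Vector(4, f32) = @floatCast(wide); // converted lane by lane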
@@ -10781,21 +10780,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const array_ptr = try sema.resolveInst(extra.lhs);
     const elem_index = try sema.resolveInst(extra.rhs);
     const indexable_ty = sema.typeOf(array_ptr);
-    if (indexable_ty.zigTypeTag(mod) != .Pointer) {
+    if (indexable_ty.zigTypeTag(zcu) != .Pointer) {
         const capture_src = block.src(.{ .for_capture_from_input = inst_data.src_node });
         const msg = msg: {
             const msg = try sema.errMsg(capture_src, "pointer capture of non pointer type '{}'", .{
                 indexable_ty.fmt(pt),
             });
             errdefer msg.destroy(sema.gpa);
-            if (indexable_ty.isIndexable(mod)) {
+            if (indexable_ty.isIndexable(zcu)) {
                 try sema.errNote(src, msg, "consider using '&' here", .{});
             }
             break :msg msg;
@@ -10824,16 +10823,16 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
     const array_ptr = try sema.resolveInst(extra.ptr);
     const elem_index = try pt.intRef(Type.usize, extra.index);
-    const array_ty = sema.typeOf(array_ptr).childType(mod);
-    switch (array_ty.zigTypeTag(mod)) {
+    const array_ty = sema.typeOf(array_ptr).childType(zcu);
+    switch (array_ty.zigTypeTag(zcu)) {
         .Array, .Vector => {},
-        else => if (!array_ty.isTuple(mod)) {
+        else => if (!array_ty.isTuple(zcu)) {
             return sema.failWithArrayInitNotSupported(block, src, array_ty);
         },
     }
@@ -11059,9 +11058,9 @@ const SwitchProngAnalysis = struct {
     ) CompileError!Air.Inst.Ref {
         const sema = spa.sema;
         const pt = sema.pt;
-        const mod = pt.zcu;
+        const zcu = pt.zcu;
         const operand_ty = sema.typeOf(spa.operand);
-        if (operand_ty.zigTypeTag(mod) != .Union) {
+        if (operand_ty.zigTypeTag(zcu) != .Union) {
             const tag_capture_src: LazySrcLoc = .{
                 .base_node_inst = capture_src.base_node_inst,
                 .offset = .{ .switch_tag_capture = capture_src.offset.switch_capture },
@@ -11429,9 +11428,9 @@ fn switchCond(
     operand: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .Type,
         .Void,
         .Bool,
@@ -11445,7 +11444,7 @@ fn switchCond(
         .ErrorSet,
         .Enum,
         => {
-            if (operand_ty.isSlice(mod)) {
+            if (operand_ty.isSlice(zcu)) {
                 return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)});
             }
             if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| {
@@ -11456,11 +11455,11 @@ fn switchCond(
 
         .Union => {
             try operand_ty.resolveFields(pt);
-            const enum_ty = operand_ty.unionTagType(mod) orelse {
+            const enum_ty = operand_ty.unionTagType(zcu) orelse {
                 const msg = msg: {
                     const msg = try sema.errMsg(src, "switch on union with no attached enum", .{});
                     errdefer msg.destroy(sema.gpa);
-                    if (operand_ty.srcLocOrNull(mod)) |union_src| {
+                    if (operand_ty.srcLocOrNull(zcu)) |union_src| {
                         try sema.errNote(union_src, msg, "consider 'union(enum)' here", .{});
                     }
                     break :msg msg;
@@ -11492,7 +11491,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const switch_src = block.nodeOffset(inst_data.src_node);
@@ -11577,17 +11576,17 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
 
     const operand_ty = sema.typeOf(raw_operand_val);
     const operand_err_set = if (extra.data.bits.payload_is_ref)
-        operand_ty.childType(mod)
+        operand_ty.childType(zcu)
     else
         operand_ty;
 
-    if (operand_err_set.zigTypeTag(mod) != .ErrorUnion) {
+    if (operand_err_set.zigTypeTag(zcu) != .ErrorUnion) {
         return sema.fail(block, switch_src, "expected error union type, found '{}'", .{
             operand_ty.fmt(pt),
         });
     }
 
-    const operand_err_set_ty = operand_err_set.errorUnionSet(mod);
+    const operand_err_set_ty = operand_err_set.errorUnionSet(zcu);
 
     const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
     try sema.air_instructions.append(gpa, .{
@@ -11628,7 +11627,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
     defer merges.deinit(gpa);
 
     const resolved_err_set = try sema.resolveInferredErrorSetTy(block, main_src, operand_err_set_ty.toIntern());
-    if (Type.fromInterned(resolved_err_set).errorSetIsEmpty(mod)) {
+    if (Type.fromInterned(resolved_err_set).errorSetIsEmpty(zcu)) {
         return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
     }
 
@@ -11662,13 +11661,13 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
         else
             ov;
 
-        if (operand_val.errorUnionIsPayload(mod)) {
+        if (operand_val.errorUnionIsPayload(zcu)) {
             return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
         } else {
             const err_val = Value.fromInterned(try pt.intern(.{
                 .err = .{
                     .ty = operand_err_set_ty.toIntern(),
-                    .name = operand_val.getErrorName(mod).unwrap().?,
+                    .name = operand_val.getErrorName(zcu).unwrap().?,
                 },
             }));
             spa.operand = if (extra.data.bits.payload_is_ref)
@@ -11706,7 +11705,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
     }
 
     if (scalar_cases_len + multi_cases_len == 0) {
-        if (else_error_ty) |ty| if (ty.errorSetIsEmpty(mod)) {
+        if (else_error_ty) |ty| if (ty.errorSetIsEmpty(zcu)) {
             return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
         };
     }
@@ -11720,7 +11719,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
     }
 
     const cond = if (extra.data.bits.payload_is_ref) blk: {
-        try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val).elemType2(mod));
+        try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val).elemType2(zcu));
         const loaded = try sema.analyzeLoad(block, main_src, raw_operand_val, main_src);
         break :blk try sema.analyzeIsNonErr(block, main_src, loaded);
     } else blk: {
@@ -11803,7 +11802,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
@@ -11873,12 +11872,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
     };
 
     const maybe_union_ty = sema.typeOf(raw_operand_val);
-    const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;
+    const union_originally = maybe_union_ty.zigTypeTag(zcu) == .Union;
 
     // Duplicate checking variables later also used for `inline else`.
     var seen_enum_fields: []?LazySrcLoc = &.{};
     var seen_errors = SwitchErrorSet.init(gpa);
-    var range_set = RangeSet.init(gpa, pt);
+    var range_set = RangeSet.init(gpa, zcu);
     var true_count: u8 = 0;
     var false_count: u8 = 0;
 
@@ -11891,12 +11890,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
     var empty_enum = false;
 
     const operand_ty = sema.typeOf(operand);
-    const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet;
+    const err_set = operand_ty.zigTypeTag(zcu) == .ErrorSet;
 
     var else_error_ty: ?Type = null;
 
     // Validate usage of '_' prongs.
-    if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) {
+    if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(zcu) or union_originally)) {
         const msg = msg: {
             const msg = try sema.errMsg(
                 src,
@@ -11922,11 +11921,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
     }
 
     // Validate for duplicate items, missing else prong, and invalid range.
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .Union => unreachable, // handled in `switchCond`
         .Enum => {
-            seen_enum_fields = try gpa.alloc(?LazySrcLoc, operand_ty.enumFieldCount(mod));
-            empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod);
+            seen_enum_fields = try gpa.alloc(?LazySrcLoc, operand_ty.enumFieldCount(zcu));
+            empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(zcu);
             @memset(seen_enum_fields, null);
             // `range_set` is used for non-exhaustive enum values that do not correspond to any tags.
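
The `_` prong being validated here is only legal on a switch over a non-exhaustive enum (and not when the operand came from a union); sketch:

    const E = enum(u8) { a, b, _ };
    fn classify(e: E) u32 {
        return switch (e) {
            .a => 1,
            .b => 2,
            _ => 0, // covers the unnamed values of the non-exhaustive enum
        };
    }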
 
@@ -11989,7 +11988,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             } else true;
 
             if (special_prong == .@"else") {
-                if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail(
+                if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(zcu)) return sema.fail(
                     block,
                     special_prong_src,
                     "unreachable else prong; all cases already handled",
@@ -12006,17 +12005,17 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     for (seen_enum_fields, 0..) |seen_src, i| {
                         if (seen_src != null) continue;
 
-                        const field_name = operand_ty.enumFieldName(i, mod);
+                        const field_name = operand_ty.enumFieldName(i, zcu);
                         try sema.addFieldErrNote(
                             operand_ty,
                             i,
                             msg,
                             "unhandled enumeration value: '{}'",
-                            .{field_name.fmt(&mod.intern_pool)},
+                            .{field_name.fmt(&zcu.intern_pool)},
                         );
                     }
                     try sema.errNote(
-                        operand_ty.srcLoc(mod),
+                        operand_ty.srcLoc(zcu),
                         msg,
                         "enum '{}' declared here",
                         .{operand_ty.fmt(pt)},
@@ -12024,7 +12023,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     break :msg msg;
                 };
                 return sema.failWithOwnedErrorMsg(block, msg);
-            } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
+            } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(zcu) and !union_originally) {
                 return sema.fail(
                     block,
                     src,
@@ -12124,7 +12123,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             }
 
             check_range: {
-                if (operand_ty.zigTypeTag(mod) == .Int) {
+                if (operand_ty.zigTypeTag(zcu) == .Int) {
                     const min_int = try operand_ty.minInt(pt, operand_ty);
                     const max_int = try operand_ty.maxInt(pt, operand_ty);
                     if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
@@ -12388,8 +12387,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
         if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src, false)) {
             return .unreachable_value;
         }
-        if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
-            (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
+        if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(zcu) == .Enum and
+            (!operand_ty.isNonexhaustiveEnum(zcu) or union_originally))
         {
             try sema.zirDbgStmt(block, cond_dbg_node_index);
             const ok = try block.addUnOp(.is_named_enum_value, operand);
@@ -12482,9 +12481,9 @@ fn analyzeSwitchRuntimeBlock(
     allow_err_code_unwrap: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     const block = child_block.parent.?;
 
@@ -12519,8 +12518,8 @@ fn analyzeSwitchRuntimeBlock(
         const analyze_body = if (union_originally) blk: {
             const unresolved_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
             const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable;
-            const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
-            break :blk field_ty.zigTypeTag(mod) != .NoReturn;
+            const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
+            break :blk field_ty.zigTypeTag(zcu) != .NoReturn;
         } else true;
 
         if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) {
@@ -12592,7 +12591,7 @@ fn analyzeSwitchRuntimeBlock(
                 var item = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_first_ref, undefined) catch unreachable;
                 const item_last = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_last_ref, undefined) catch unreachable;
 
-                while (item.compareScalar(.lte, item_last, operand_ty, pt)) : ({
+                while (item.compareScalar(.lte, item_last, operand_ty, zcu)) : ({
                     // Previous validation has resolved any possible lazy values.
                     item = sema.intAddScalar(item, try pt.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
                         error.Overflow => unreachable,
@@ -12633,7 +12632,7 @@ fn analyzeSwitchRuntimeBlock(
                     cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                     cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
 
-                    if (item.compareScalar(.eq, item_last, operand_ty, pt)) break;
+                    if (item.compareScalar(.eq, item_last, operand_ty, zcu)) break;
                 }
             }
 
@@ -12645,8 +12644,8 @@ fn analyzeSwitchRuntimeBlock(
 
                 const analyze_body = if (union_originally) blk: {
                     const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
-                    break :blk field_ty.zigTypeTag(mod) != .NoReturn;
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
+                    break :blk field_ty.zigTypeTag(zcu) != .NoReturn;
                 } else true;
 
                 if (emit_bb) try sema.emitBackwardBranch(block, block.src(.{ .switch_case_item = .{
@@ -12696,8 +12695,8 @@ fn analyzeSwitchRuntimeBlock(
             const analyze_body = if (union_originally)
                 for (items) |item| {
                     const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
-                    if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
+                    if (field_ty.zigTypeTag(zcu) != .NoReturn) break true;
                 } else false
             else
                 true;
@@ -12836,9 +12835,9 @@ fn analyzeSwitchRuntimeBlock(
     var final_else_body: []const Air.Inst.Index = &.{};
     if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
         var emit_bb = false;
-        if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
+        if (special.is_inline) switch (operand_ty.zigTypeTag(zcu)) {
             .Enum => {
-                if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
+                if (operand_ty.isNonexhaustiveEnum(zcu) and !union_originally) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
                         operand_ty.fmt(pt),
                     });
@@ -12854,8 +12853,8 @@ fn analyzeSwitchRuntimeBlock(
                     case_block.error_return_trace_index = child_block.error_return_trace_index;
 
                     const analyze_body = if (union_originally) blk: {
-                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?;
-                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
+                        const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
+                        break :blk field_ty.zigTypeTag(zcu) != .NoReturn;
                     } else true;
 
                     if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -12887,12 +12886,12 @@ fn analyzeSwitchRuntimeBlock(
                 }
             },
             .ErrorSet => {
-                if (operand_ty.isAnyError(mod)) {
+                if (operand_ty.isAnyError(zcu)) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
                         operand_ty.fmt(pt),
                     });
                 }
-                const error_names = operand_ty.errorSetNames(mod);
+                const error_names = operand_ty.errorSetNames(zcu);
                 for (0..error_names.len) |name_index| {
                     const error_name = error_names.get(ip)[name_index];
                     if (seen_errors.contains(error_name)) continue;
@@ -13033,10 +13032,10 @@ fn analyzeSwitchRuntimeBlock(
         case_block.instructions.shrinkRetainingCapacity(0);
         case_block.error_return_trace_index = child_block.error_return_trace_index;
 
-        if (mod.backendSupportsFeature(.is_named_enum_value) and
+        if (zcu.backendSupportsFeature(.is_named_enum_value) and
             special.body.len != 0 and block.wantSafety() and
-            operand_ty.zigTypeTag(mod) == .Enum and
-            (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
+            operand_ty.zigTypeTag(zcu) == .Enum and
+            (!operand_ty.isNonexhaustiveEnum(zcu) or union_originally))
         {
             try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
             const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -13046,9 +13045,9 @@ fn analyzeSwitchRuntimeBlock(
         const analyze_body = if (union_originally and !special.is_inline)
             for (seen_enum_fields, 0..) |seen_field, index| {
                 if (seen_field != null) continue;
-                const union_obj = mod.typeToUnion(maybe_union_ty).?;
+                const union_obj = zcu.typeToUnion(maybe_union_ty).?;
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
-                if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
+                if (field_ty.zigTypeTag(zcu) != .NoReturn) break true;
             } else false
         else
             true;
@@ -13371,8 +13370,8 @@ fn validateErrSetSwitch(
 ) CompileError!?Type {
     const gpa = sema.gpa;
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     const src_node_offset = inst_data.src_node;
     const src = block.nodeOffset(src_node_offset);
@@ -13444,7 +13443,7 @@ fn validateErrSetSwitch(
         },
         else => |err_set_ty_index| else_validation: {
             const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
-            var maybe_msg: ?*Module.ErrorMsg = null;
+            var maybe_msg: ?*Zcu.ErrorMsg = null;
             errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
 
             for (error_names.get(ip)) |error_name| {
@@ -13711,8 +13710,8 @@ fn maybeErrorUnwrap(
     allow_err_code_inst: bool,
 ) !bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;
+    const zcu = pt.zcu;
+    if (!zcu.backendSupportsFeature(.panic_unwrap_error)) return false;
 
     const tags = sema.code.instructions.items(.tag);
     for (body) |inst| {
@@ -13745,7 +13744,7 @@ fn maybeErrorUnwrap(
             .as_node => try sema.zirAsNode(block, inst),
             .field_val => try sema.zirFieldVal(block, inst),
             .@"unreachable" => {
-                if (!mod.comp.formatted_panics) {
+                if (!zcu.comp.formatted_panics) {
                     try sema.safetyPanic(block, operand_src, .unwrap_error);
                     return true;
                 }
@@ -13768,7 +13767,7 @@ fn maybeErrorUnwrap(
             },
             else => unreachable,
         };
-        if (sema.typeOf(air_inst).isNoReturn(mod))
+        if (sema.typeOf(air_inst).isNoReturn(zcu))
             return true;
         sema.inst_map.putAssumeCapacity(inst, air_inst);
     }
@@ -13777,20 +13776,20 @@ fn maybeErrorUnwrap(
 
 fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const index = cond.toIndex() orelse return;
     if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) return;
 
     const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node;
     const err_operand = try sema.resolveInst(err_inst_data.operand);
     const operand_ty = sema.typeOf(err_operand);
-    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
+    if (operand_ty.zigTypeTag(zcu) == .ErrorSet) {
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
         return;
     }
     if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
-        if (!operand_ty.isError(mod)) return;
-        if (val.getErrorName(mod) == .none) return;
+        if (!operand_ty.isError(zcu)) return;
+        if (val.getErrorName(zcu) == .none) return;
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
     }
 }
@@ -13818,7 +13817,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
 
 fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -13828,7 +13827,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .needed_comptime_reason = "field name must be comptime-known",
     });
     try ty.resolveFields(pt);
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     const has_field = hf: {
         switch (ip.indexToKey(ty.toIntern())) {
@@ -13845,7 +13844,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names.get(ip), field_name) != null;
                 } else {
                     const field_index = field_name.toUnsigned(ip) orelse break :hf false;
-                    break :hf field_index < ty.structFieldCount(mod);
+                    break :hf field_index < ty.structFieldCount(zcu);
                 }
             },
             .struct_type => {
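As an illustration of what this lowering implements — a hedged sketch assuming stock `@hasField` semantics, not part of the patch:

```zig
const std = @import("std");

test "@hasField sees fields, including tuple indices, but not declarations" {
    const S = struct {
        x: u32,
        pub const version = 1;
    };
    try std.testing.expect(@hasField(S, "x"));
    try std.testing.expect(!@hasField(S, "version")); // a declaration, not a field
    // For tuples, a numeric name is treated as a field index, matching the
    // `field_name.toUnsigned(ip)` branch above:
    try std.testing.expect(@hasField(struct { u8, u8 }, "1"));
    try std.testing.expect(!@hasField(struct { u8, u8 }, "2"));
}
```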
@@ -13870,7 +13869,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
 fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = block.nodeOffset(inst_data.src_node);
@@ -13883,7 +13882,7 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 
     try sema.checkNamespaceType(block, lhs_src, container_type);
 
-    const namespace = container_type.getNamespace(mod).unwrap() orelse return .bool_false;
+    const namespace = container_type.getNamespace(zcu).unwrap() orelse return .bool_false;
     if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| {
         if (lookup.accessible) {
             return .bool_true;
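And the counterpart for declarations — again an illustrative sketch rather than commit code:

```zig
const std = @import("std");

test "@hasDecl sees declarations, not fields" {
    const S = struct {
        x: u32,
        pub const version = 1;
    };
    try std.testing.expect(@hasDecl(S, "version"));
    try std.testing.expect(!@hasDecl(S, "x")); // a field, not a declaration
}
```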
@@ -13958,9 +13957,9 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 
 fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
-    const name = try mod.intern_pool.getOrPutString(
+    const name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         inst_data.get(sema.code),
@@ -13984,7 +13983,7 @@ fn zirShl(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -13996,8 +13995,8 @@ fn zirShl(
     const rhs_ty = sema.typeOf(rhs);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
-    const scalar_ty = lhs_ty.scalarType(mod);
-    const scalar_rhs_ty = rhs_ty.scalarType(mod);
+    const scalar_ty = lhs_ty.scalarType(zcu);
+    const scalar_rhs_ty = rhs_ty.scalarType(zcu);
 
     // TODO coerce rhs if air_tag is not shl_sat
     const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
@@ -14006,20 +14005,20 @@ fn zirShl(
     const maybe_rhs_val = try sema.resolveValueIntable(rhs);
 
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef(mod)) {
+        if (rhs_val.isUndef(zcu)) {
             return pt.undefRef(sema.typeOf(lhs));
         }
         // If rhs is 0, return lhs without doing any calculations.
         if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
             return lhs;
         }
-        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
-            const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
-            if (rhs_ty.zigTypeTag(mod) == .Vector) {
+        if (scalar_ty.zigTypeTag(zcu) != .ComptimeInt and air_tag != .shl_sat) {
+            const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
+            if (rhs_ty.zigTypeTag(zcu) == .Vector) {
                 var i: usize = 0;
-                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                     const rhs_elem = try rhs_val.elemValue(pt, i);
-                    if (rhs_elem.compareHetero(.gte, bit_value, pt)) {
+                    if (rhs_elem.compareHetero(.gte, bit_value, zcu)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                             rhs_elem.fmtValueSema(pt, sema),
                             i,
@@ -14027,25 +14026,25 @@ fn zirShl(
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, pt)) {
+            } else if (rhs_val.compareHetero(.gte, bit_value, zcu)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                     rhs_val.fmtValueSema(pt, sema),
                     scalar_ty.fmt(pt),
                 });
             }
         }
-        if (rhs_ty.zigTypeTag(mod) == .Vector) {
+        if (rhs_ty.zigTypeTag(zcu) == .Vector) {
             var i: usize = 0;
-            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+            while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                 const rhs_elem = try rhs_val.elemValue(pt, i);
-                if (rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), pt)) {
+                if (rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), zcu)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                         rhs_elem.fmtValueSema(pt, sema),
                         i,
                     });
                 }
             }
-        } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) {
+        } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), zcu)) {
             return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                 rhs_val.fmtValueSema(pt, sema),
             });
@@ -14053,19 +14052,19 @@ fn zirShl(
     }
 
     const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
-        if (lhs_val.isUndef(mod)) return pt.undefRef(lhs_ty);
+        if (lhs_val.isUndef(zcu)) return pt.undefRef(lhs_ty);
         const rhs_val = maybe_rhs_val orelse {
-            if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
+            if (scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
                 return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
             }
             break :rs rhs_src;
         };
-        const val = if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
+        const val = if (scalar_ty.zigTypeTag(zcu) == .ComptimeInt)
             try lhs_val.shl(rhs_val, lhs_ty, sema.arena, pt)
         else switch (air_tag) {
             .shl_exact => val: {
                 const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, pt);
-                if (shifted.overflow_bit.compareAllWithZero(.eq, pt)) {
+                if (shifted.overflow_bit.compareAllWithZero(.eq, zcu)) {
                     break :val shifted.wrapped_result;
                 }
                 return sema.fail(block, src, "operation caused overflow", .{});
@@ -14080,7 +14079,7 @@ fn zirShl(
     const new_rhs = if (air_tag == .shl_sat) rhs: {
         // Limit the RHS type for saturating shl to be an integer as small as the LHS.
         if (rhs_is_comptime_int or
-            scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
+            scalar_rhs_ty.intInfo(zcu).bits > scalar_ty.intInfo(zcu).bits)
         {
             const max_int = Air.internedToRef((try lhs_ty.maxInt(pt, lhs_ty)).toIntern());
             const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
@@ -14092,10 +14091,10 @@ fn zirShl(
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(mod).bits;
+        const bit_count = scalar_ty.intInfo(zcu).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
             const bit_count_val = try pt.intValue(scalar_rhs_ty, bit_count);
-            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+            const ok = if (rhs_ty.zigTypeTag(zcu) == .Vector) ok: {
                 const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
@@ -14125,7 +14124,7 @@ fn zirShl(
                 } },
             });
             const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-            const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
+            const any_ov_bit = if (lhs_ty.zigTypeTag(zcu) == .Vector)
                 try block.addInst(.{
                     .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
                     .data = .{ .reduce = .{
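A small sketch of the shift-width rule that `zirShl`'s comptime checks and the runtime safety check above both enforce (illustrative only):

```zig
const std = @import("std");

test "shift amounts are bounded by the operand's bit width" {
    const x: u8 = 0b0000_0101;
    try std.testing.expectEqual(@as(u8, 0b0101_0000), x << 4);
    // `x << 8` fails at comptime: shift amount '8' is too large for operand type 'u8'.
    // A runtime RHS must coerce to u3 (log2 of 8 bits), so it is bounded by its type;
    // the extra safety check above covers non-power-of-two bit widths.
}
```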
@@ -14155,7 +14154,7 @@ fn zirShr(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -14166,26 +14165,26 @@ fn zirShr(
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    const scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_ty = lhs_ty.scalarType(zcu);
 
     const maybe_lhs_val = try sema.resolveValueIntable(lhs);
     const maybe_rhs_val = try sema.resolveValueIntable(rhs);
 
     const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
-        if (rhs_val.isUndef(mod)) {
+        if (rhs_val.isUndef(zcu)) {
             return pt.undefRef(lhs_ty);
         }
         // If rhs is 0, return lhs without doing any calculations.
         if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
             return lhs;
         }
-        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
-            const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
-            if (rhs_ty.zigTypeTag(mod) == .Vector) {
+        if (scalar_ty.zigTypeTag(zcu) != .ComptimeInt) {
+            const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
+            if (rhs_ty.zigTypeTag(zcu) == .Vector) {
                 var i: usize = 0;
-                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                     const rhs_elem = try rhs_val.elemValue(pt, i);
-                    if (rhs_elem.compareHetero(.gte, bit_value, pt)) {
+                    if (rhs_elem.compareHetero(.gte, bit_value, zcu)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                             rhs_elem.fmtValueSema(pt, sema),
                             i,
@@ -14193,31 +14192,31 @@ fn zirShr(
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, pt)) {
+            } else if (rhs_val.compareHetero(.gte, bit_value, zcu)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                     rhs_val.fmtValueSema(pt, sema),
                     scalar_ty.fmt(pt),
                 });
             }
         }
-        if (rhs_ty.zigTypeTag(mod) == .Vector) {
+        if (rhs_ty.zigTypeTag(zcu) == .Vector) {
             var i: usize = 0;
-            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+            while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                 const rhs_elem = try rhs_val.elemValue(pt, i);
-                if (rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(mod), 0), pt)) {
+                if (rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(zcu), 0), zcu)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                         rhs_elem.fmtValueSema(pt, sema),
                         i,
                     });
                 }
             }
-        } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) {
+        } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), zcu)) {
             return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                 rhs_val.fmtValueSema(pt, sema),
             });
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
+            if (lhs_val.isUndef(zcu)) {
                 return pt.undefRef(lhs_ty);
             }
             if (air_tag == .shr_exact) {
@@ -14234,18 +14233,18 @@ fn zirShr(
         }
     } else rhs_src;
 
-    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
+    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
         return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
     }
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
     const result = try block.addBinOp(air_tag, lhs, rhs);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(mod).bits;
+        const bit_count = scalar_ty.intInfo(zcu).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try pt.intValue(rhs_ty.scalarType(mod), bit_count);
+            const bit_count_val = try pt.intValue(rhs_ty.scalarType(zcu), bit_count);
 
-            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+            const ok = if (rhs_ty.zigTypeTag(zcu) == .Vector) ok: {
                 const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
@@ -14265,7 +14264,7 @@ fn zirShr(
         if (air_tag == .shr_exact) {
             const back = try block.addBinOp(.shl, result, rhs);
 
-            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+            const ok = if (rhs_ty.zigTypeTag(zcu) == .Vector) ok: {
                 const eql = try block.addCmpVector(lhs, back, .eq);
                 break :ok try block.addInst(.{
                     .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
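The `back = result << rhs` comparison built above is the runtime form of the exactness rule; a minimal sketch of the comptime-visible behavior (assumed, not from the patch):

```zig
const std = @import("std");

test "exact shifts must not discard set bits" {
    try std.testing.expectEqual(@as(u8, 0b0011), @shrExact(@as(u8, 0b1100), 2));
    try std.testing.expectEqual(@as(u8, 0b1100), @shlExact(@as(u8, 0b0011), 2));
    // @shrExact(@as(u8, 0b1101), 2) would shift out a 1 bit: a compile error at
    // comptime and safety-checked illegal behavior at runtime.
}
```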
@@ -14291,7 +14290,7 @@ fn zirBitwise(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -14305,8 +14304,8 @@ fn zirBitwise(
 
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
-    const scalar_type = resolved_type.scalarType(mod);
-    const scalar_tag = scalar_type.zigTypeTag(mod);
+    const scalar_type = resolved_type.scalarType(zcu);
+    const scalar_tag = scalar_type.zigTypeTag(zcu);
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -14314,7 +14313,7 @@ fn zirBitwise(
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     if (!is_int) {
-        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
+        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(zcu)), @tagName(rhs_ty.zigTypeTag(zcu)) });
     }
 
     const runtime_src = runtime: {
@@ -14346,26 +14345,26 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
 
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_type = sema.typeOf(operand);
-    const scalar_type = operand_type.scalarType(mod);
+    const scalar_type = operand_type.scalarType(zcu);
 
-    if (scalar_type.zigTypeTag(mod) != .Int) {
+    if (scalar_type.zigTypeTag(zcu) != .Int) {
         return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
             operand_type.fmt(pt),
         });
     }
 
     if (try sema.resolveValue(operand)) |val| {
-        if (val.isUndef(mod)) {
+        if (val.isUndef(zcu)) {
             return pt.undefRef(operand_type);
-        } else if (operand_type.zigTypeTag(mod) == .Vector) {
-            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
+        } else if (operand_type.zigTypeTag(zcu) == .Vector) {
+            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(zcu));
             const elems = try sema.arena.alloc(InternPool.Index, vec_len);
             for (elems, 0..) |*elem, i| {
                 const elem_val = try val.elemValue(pt, i);
@@ -14393,13 +14392,13 @@ fn analyzeTupleCat(
     rhs: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     const src = block.nodeOffset(src_node);
 
-    const lhs_len = lhs_ty.structFieldCount(mod);
-    const rhs_len = rhs_ty.structFieldCount(mod);
+    const lhs_len = lhs_ty.structFieldCount(zcu);
+    const rhs_len = rhs_ty.structFieldCount(zcu);
     const dest_fields = lhs_len + rhs_len;
 
     if (dest_fields == 0) {
@@ -14420,8 +14419,8 @@ fn analyzeTupleCat(
         var runtime_src: ?LazySrcLoc = null;
         var i: u32 = 0;
         while (i < lhs_len) : (i += 1) {
-            types[i] = lhs_ty.structFieldType(i, mod).toIntern();
-            const default_val = lhs_ty.structFieldDefaultValue(i, mod);
+            types[i] = lhs_ty.structFieldType(i, zcu).toIntern();
+            const default_val = lhs_ty.structFieldDefaultValue(i, zcu);
             values[i] = default_val.toIntern();
             const operand_src = block.src(.{ .array_cat_lhs = .{
                 .array_cat_offset = src_node,
@@ -14434,8 +14433,8 @@ fn analyzeTupleCat(
         }
         i = 0;
         while (i < rhs_len) : (i += 1) {
-            types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern();
-            const default_val = rhs_ty.structFieldDefaultValue(i, mod);
+            types[i + lhs_len] = rhs_ty.structFieldType(i, zcu).toIntern();
+            const default_val = rhs_ty.structFieldDefaultValue(i, zcu);
             values[i + lhs_len] = default_val.toIntern();
             const operand_src = block.src(.{ .array_cat_rhs = .{
                 .array_cat_offset = src_node,
@@ -14449,7 +14448,7 @@ fn analyzeTupleCat(
         break :rs runtime_src;
     };
 
-    const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{
+    const tuple_ty = try zcu.intern_pool.getAnonStructType(zcu.gpa, pt.tid, .{
         .types = types,
         .values = values,
         .names = &.{},
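The anonymous tuple type assembled here is what backs plain `++` on tuples; a hedged usage sketch:

```zig
const std = @import("std");

test "tuple ++ yields a new anonymous tuple type" {
    const a = .{ @as(u8, 1), "two" };
    const b = .{true};
    const c = a ++ b;
    try std.testing.expectEqual(@as(u8, 1), c[0]);
    try std.testing.expectEqualStrings("two", c[1]);
    try std.testing.expect(c[2]);
}
```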
@@ -14492,7 +14491,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = try sema.resolveInst(extra.lhs);
@@ -14501,8 +14500,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs_ty = sema.typeOf(rhs);
     const src = block.nodeOffset(inst_data.src_node);
 
-    const lhs_is_tuple = lhs_ty.isTuple(mod);
-    const rhs_is_tuple = rhs_ty.isTuple(mod);
+    const lhs_is_tuple = lhs_ty.isTuple(zcu);
+    const rhs_is_tuple = rhs_ty.isTuple(zcu);
     if (lhs_is_tuple and rhs_is_tuple) {
         return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
     }
@@ -14584,31 +14583,31 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .child = resolved_elem_ty.toIntern(),
     });
     const ptr_addrspace = p: {
-        if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
-        if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
+        if (lhs_ty.zigTypeTag(zcu) == .Pointer) break :p lhs_ty.ptrAddressSpace(zcu);
+        if (rhs_ty.zigTypeTag(zcu) == .Pointer) break :p rhs_ty.ptrAddressSpace(zcu);
         break :p null;
     };
 
-    const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
+    const runtime_src = if (switch (lhs_ty.zigTypeTag(zcu)) {
         .Array, .Struct => try sema.resolveValue(lhs),
         .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
         else => unreachable,
     }) |lhs_val| rs: {
-        if (switch (rhs_ty.zigTypeTag(mod)) {
+        if (switch (rhs_ty.zigTypeTag(zcu)) {
             .Array, .Struct => try sema.resolveValue(rhs),
             .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
             else => unreachable,
         }) |rhs_val| {
-            const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
+            const lhs_sub_val = if (lhs_ty.isSinglePointer(zcu))
                 try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty) orelse break :rs lhs_src
-            else if (lhs_ty.isSlice(mod))
+            else if (lhs_ty.isSlice(zcu))
                 try sema.maybeDerefSliceAsArray(block, lhs_src, lhs_val) orelse break :rs lhs_src
             else
                 lhs_val;
 
-            const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
+            const rhs_sub_val = if (rhs_ty.isSinglePointer(zcu))
                 try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty) orelse break :rs rhs_src
-            else if (rhs_ty.isSlice(mod))
+            else if (rhs_ty.isSlice(zcu))
                 try sema.maybeDerefSliceAsArray(block, rhs_src, rhs_val) orelse break :rs rhs_src
             else
                 rhs_val;
@@ -14617,7 +14616,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             var elem_i: u32 = 0;
             while (elem_i < lhs_len) : (elem_i += 1) {
                 const lhs_elem_i = elem_i;
-                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
+                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, zcu) else Value.@"unreachable";
                 const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(pt, lhs_elem_i) else elem_default_val;
                 const elem_val_inst = Air.internedToRef(elem_val.toIntern());
                 const operand_src = block.src(.{ .array_cat_lhs = .{
@@ -14630,7 +14629,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
             while (elem_i < result_len) : (elem_i += 1) {
                 const rhs_elem_i = elem_i - lhs_len;
-                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
+                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, zcu) else Value.@"unreachable";
                 const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(pt, rhs_elem_i) else elem_default_val;
                 const elem_val_inst = Air.internedToRef(elem_val.toIntern());
                 const operand_src = block.src(.{ .array_cat_rhs = .{
@@ -14723,12 +14722,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
 fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag(mod)) {
-        .Array => return operand_ty.arrayInfo(mod),
+    switch (operand_ty.zigTypeTag(zcu)) {
+        .Array => return operand_ty.arrayInfo(zcu),
         .Pointer => {
-            const ptr_info = operand_ty.ptrInfo(mod);
+            const ptr_info = operand_ty.ptrInfo(zcu);
             switch (ptr_info.flags.size) {
                 .Slice => {
                     const val = try sema.resolveConstDefinedValue(block, src, operand, .{
@@ -14744,20 +14743,20 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
                     };
                 },
                 .One => {
-                    if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
-                        return Type.fromInterned(ptr_info.child).arrayInfo(mod);
+                    if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Array) {
+                        return Type.fromInterned(ptr_info.child).arrayInfo(zcu);
                     }
                 },
                 .C, .Many => {},
             }
         },
         .Struct => {
-            if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) {
-                assert(!peer_ty.isTuple(mod));
+            if (operand_ty.isTuple(zcu) and peer_ty.isIndexable(zcu)) {
+                assert(!peer_ty.isTuple(zcu));
                 return .{
-                    .elem_type = peer_ty.elemType2(mod),
+                    .elem_type = peer_ty.elemType2(zcu),
                     .sentinel = null,
-                    .len = operand_ty.arrayLen(mod),
+                    .len = operand_ty.arrayLen(zcu),
                 };
             }
         },
@@ -14774,12 +14773,12 @@ fn analyzeTupleMul(
     factor: usize,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
     const src = block.nodeOffset(src_node);
     const len_src = block.src(.{ .node_offset_bin_rhs = src_node });
 
-    const tuple_len = operand_ty.structFieldCount(mod);
+    const tuple_len = operand_ty.structFieldCount(zcu);
     const final_len = std.math.mul(usize, tuple_len, factor) catch
         return sema.fail(block, len_src, "operation results in overflow", .{});
 
@@ -14792,8 +14791,8 @@ fn analyzeTupleMul(
     const opt_runtime_src = rs: {
         var runtime_src: ?LazySrcLoc = null;
         for (0..tuple_len) |i| {
-            types[i] = operand_ty.structFieldType(i, mod).toIntern();
-            values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern();
+            types[i] = operand_ty.structFieldType(i, zcu).toIntern();
+            values[i] = operand_ty.structFieldDefaultValue(i, zcu).toIntern();
             const operand_src = block.src(.{ .array_cat_lhs = .{
                 .array_cat_offset = src_node,
                 .elem_index = @intCast(i),
@@ -14810,7 +14809,7 @@ fn analyzeTupleMul(
         break :rs runtime_src;
     };
 
-    const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{
+    const tuple_ty = try zcu.intern_pool.getAnonStructType(zcu.gpa, pt.tid, .{
         .types = types,
         .values = values,
         .names = &.{},
@@ -14848,7 +14847,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.ArrayMul, inst_data.payload_index).data;
     const uncoerced_lhs = try sema.resolveInst(extra.lhs);
@@ -14867,17 +14866,17 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const res_ty_inst = try sema.resolveInst(extra.res_ty);
             const res_ty = try sema.analyzeAsType(block, src, res_ty_inst);
             if (res_ty.isGenericPoison()) break :no_coerce;
-            if (!uncoerced_lhs_ty.isTuple(mod)) break :no_coerce;
-            const lhs_len = uncoerced_lhs_ty.structFieldCount(mod);
-            const lhs_dest_ty = switch (res_ty.zigTypeTag(mod)) {
+            if (!uncoerced_lhs_ty.isTuple(zcu)) break :no_coerce;
+            const lhs_len = uncoerced_lhs_ty.structFieldCount(zcu);
+            const lhs_dest_ty = switch (res_ty.zigTypeTag(zcu)) {
                 else => break :no_coerce,
                 .Array => try pt.arrayType(.{
-                    .child = res_ty.childType(mod).toIntern(),
+                    .child = res_ty.childType(zcu).toIntern(),
                     .len = lhs_len,
-                    .sentinel = if (res_ty.sentinel(mod)) |s| s.toIntern() else .none,
+                    .sentinel = if (res_ty.sentinel(zcu)) |s| s.toIntern() else .none,
                 }),
                 .Vector => try pt.vectorType(.{
-                    .child = res_ty.childType(mod).toIntern(),
+                    .child = res_ty.childType(zcu).toIntern(),
                     .len = lhs_len,
                 }),
             };
@@ -14893,7 +14892,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         break :coerced_lhs .{ uncoerced_lhs, uncoerced_lhs_ty };
     };
 
-    if (lhs_ty.isTuple(mod)) {
+    if (lhs_ty.isTuple(zcu)) {
         // In `**` rhs must be comptime-known, but lhs can be runtime-known
         const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{
             .needed_comptime_reason = "array multiplication factor must be comptime-known",
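Illustrating the `**` rule the comment above states — a sketch, not commit code:

```zig
const std = @import("std");

test "** repeats the operand a comptime-known number of times" {
    const row = [_]u8{ 1, 2 } ** 3;
    try std.testing.expectEqualSlices(u8, &.{ 1, 2, 1, 2, 1, 2 }, &row);
    // The factor must be comptime-known; a runtime factor is a compile error,
    // while a tuple LHS may be runtime-known.
}
```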
@@ -14907,7 +14906,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         const msg = msg: {
             const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
-            switch (lhs_ty.zigTypeTag(mod)) {
+            switch (lhs_ty.zigTypeTag(zcu)) {
                 .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
                     try sema.errNote(operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
                 },
@@ -14933,13 +14932,13 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .child = lhs_info.elem_type.toIntern(),
     });
 
-    const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
+    const ptr_addrspace = if (lhs_ty.zigTypeTag(zcu) == .Pointer) lhs_ty.ptrAddressSpace(zcu) else null;
     const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
 
     if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| ct: {
-        const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
+        const lhs_sub_val = if (lhs_ty.isSinglePointer(zcu))
             try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty) orelse break :ct
-        else if (lhs_ty.isSlice(mod))
+        else if (lhs_ty.isSlice(zcu))
             try sema.maybeDerefSliceAsArray(block, lhs_src, lhs_val) orelse break :ct
         else
             lhs_val;
@@ -15022,7 +15021,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
 fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const lhs_src = src;
@@ -15030,9 +15029,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(zcu);
 
-    if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
+    if (rhs_scalar_ty.isUnsignedInt(zcu) or switch (rhs_scalar_ty.zigTypeTag(zcu)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
         else => true,
     }) {
@@ -15042,7 +15041,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     if (rhs_scalar_ty.isAnyFloat()) {
         // We handle float negation here to ensure negative zero is represented in the bits.
         if (try sema.resolveValue(rhs)) |rhs_val| {
-            if (rhs_val.isUndef(mod)) return pt.undefRef(rhs_ty);
+            if (rhs_val.isUndef(zcu)) return pt.undefRef(rhs_ty);
             return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, pt)).toIntern());
         }
         try sema.requireRuntimeBlock(block, src, null);
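A sketch of the negation rules `zirNegate` enforces (illustrative; assumes stock semantics):

```zig
const std = @import("std");

test "plain negation rejects unsigned operands; -% wraps" {
    const x: i32 = 42;
    try std.testing.expectEqual(@as(i32, -42), -x);
    // `-@as(u32, 42)` is a compile error; wrapping negation is the escape hatch:
    try std.testing.expectEqual(@as(u32, 0xffff_ffd6), -%@as(u32, 42));
}
```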
@@ -15055,7 +15054,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 
 fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const lhs_src = src;
@@ -15063,9 +15062,9 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(zcu);
 
-    switch (rhs_scalar_ty.zigTypeTag(mod)) {
+    switch (rhs_scalar_ty.zigTypeTag(zcu)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => {},
         else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}),
     }
@@ -15097,7 +15096,7 @@ fn zirArithmetic(
 
 fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15107,8 +15106,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -15120,9 +15119,9 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType(mod);
-    const rhs_scalar_ty = rhs_ty.scalarType(mod);
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+    const rhs_scalar_ty = rhs_ty.scalarType(zcu);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -15131,15 +15130,15 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);
 
-    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
-        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
+    if ((lhs_ty.zigTypeTag(zcu) == .ComptimeFloat and rhs_ty.zigTypeTag(zcu) == .ComptimeInt) or
+        (lhs_ty.zigTypeTag(zcu) == .ComptimeInt and rhs_ty.zigTypeTag(zcu) == .ComptimeFloat))
     {
         // If it makes a difference whether we coerce to ints or floats before doing the division, error.
         // If lhs % rhs is 0, it doesn't matter.
         const lhs_val = maybe_lhs_val orelse unreachable;
         const rhs_val = maybe_rhs_val orelse unreachable;
         const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt) catch unreachable;
-        if (!rem.compareAllWithZero(.eq, pt)) {
+        if (!rem.compareAllWithZero(.eq, zcu)) {
             return sema.fail(
                 block,
                 src,
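In user-facing terms, the remainder test above means (hedged sketch):

```zig
const std = @import("std");

test "mixed comptime_int / comptime_float division must be coercion-independent" {
    const q: f64 = 8 / 2.0; // allowed: remainder is zero, both coercions agree on 4
    try std.testing.expectEqual(@as(f64, 4.0), q);
    // `7 / 2.0` is rejected: integer division gives 3, float division gives 3.5.
}
```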
@@ -15179,11 +15178,11 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     switch (scalar_tag) {
         .Int, .ComptimeInt, .ComptimeFloat => {
             if (maybe_lhs_val) |lhs_val| {
-                if (!lhs_val.isUndef(mod)) {
+                if (!lhs_val.isUndef(zcu)) {
                     if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                         const scalar_zero = switch (scalar_tag) {
-                            .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
-                            .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
+                            .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+                            .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
                             else => unreachable,
                         };
                         const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15192,7 +15191,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                 }
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef(mod)) {
+                if (rhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -15206,8 +15205,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 
     const runtime_src = rs: {
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
-                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
+            if (lhs_val.isUndef(zcu)) {
+                if (lhs_scalar_ty.isSignedInt(zcu) and rhs_scalar_ty.isSignedInt(zcu)) {
                     if (maybe_rhs_val) |rhs_val| {
                         if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) {
                             return pt.undefRef(resolved_type);
@@ -15245,7 +15244,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     }
 
     const air_tag = if (is_int) blk: {
-        if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
+        if (lhs_ty.isSignedInt(zcu) or rhs_ty.isSignedInt(zcu)) {
             return sema.fail(
                 block,
                 src,
@@ -15263,7 +15262,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 
 fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15273,8 +15272,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -15286,8 +15285,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType(mod);
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -15314,13 +15313,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         // If the lhs is undefined, compile error because there is a possible
         // value for which the division would result in a remainder.
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
+            if (lhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, lhs_src);
             } else {
                 if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                     const scalar_zero = switch (scalar_tag) {
-                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
-                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
+                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
                         else => unreachable,
                     };
                     const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15329,7 +15328,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
         }
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef(mod)) {
+            if (rhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -15341,7 +15340,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (maybe_rhs_val) |rhs_val| {
                 if (is_int) {
                     const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt);
-                    if (!(modulus_val.compareAllWithZero(.eq, pt))) {
+                    if (!(modulus_val.compareAllWithZero(.eq, zcu))) {
                         return sema.fail(block, src, "exact division produced remainder", .{});
                     }
                     var overflow_idx: ?usize = null;
@@ -15352,7 +15351,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     return Air.internedToRef(res.toIntern());
                 } else {
                     const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt);
-                    if (!(modulus_val.compareAllWithZero(.eq, pt))) {
+                    if (!(modulus_val.compareAllWithZero(.eq, zcu))) {
                         return sema.fail(block, src, "exact division produced remainder", .{});
                     }
                     return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern());
@@ -15376,7 +15375,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         const ok = if (!is_int) ok: {
             const floored = try block.addUnOp(.floor, result);
 
-            if (resolved_type.zigTypeTag(mod) == .Vector) {
+            if (resolved_type.zigTypeTag(zcu) == .Vector) {
                 const eql = try block.addCmpVector(result, floored, .eq);
                 break :ok try block.addInst(.{
                     .tag = switch (block.float_mode) {
@@ -15399,11 +15398,11 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
 
             const scalar_zero = switch (scalar_tag) {
-                .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
-                .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
+                .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+                .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
                 else => unreachable,
             };
-            if (resolved_type.zigTypeTag(mod) == .Vector) {
+            if (resolved_type.zigTypeTag(zcu) == .Vector) {
                 const zero_val = try sema.splat(resolved_type, scalar_zero);
                 const zero = Air.internedToRef(zero_val.toIntern());
                 const eql = try block.addCmpVector(remainder, zero, .eq);
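The remainder check assembled above is the runtime half of `@divExact`; at comptime the same rule looks like this (illustrative sketch):

```zig
const std = @import("std");

test "@divExact requires a zero remainder" {
    try std.testing.expectEqual(@as(u32, 7), @divExact(@as(u32, 21), 3));
    // @divExact(@as(u32, 22), 3) is a comptime error ("exact division produced
    // remainder") and safety-checked illegal behavior at runtime.
}
```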
@@ -15429,7 +15428,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
 fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15439,8 +15438,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -15452,9 +15451,9 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType(mod);
-    const rhs_scalar_ty = rhs_ty.scalarType(mod);
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+    const rhs_scalar_ty = rhs_ty.scalarType(zcu);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -15484,11 +15483,11 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         // value (zero) for which the division would be illegal behavior.
         // If the lhs is undefined, result is undefined.
         if (maybe_lhs_val) |lhs_val| {
-            if (!lhs_val.isUndef(mod)) {
+            if (!lhs_val.isUndef(zcu)) {
                 if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                     const scalar_zero = switch (scalar_tag) {
-                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
-                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
+                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
                         else => unreachable,
                     };
                     const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15497,7 +15496,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
         }
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef(mod)) {
+            if (rhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -15506,8 +15505,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             // TODO: if the RHS is one, return the LHS directly
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
-                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
+            if (lhs_val.isUndef(zcu)) {
+                if (lhs_scalar_ty.isSignedInt(zcu) and rhs_scalar_ty.isSignedInt(zcu)) {
                     if (maybe_rhs_val) |rhs_val| {
                         if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) {
                             return pt.undefRef(resolved_type);
@@ -15540,7 +15539,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
 fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15550,8 +15549,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -15563,9 +15562,9 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType(mod);
-    const rhs_scalar_ty = rhs_ty.scalarType(mod);
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+    const rhs_scalar_ty = rhs_ty.scalarType(zcu);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -15595,11 +15594,11 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         // value (zero) for which the division would be illegal behavior.
         // If the lhs is undefined, result is undefined.
         if (maybe_lhs_val) |lhs_val| {
-            if (!lhs_val.isUndef(mod)) {
+            if (!lhs_val.isUndef(zcu)) {
                 if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                     const scalar_zero = switch (scalar_tag) {
-                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
-                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
+                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
                         else => unreachable,
                     };
                     const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15608,7 +15607,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
         }
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef(mod)) {
+            if (rhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -15616,8 +15615,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
-                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
+            if (lhs_val.isUndef(zcu)) {
+                if (lhs_scalar_ty.isSignedInt(zcu) and rhs_scalar_ty.isSignedInt(zcu)) {
                     if (maybe_rhs_val) |rhs_val| {
                         if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) {
                             return pt.undefRef(resolved_type);
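
The undef special case above is safe because, for signed integers, only a divisor of -1 can turn an undefined dividend into an overflow. For context, the rounding rule this function folds, in user code (a hypothetical test, not part of this change):

    const std = @import("std");

    test "trunc vs floor division" {
        // @divTrunc rounds toward zero; @divFloor rounds toward -inf.
        try std.testing.expectEqual(@as(i32, -2), @divTrunc(@as(i32, -7), 3));
        try std.testing.expectEqual(@as(i32, -3), @divFloor(@as(i32, -7), 3));
        // Both agree whenever the operands share a sign.
        try std.testing.expectEqual(@as(i32, 2), @divTrunc(@as(i32, 7), 3));
    }
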
@@ -15666,14 +15665,14 @@ fn addDivIntOverflowSafety(
     is_int: bool,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (!is_int) return;
 
     // If the LHS is unsigned, it cannot cause overflow.
-    if (!lhs_scalar_ty.isSignedInt(mod)) return;
+    if (!lhs_scalar_ty.isSignedInt(zcu)) return;
 
     // If the LHS is widened to a larger integer type, no overflow is possible.
-    if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
+    if (lhs_scalar_ty.intInfo(zcu).bits < resolved_type.intInfo(zcu).bits) {
         return;
     }
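
The early returns above rely on -minInt(T) being the only signed quotient that cannot be represented; a sketch of that arithmetic (a hypothetical test, assuming i8):

    const std = @import("std");

    test "the one signed division that can overflow" {
        // -(-128) = 128 does not fit in i8, so minInt(i8) / -1 is the
        // single case the safety check must guard; any wider result
        // type makes it representable, hence the widening early return.
        const min: i8 = std.math.minInt(i8);
        try std.testing.expectEqual(@as(i16, 128), -@as(i16, min));
    }
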
 
@@ -15693,7 +15692,7 @@ fn addDivIntOverflowSafety(
     }
 
     var ok: Air.Inst.Ref = .none;
-    if (resolved_type.zigTypeTag(mod) == .Vector) {
+    if (resolved_type.zigTypeTag(zcu) == .Vector) {
         if (maybe_lhs_val == null) {
             const min_int_ref = Air.internedToRef(min_int.toIntern());
             ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
@@ -15751,12 +15750,12 @@ fn addDivByZeroSafety(
     if (maybe_rhs_val != null) return;
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const scalar_zero = if (is_int)
-        try pt.intValue(resolved_type.scalarType(mod), 0)
+        try pt.intValue(resolved_type.scalarType(zcu), 0)
     else
-        try pt.floatValue(resolved_type.scalarType(mod), 0.0);
-    const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
+        try pt.floatValue(resolved_type.scalarType(zcu), 0.0);
+    const ok = if (resolved_type.zigTypeTag(zcu) == .Vector) ok: {
         const zero_val = try sema.splat(resolved_type, scalar_zero);
         const zero = Air.internedToRef(zero_val.toIntern());
         const ok = try block.addCmpVector(casted_rhs, zero, .neq);
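
A user-level analogue of the vector check built here: an elementwise `!= 0` comparison collapsed into a single bool (a hypothetical test, not the compiler's AIR API):

    const std = @import("std");

    test "all-lanes-nonzero check for a vector divisor" {
        const rhs: @Vector(4, i32) = .{ 1, 2, 3, 4 };
        const zero: @Vector(4, i32) = @splat(0);
        // Mirrors the cmp_neq vector emitted above, reduced to one bool.
        try std.testing.expect(@reduce(.And, rhs != zero));
    }
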
@@ -15784,7 +15783,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
 
 fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15794,8 +15793,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -15804,14 +15803,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
+    const is_vector = resolved_type.zigTypeTag(zcu) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType(mod);
-    const rhs_scalar_ty = rhs_ty.scalarType(mod);
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+    const rhs_scalar_ty = rhs_ty.scalarType(zcu);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -15836,13 +15835,13 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         // then emit a compile error saying you have to pick one.
         if (is_int) {
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef(mod)) {
+                if (lhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, lhs_src);
                 }
                 if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                     const scalar_zero = switch (scalar_tag) {
-                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
-                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
+                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
                         else => unreachable,
                     };
                     const zero_val = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -15851,11 +15850,11 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                     } })) else scalar_zero;
                     return Air.internedToRef(zero_val.toIntern());
                 }
-            } else if (lhs_scalar_ty.isSignedInt(mod)) {
+            } else if (lhs_scalar_ty.isSignedInt(zcu)) {
                 return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef(mod)) {
+                if (rhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -15876,7 +15875,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                     return Air.internedToRef(rem_result.toIntern());
                 }
                 break :rs lhs_src;
-            } else if (rhs_scalar_ty.isSignedInt(mod)) {
+            } else if (rhs_scalar_ty.isSignedInt(zcu)) {
                 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
             } else {
                 break :rs rhs_src;
@@ -15884,7 +15883,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         }
         // float operands
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef(mod)) {
+            if (rhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -15894,7 +15893,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
             }
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, pt))) {
+                if (lhs_val.isUndef(zcu) or !(try lhs_val.compareAllWithZeroSema(.gte, pt))) {
                     return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                 }
                 return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern());
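
The sign rules that make `%` ambiguous for signed or negative operands, which is what the errors above enforce (a hypothetical user-level test, not part of this change):

    const std = @import("std");

    test "@mod vs @rem for mixed signs" {
        // @mod takes the sign of the divisor; @rem takes the sign of
        // the dividend. `a % b` is only accepted where they agree.
        try std.testing.expectEqual(@as(i32, 2), @mod(@as(i32, -7), 3));
        try std.testing.expectEqual(@as(i32, -1), @rem(@as(i32, -7), 3));
    }
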
@@ -15923,10 +15922,10 @@ fn intRem(
     rhs: Value,
 ) CompileError!Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -15946,8 +15945,8 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
+    const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
+    const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
@@ -15970,7 +15969,7 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr
 
 fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -15980,8 +15979,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -15993,7 +15992,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -16016,12 +16015,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         // If the lhs is undefined, result is undefined.
         if (is_int) {
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef(mod)) {
+                if (lhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, lhs_src);
                 }
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef(mod)) {
+                if (rhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -16037,7 +16036,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         }
         // float operands
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef(mod)) {
+            if (rhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -16045,7 +16044,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             }
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
+            if (lhs_val.isUndef(zcu)) {
                 return pt.undefRef(resolved_type);
             }
             if (maybe_rhs_val) |rhs_val| {
@@ -16066,7 +16065,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 
 fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.src(.{ .node_offset_bin_op = inst_data.src_node });
     const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
@@ -16076,8 +16075,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty);
 
@@ -16089,7 +16088,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
+    const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -16112,12 +16111,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         // If the lhs is undefined, result is undefined.
         if (is_int) {
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef(mod)) {
+                if (lhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, lhs_src);
                 }
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef(mod)) {
+                if (rhs_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -16133,7 +16132,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         }
         // float operands
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef(mod)) {
+            if (rhs_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
@@ -16141,7 +16140,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             }
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef(mod)) {
+            if (lhs_val.isUndef(zcu)) {
                 return pt.undefRef(resolved_type);
             }
             if (maybe_rhs_val) |rhs_val| {
@@ -16181,8 +16180,8 @@ fn zirOverflowArithmetic(
     const lhs_ty = sema.typeOf(uncasted_lhs);
     const rhs_ty = sema.typeOf(uncasted_rhs);
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
@@ -16202,7 +16201,7 @@ fn zirOverflowArithmetic(
     const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
     const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
 
-    if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
+    if (dest_ty.scalarType(zcu).zigTypeTag(zcu) != .Int) {
         return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(pt)});
     }
 
@@ -16224,18 +16223,18 @@ fn zirOverflowArithmetic(
                 // to the result, even if it is undefined.
                 // Otherwise, if either of the arguments is undefined, undefined is returned.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
 
@@ -16248,12 +16247,12 @@ fn zirOverflowArithmetic(
                 // If the rhs is zero, then the result is lhs and no overflow occurred.
                 // Otherwise, if either operand is undefined, both results are undefined.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                     } else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     } else if (maybe_lhs_val) |lhs_val| {
-                        if (lhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
 
@@ -16266,9 +16265,9 @@ fn zirOverflowArithmetic(
                 // If either of the arguments is zero, the result is zero and no overflow occurred.
                 // If either of the arguments is one, the result is the other and no overflow occurred.
                 // Otherwise, if either of the arguments is undefined, both results are undefined.
-                const scalar_one = try pt.intValue(dest_ty.scalarType(mod), 1);
+                const scalar_one = try pt.intValue(dest_ty.scalarType(zcu), 1);
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod)) {
+                    if (!lhs_val.isUndef(zcu)) {
                         if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                         } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
@@ -16278,7 +16277,7 @@ fn zirOverflowArithmetic(
                 }
 
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod)) {
+                    if (!rhs_val.isUndef(zcu)) {
                         if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                         } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
@@ -16289,7 +16288,7 @@ fn zirOverflowArithmetic(
 
                 if (maybe_lhs_val) |lhs_val| {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
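
The zero/one shortcuts folded above can never overflow, so the bit is known without computing the product (a hypothetical test, not part of this change):

    const std = @import("std");

    test "multiplying by zero or one cannot overflow" {
        const by_zero = @mulWithOverflow(@as(u8, 255), @as(u8, 0));
        const by_one = @mulWithOverflow(@as(u8, 255), @as(u8, 1));
        try std.testing.expectEqual(@as(u1, 0), by_zero[1]);
        try std.testing.expectEqual(@as(u8, 255), by_one[0]);
        try std.testing.expectEqual(@as(u1, 0), by_one[1]);
    }
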
 
@@ -16303,18 +16302,18 @@ fn zirOverflowArithmetic(
                 // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                 // Otherwise, if either of the arguments is undefined, both results are undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
 
@@ -16374,8 +16373,8 @@ fn zirOverflowArithmetic(
 
 fn splat(sema: *Sema, ty: Type, val: Value) !Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) != .Vector) return val;
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) != .Vector) return val;
     const repeated = try pt.intern(.{ .aggregate = .{
         .ty = ty.toIntern(),
         .storage = .{ .repeated_elem = val.toIntern() },
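
A user-level analogue of the repeated_elem interning that Sema.splat performs (a hypothetical test, not part of this change):

    const std = @import("std");

    test "splatting a scalar across vector lanes" {
        const v: @Vector(4, u8) = @splat(7);
        try std.testing.expectEqual(@as(u8, 7), v[0]);
        try std.testing.expectEqual(@as(u8, 7), v[3]);
    }
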
@@ -16385,16 +16384,16 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value {
 
 fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try pt.vectorType(.{
-        .len = ty.vectorLen(mod),
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const ov_ty = if (ty.zigTypeTag(zcu) == .Vector) try pt.vectorType(.{
+        .len = ty.vectorLen(zcu),
         .child = .u1_type,
     }) else Type.u1;
 
     const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
     const values = [2]InternPool.Index{ .none, .none };
-    const tuple_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{
+    const tuple_ty = try ip.getAnonStructType(zcu.gpa, pt.tid, .{
         .types = &types,
         .values = &values,
         .names = &.{},
@@ -16415,41 +16414,41 @@ fn analyzeArithmetic(
     want_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
     if (lhs_zig_ty_tag == .Pointer) {
         if (rhs_zig_ty_tag == .Pointer) {
-            if (lhs_ty.ptrSize(mod) != .Slice and rhs_ty.ptrSize(mod) != .Slice) {
+            if (lhs_ty.ptrSize(zcu) != .Slice and rhs_ty.ptrSize(zcu) != .Slice) {
                 if (zir_tag != .sub) {
                     return sema.failWithInvalidPtrArithmetic(block, src, "pointer-pointer", "subtraction");
                 }
-                if (!lhs_ty.elemType2(mod).eql(rhs_ty.elemType2(mod), mod)) {
+                if (!lhs_ty.elemType2(zcu).eql(rhs_ty.elemType2(zcu), zcu)) {
                     return sema.fail(block, src, "incompatible pointer arithmetic operands '{}' and '{}'", .{
                         lhs_ty.fmt(pt), rhs_ty.fmt(pt),
                     });
                 }
 
-                const elem_size = lhs_ty.elemType2(mod).abiSize(pt);
+                const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
                 if (elem_size == 0) {
                     return sema.fail(block, src, "pointer arithmetic requires element type '{}' to have runtime bits", .{
-                        lhs_ty.elemType2(mod).fmt(pt),
+                        lhs_ty.elemType2(zcu).fmt(pt),
                     });
                 }
 
                 const runtime_src = runtime_src: {
                     if (try sema.resolveValue(lhs)) |lhs_value| {
                         if (try sema.resolveValue(rhs)) |rhs_value| {
-                            const lhs_ptr = switch (mod.intern_pool.indexToKey(lhs_value.toIntern())) {
+                            const lhs_ptr = switch (zcu.intern_pool.indexToKey(lhs_value.toIntern())) {
                                 .undef => return sema.failWithUseOfUndef(block, lhs_src),
                                 .ptr => |ptr| ptr,
                                 else => unreachable,
                             };
-                            const rhs_ptr = switch (mod.intern_pool.indexToKey(rhs_value.toIntern())) {
+                            const rhs_ptr = switch (zcu.intern_pool.indexToKey(rhs_value.toIntern())) {
                                 .undef => return sema.failWithUseOfUndef(block, rhs_src),
                                 .ptr => |ptr| ptr,
                                 else => unreachable,
@@ -16475,7 +16474,7 @@ fn analyzeArithmetic(
                 return try block.addBinOp(.div_exact, address, try pt.intRef(Type.usize, elem_size));
             }
         } else {
-            switch (lhs_ty.ptrSize(mod)) {
+            switch (lhs_ty.ptrSize(zcu)) {
                 .One, .Slice => {},
                 .Many, .C => {
                     const air_tag: Air.Inst.Tag = switch (zir_tag) {
@@ -16484,9 +16483,9 @@ fn analyzeArithmetic(
                         else => return sema.failWithInvalidPtrArithmetic(block, src, "pointer-integer", "addition and subtraction"),
                     };
 
-                    if (!try sema.typeHasRuntimeBits(lhs_ty.elemType2(mod))) {
+                    if (!try lhs_ty.elemType2(zcu).hasRuntimeBitsSema(pt)) {
                         return sema.fail(block, src, "pointer arithmetic requires element type '{}' to have runtime bits", .{
-                            lhs_ty.elemType2(mod).fmt(pt),
+                            lhs_ty.elemType2(zcu).fmt(pt),
                         });
                     }
                     return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
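
Both pointer arithmetic forms handled here scale by the element size, which is why zero-bit element types are rejected above (a hypothetical test, not part of this change):

    const std = @import("std");

    test "pointer arithmetic counts elements, not bytes" {
        const arr = [_]u32{ 10, 20, 30, 40 };
        const p: [*]const u32 = &arr;
        // ptr + int advances by whole elements...
        try std.testing.expectEqual(@as(u32, 30), (p + 2)[0]);
        // ...and ptr - ptr divides the byte distance exactly by
        // @sizeOf(u32), matching the div_exact emitted above.
        try std.testing.expectEqual(@as(usize, 3), (p + 3) - p);
    }
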
@@ -16503,8 +16502,8 @@ fn analyzeArithmetic(
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_type = resolved_type.scalarType(mod);
-    const scalar_tag = scalar_type.zigTypeTag(mod);
+    const scalar_type = resolved_type.scalarType(zcu);
+    const scalar_tag = scalar_type.zigTypeTag(zcu);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
@@ -16523,12 +16522,12 @@ fn analyzeArithmetic(
                 // overflow (max_int), causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
                         return casted_rhs;
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, rhs_src);
                         } else {
@@ -16541,7 +16540,7 @@ fn analyzeArithmetic(
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .add_optimized else .add;
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef(mod)) {
+                    if (lhs_val.isUndef(zcu)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, lhs_src);
                         } else {
@@ -16567,12 +16566,12 @@ fn analyzeArithmetic(
                 // If either of the operands is zero, the other operand is returned.
                 // If either of the operands is undefined, the result is undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
                         return casted_rhs;
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
@@ -16588,19 +16587,19 @@ fn analyzeArithmetic(
                 // If either of the operands is zero, then the other operand is returned.
                 // If either of the operands is undefined, the result is undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
+                    if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
                         return casted_rhs;
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
                         return casted_lhs;
                     }
                     if (maybe_lhs_val) |lhs_val| {
-                        if (lhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu)) {
                             return pt.undefRef(resolved_type);
                         }
 
@@ -16630,7 +16629,7 @@ fn analyzeArithmetic(
                 // overflow, causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, rhs_src);
                         } else {
@@ -16643,7 +16642,7 @@ fn analyzeArithmetic(
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .sub_optimized else .sub;
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef(mod)) {
+                    if (lhs_val.isUndef(zcu)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, lhs_src);
                         } else {
@@ -16669,7 +16668,7 @@ fn analyzeArithmetic(
                 // If the RHS is zero, then the LHS is returned, even if it is undefined.
                 // If either of the operands is undefined, the result is undefined.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
@@ -16677,7 +16676,7 @@ fn analyzeArithmetic(
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef(mod)) {
+                    if (lhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (maybe_rhs_val) |rhs_val| {
@@ -16690,7 +16689,7 @@ fn analyzeArithmetic(
                 // If the RHS is zero, then the LHS is returned, even if it is undefined.
                 // If either of the operands is undefined, the result is undefined.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
@@ -16698,7 +16697,7 @@ fn analyzeArithmetic(
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef(mod)) {
+                    if (lhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (maybe_rhs_val) |rhs_val| {
@@ -16736,16 +16735,16 @@ fn analyzeArithmetic(
                     else => unreachable,
                 };
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod)) {
-                        if (lhs_val.isNan(mod)) {
+                    if (!lhs_val.isUndef(zcu)) {
+                        if (lhs_val.isNan(zcu)) {
                             return Air.internedToRef(lhs_val.toIntern());
                         }
                         if (try lhs_val.compareAllWithZeroSema(.eq, pt)) lz: {
                             if (maybe_rhs_val) |rhs_val| {
-                                if (rhs_val.isNan(mod)) {
+                                if (rhs_val.isNan(zcu)) {
                                     return Air.internedToRef(rhs_val.toIntern());
                                 }
-                                if (rhs_val.isInf(mod)) {
+                                if (rhs_val.isInf(zcu)) {
                                     return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern());
                                 }
                             } else if (resolved_type.isAnyFloat()) {
@@ -16761,19 +16760,19 @@ fn analyzeArithmetic(
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .mul_optimized else .mul;
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, rhs_src);
                         } else {
                             return pt.undefRef(resolved_type);
                         }
                     }
-                    if (rhs_val.isNan(mod)) {
+                    if (rhs_val.isNan(zcu)) {
                         return Air.internedToRef(rhs_val.toIntern());
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) rz: {
                         if (maybe_lhs_val) |lhs_val| {
-                            if (lhs_val.isInf(mod)) {
+                            if (lhs_val.isInf(zcu)) {
                                 return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern());
                             }
                         } else if (resolved_type.isAnyFloat()) {
@@ -16786,7 +16785,7 @@ fn analyzeArithmetic(
                         return casted_lhs;
                     }
                     if (maybe_lhs_val) |lhs_val| {
-                        if (lhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu)) {
                             if (is_int) {
                                 return sema.failWithUseOfUndef(block, lhs_src);
                             } else {
@@ -16822,7 +16821,7 @@ fn analyzeArithmetic(
                     else => unreachable,
                 };
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod)) {
+                    if (!lhs_val.isUndef(zcu)) {
                         if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                             const zero_val = try sema.splat(resolved_type, scalar_zero);
                             return Air.internedToRef(zero_val.toIntern());
@@ -16833,7 +16832,7 @@ fn analyzeArithmetic(
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
@@ -16844,7 +16843,7 @@ fn analyzeArithmetic(
                         return casted_lhs;
                     }
                     if (maybe_lhs_val) |lhs_val| {
-                        if (lhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu)) {
                             return pt.undefRef(resolved_type);
                         }
                         return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, pt)).toIntern());
@@ -16867,7 +16866,7 @@ fn analyzeArithmetic(
                     else => unreachable,
                 };
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod)) {
+                    if (!lhs_val.isUndef(zcu)) {
                         if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                             const zero_val = try sema.splat(resolved_type, scalar_zero);
                             return Air.internedToRef(zero_val.toIntern());
@@ -16878,7 +16877,7 @@ fn analyzeArithmetic(
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) {
+                    if (rhs_val.isUndef(zcu)) {
                         return pt.undefRef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
@@ -16889,7 +16888,7 @@ fn analyzeArithmetic(
                         return casted_lhs;
                     }
                     if (maybe_lhs_val) |lhs_val| {
-                        if (lhs_val.isUndef(mod)) {
+                        if (lhs_val.isUndef(zcu)) {
                             return pt.undefRef(resolved_type);
                         }
 
@@ -16909,7 +16908,7 @@ fn analyzeArithmetic(
     try sema.requireRuntimeBlock(block, src, runtime_src);
 
     if (block.wantSafety() and want_safety and scalar_tag == .Int) {
-        if (mod.backendSupportsFeature(.safety_checked_instructions)) {
+        if (zcu.backendSupportsFeature(.safety_checked_instructions)) {
             if (air_tag != air_tag_safe) {
                 _ = try sema.preparePanicId(block, src, .integer_overflow);
             }
@@ -16934,7 +16933,7 @@ fn analyzeArithmetic(
                     } },
                 });
                 const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-                const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
+                const any_ov_bit = if (resolved_type.zigTypeTag(zcu) == .Vector)
                     try block.addInst(.{
                         .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
                         .data = .{ .reduce = .{
@@ -16969,11 +16968,11 @@ fn analyzePtrArithmetic(
     // coerce to isize instead of usize.
     const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const opt_ptr_val = try sema.resolveValue(ptr);
     const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
     const ptr_ty = sema.typeOf(ptr);
-    const ptr_info = ptr_ty.ptrInfo(mod);
+    const ptr_info = ptr_ty.ptrInfo(zcu);
     assert(ptr_info.flags.size == .Many or ptr_info.flags.size == .C);
 
     const new_ptr_ty = t: {
@@ -16985,7 +16984,7 @@ fn analyzePtrArithmetic(
         }
         // If the addend is not a comptime-known value we can still count on
         // it being a multiple of the type size.
-        const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
+        const elem_size = try Type.fromInterned(ptr_info.child).abiSizeSema(pt);
         const addend = if (opt_off_val) |off_val| a: {
             const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(pt));
             break :a elem_size * off_int;
@@ -17017,12 +17016,12 @@ fn analyzePtrArithmetic(
     const runtime_src = rs: {
         if (opt_ptr_val) |ptr_val| {
             if (opt_off_val) |offset_val| {
-                if (ptr_val.isUndef(mod)) return pt.undefRef(new_ptr_ty);
+                if (ptr_val.isUndef(zcu)) return pt.undefRef(new_ptr_ty);
 
                 const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(pt));
                 if (offset_int == 0) return ptr;
                 if (air_tag == .ptr_sub) {
-                    const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
+                    const elem_size = try Type.fromInterned(ptr_info.child).abiSizeSema(pt);
                     const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty);
                     return Air.internedToRef(new_ptr_val.toIntern());
                 } else {
@@ -17067,7 +17066,7 @@ fn zirAsm(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
     const src = block.nodeOffset(extra.data.src_node);
     const ret_ty_src = block.src(.{ .node_offset_asm_ret_ty = extra.data.src_node });
@@ -17099,7 +17098,7 @@ fn zirAsm(
         if (is_volatile) {
             return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{});
         }
-        try mod.addGlobalAssembly(sema.owner.unwrap().cau, asm_source);
+        try zcu.addGlobalAssembly(sema.owner.unwrap().cau, asm_source);
         return .void_value;
     }
 
@@ -17153,7 +17152,7 @@ fn zirAsm(
 
         const uncasted_arg = try sema.resolveInst(input.data.operand);
         const uncasted_arg_ty = sema.typeOf(uncasted_arg);
-        switch (uncasted_arg_ty.zigTypeTag(mod)) {
+        switch (uncasted_arg_ty.zigTypeTag(zcu)) {
             .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
             .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
             else => {
@@ -17236,7 +17235,7 @@ fn zirCmpEq(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src: LazySrcLoc = block.nodeOffset(inst_data.src_node);
@@ -17247,18 +17246,18 @@ fn zirCmpEq(
 
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
-    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
+    const lhs_ty_tag = lhs_ty.zigTypeTag(zcu);
+    const rhs_ty_tag = rhs_ty.zigTypeTag(zcu);
     if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
         // null == null, null != null
         return if (op == .eq) .bool_true else .bool_false;
     }
 
     // comparing null with optionals
-    if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) {
+    if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(zcu))) {
         return sema.analyzeIsNull(block, src, rhs, op == .neq);
     }
-    if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) {
+    if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(zcu))) {
         return sema.analyzeIsNull(block, src, lhs, op == .neq);
     }
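
The null-comparison routing above, in user code (a hypothetical test, not part of this change):

    const std = @import("std");

    test "equality against null for optionals" {
        const a: ?u8 = null;
        const b: ?u8 = 5;
        // Both lower to analyzeIsNull rather than a generic compare.
        try std.testing.expect(a == null);
        try std.testing.expect(b != null);
    }
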
 
@@ -17278,11 +17277,11 @@ fn zirCmpEq(
         const runtime_src: LazySrcLoc = src: {
             if (try sema.resolveValue(lhs)) |lval| {
                 if (try sema.resolveValue(rhs)) |rval| {
-                    if (lval.isUndef(mod) or rval.isUndef(mod)) {
+                    if (lval.isUndef(zcu) or rval.isUndef(zcu)) {
                         return pt.undefRef(Type.bool);
                     }
-                    const lkey = mod.intern_pool.indexToKey(lval.toIntern());
-                    const rkey = mod.intern_pool.indexToKey(rval.toIntern());
+                    const lkey = zcu.intern_pool.indexToKey(lval.toIntern());
+                    const rkey = zcu.intern_pool.indexToKey(rval.toIntern());
                     return if ((lkey.err.name == rkey.err.name) == (op == .eq))
                         .bool_true
                     else
@@ -17300,7 +17299,7 @@ fn zirCmpEq(
     if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
         const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs);
         const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs);
-        return if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) .bool_true else .bool_false;
+        return if (lhs_as_type.eql(rhs_as_type, zcu) == (op == .eq)) .bool_true else .bool_false;
     }
     return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true);
 }
@@ -17316,14 +17315,14 @@ fn analyzeCmpUnionTag(
     op: std.math.CompareOperator,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const union_ty = sema.typeOf(un);
     try union_ty.resolveFields(pt);
-    const union_tag_ty = union_ty.unionTagType(mod) orelse {
+    const union_tag_ty = union_ty.unionTagType(zcu) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
             errdefer msg.destroy(sema.gpa);
-            try sema.errNote(union_ty.srcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(pt)});
+            try sema.errNote(union_ty.srcLoc(zcu), msg, "union '{}' is not a tagged union", .{union_ty.fmt(pt)});
             break :msg msg;
         };
         return sema.failWithOwnedErrorMsg(block, msg);
@@ -17334,9 +17333,9 @@ fn analyzeCmpUnionTag(
     const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
 
     if (try sema.resolveValue(coerced_tag)) |enum_val| {
-        if (enum_val.isUndef(mod)) return pt.undefRef(Type.bool);
-        const field_ty = union_ty.unionFieldType(enum_val, mod).?;
-        if (field_ty.zigTypeTag(mod) == .NoReturn) {
+        if (enum_val.isUndef(zcu)) return pt.undefRef(Type.bool);
+        const field_ty = union_ty.unionFieldType(enum_val, zcu).?;
+        if (field_ty.zigTypeTag(zcu) == .NoReturn) {
             return .bool_false;
         }
     }
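
What analyzeCmpUnionTag accepts: a tagged union compared against an enum literal, where only the tag participates (a hypothetical test, not part of this change):

    const std = @import("std");

    test "tagged union compared with an enum literal" {
        const U = union(enum) { a: u8, b: void };
        const u: U = .{ .a = 1 };
        // The tag is compared; payloads are never compared here.
        try std.testing.expect(u == .a);
        try std.testing.expect(u != .b);
    }
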
@@ -17376,33 +17375,33 @@ fn analyzeCmp(
     is_equality_cmp: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
+    if (lhs_ty.zigTypeTag(zcu) != .Optional and rhs_ty.zigTypeTag(zcu) != .Optional) {
         try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     }
 
-    if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
+    if (lhs_ty.zigTypeTag(zcu) == .Vector and rhs_ty.zigTypeTag(zcu) == .Vector) {
         return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
+    if (lhs_ty.isNumeric(zcu) and rhs_ty.isNumeric(zcu)) {
         // This operation allows any combination of integer and float types, regardless of the
         // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
         // numeric types.
         return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
+    if (is_equality_cmp and lhs_ty.zigTypeTag(zcu) == .ErrorUnion and rhs_ty.zigTypeTag(zcu) == .ErrorSet) {
         const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
         return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
+    if (is_equality_cmp and lhs_ty.zigTypeTag(zcu) == .ErrorSet and rhs_ty.zigTypeTag(zcu) == .ErrorUnion) {
         const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
         return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
     }
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
-    if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
+    if (!resolved_type.isSelfComparable(zcu, is_equality_cmp)) {
         return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
             compareOperatorName(op), resolved_type.fmt(pt),
         });
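
The mixed-numeric case routed to cmpNumeric above, where peer type resolution would lose information (a hypothetical test, not part of this change):

    const std = @import("std");

    test "heterogeneous numeric comparison" {
        // Comparing across signedness and width works without coercing
        // to a common peer type, so no operand is truncated.
        const a: u8 = 200;
        const b: i8 = -1;
        try std.testing.expect(a > b);
    }
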
@@ -17434,15 +17433,15 @@ fn cmpSelf(
     rhs_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const resolved_type = sema.typeOf(casted_lhs);
     const runtime_src: LazySrcLoc = src: {
         if (try sema.resolveValue(casted_lhs)) |lhs_val| {
-            if (lhs_val.isUndef(mod)) return pt.undefRef(Type.bool);
+            if (lhs_val.isUndef(zcu)) return pt.undefRef(Type.bool);
             if (try sema.resolveValue(casted_rhs)) |rhs_val| {
-                if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool);
+                if (rhs_val.isUndef(zcu)) return pt.undefRef(Type.bool);
 
-                if (resolved_type.zigTypeTag(mod) == .Vector) {
+                if (resolved_type.zigTypeTag(zcu) == .Vector) {
                     const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
                     return Air.internedToRef(cmp_val.toIntern());
                 }
@@ -17452,7 +17451,7 @@ fn cmpSelf(
                 else
                     .bool_false;
             } else {
-                if (resolved_type.zigTypeTag(mod) == .Bool) {
+                if (resolved_type.zigTypeTag(zcu) == .Bool) {
                     // We can lower bool eq/neq more efficiently.
                     return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
                 }
@@ -17461,9 +17460,9 @@ fn cmpSelf(
         } else {
             // For bools, we still check the other operand, because we can lower
             // bool eq/neq more efficiently.
-            if (resolved_type.zigTypeTag(mod) == .Bool) {
+            if (resolved_type.zigTypeTag(zcu) == .Bool) {
                 if (try sema.resolveValue(casted_rhs)) |rhs_val| {
-                    if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool);
+                    if (rhs_val.isUndef(zcu)) return pt.undefRef(Type.bool);
                     return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
                 }
             }
@@ -17471,7 +17470,7 @@ fn cmpSelf(
         }
     };
     try sema.requireRuntimeBlock(block, src, runtime_src);
-    if (resolved_type.zigTypeTag(mod) == .Vector) {
+    if (resolved_type.zigTypeTag(zcu) == .Vector) {
         return block.addCmpVector(casted_lhs, casted_rhs, op);
     }
     const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized);
@@ -17541,11 +17540,11 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 
 fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .Fn,
         .NoReturn,
         .Undefined,
@@ -17576,7 +17575,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .AnyFrame,
         => {},
     }
-    const bit_size = try operand_ty.bitSizeAdvanced(pt, .sema);
+    const bit_size = try operand_ty.bitSizeSema(pt);
     return pt.intRef(Type.comptime_int, bit_size);
 }
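
The distinction this builtin reports, bit width rather than ABI size (a hypothetical test, not part of this change):

    const std = @import("std");

    test "@bitSizeOf is not 8 * @sizeOf" {
        // Bit size is the logical width; ABI size includes padding.
        try std.testing.expectEqual(@as(u16, 7), @bitSizeOf(u7));
        try std.testing.expectEqual(@as(u16, 8), 8 * @sizeOf(u7));
    }
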
 
@@ -17599,9 +17598,9 @@ fn zirThis(
 
 fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const captures = Type.fromInterned(mod.namespacePtr(block.namespace).owner_type).getCaptures(mod);
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const captures = Type.fromInterned(zcu.namespacePtr(block.namespace).owner_type).getCaptures(zcu);
 
     const src_node: i32 = @bitCast(extended.operand);
     const src = block.nodeOffset(src_node);
@@ -17619,7 +17618,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
         const msg = msg: {
             const name = name: {
                 // TODO: we should probably store this name in the ZIR to avoid this complexity.
-                const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?;
+                const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu).?;
                 const tree = file.getTree(sema.gpa) catch |err| {
                     // In this case we emit a warning + a less precise source location.
                     log.warn("unable to load {s}: {s}", .{
@@ -17647,7 +17646,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     if (!block.is_typeof and !block.is_comptime and sema.func_index != .none) {
         const msg = msg: {
             const name = name: {
-                const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?;
+                const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu).?;
                 const tree = file.getTree(sema.gpa) catch |err| {
                     // In this case we emit a warning + a less precise source location.
                     log.warn("unable to load {s}: {s}", .{
@@ -17816,20 +17815,20 @@ fn zirBuiltinSrc(
 
 fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty = try sema.resolveType(block, src, inst_data.operand);
     const type_info_ty = try pt.getBuiltinType("Type");
-    const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
+    const type_info_tag_ty = type_info_ty.unionTagType(zcu).?;
 
-    if (ty.typeDeclInst(mod)) |type_decl_inst| {
+    if (ty.typeDeclInst(zcu)) |type_decl_inst| {
         try sema.declareDependency(.{ .namespace = type_decl_inst });
     }
 
-    switch (ty.zigTypeTag(mod)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Type,
         .Void,
         .Bool,
@@ -17848,7 +17847,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const fn_info_nav = try sema.namespaceLookup(
                 block,
                 src,
-                type_info_ty.getNamespaceIndex(mod),
+                type_info_ty.getNamespaceIndex(zcu),
                 try ip.getOrPutString(gpa, pt.tid, "Fn", .no_embedded_nulls),
             ) orelse @panic("std.builtin.Type is corrupt");
             try sema.ensureNavResolved(src, fn_info_nav);
@@ -17857,13 +17856,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const param_info_nav = try sema.namespaceLookup(
                 block,
                 src,
-                fn_info_ty.getNamespaceIndex(mod),
+                fn_info_ty.getNamespaceIndex(zcu),
                 try ip.getOrPutString(gpa, pt.tid, "Param", .no_embedded_nulls),
             ) orelse @panic("std.builtin.Type is corrupt");
             try sema.ensureNavResolved(src, param_info_nav);
             const param_info_ty = Type.fromInterned(ip.getNav(param_info_nav).status.resolved.val);
 
-            const func_ty_info = mod.typeToFunc(ty).?;
+            const func_ty_info = zcu.typeToFunc(ty).?;
             const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
             for (param_vals, 0..) |*param_val, i| {
                 const param_ty = func_ty_info.param_types.get(ip)[i];
@@ -17908,7 +17907,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         .is_const = true,
                     },
                 })).toIntern();
-                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
+                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(zcu).toIntern();
                 break :v try pt.intern(.{ .slice = .{
                     .ty = slice_ty,
                     .ptr = try pt.intern(.{ .ptr = .{
@@ -17958,14 +17957,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const int_info_nav = try sema.namespaceLookup(
                 block,
                 src,
-                type_info_ty.getNamespaceIndex(mod),
+                type_info_ty.getNamespaceIndex(zcu),
                 try ip.getOrPutString(gpa, pt.tid, "Int", .no_embedded_nulls),
             ) orelse @panic("std.builtin.Type is corrupt");
             try sema.ensureNavResolved(src, int_info_nav);
             const int_info_ty = Type.fromInterned(ip.getNav(int_info_nav).status.resolved.val);
 
             const signedness_ty = try pt.getBuiltinType("Signedness");
-            const info = ty.intInfo(mod);
+            const info = ty.intInfo(zcu);
             const field_values = .{
                 // signedness: Signedness,
                 (try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
@@ -17985,7 +17984,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const float_info_nav = try sema.namespaceLookup(
                 block,
                 src,
-                type_info_ty.getNamespaceIndex(mod),
+                type_info_ty.getNamespaceIndex(zcu),
                 try ip.getOrPutString(gpa, pt.tid, "Float", .no_embedded_nulls),
             ) orelse @panic("std.builtin.Type is corrupt");
             try sema.ensureNavResolved(src, float_info_nav);
@@ -17993,7 +17992,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const field_vals = .{
                 // bits: u16,
-                (try pt.intValue(Type.u16, ty.bitSize(pt))).toIntern(),
+                (try pt.intValue(Type.u16, ty.bitSize(zcu))).toIntern(),
             };
             return Air.internedToRef((try pt.intern(.{ .un = .{
                 .ty = type_info_ty.toIntern(),
@@ -18005,7 +18004,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             } })));
         },
         .Pointer => {
-            const info = ty.ptrInfo(mod);
+            const info = ty.ptrInfo(zcu);
             const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
                 try pt.intValue(Type.comptime_int, alignment)
             else
@@ -18016,7 +18015,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    (try pt.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try pt.getBuiltinType("Type")).getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Pointer", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18026,7 +18025,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    pointer_ty.getNamespaceIndex(mod),
+                    pointer_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Size", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18068,14 +18067,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Array", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
                 break :t Type.fromInterned(ip.getNav(nav).status.resolved.val);
             };
 
-            const info = ty.arrayInfo(mod);
+            const info = ty.arrayInfo(zcu);
             const field_values = .{
                 // len: comptime_int,
                 (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
@@ -18098,14 +18097,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Vector", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
                 break :t Type.fromInterned(ip.getNav(nav).status.resolved.val);
             };
 
-            const info = ty.arrayInfo(mod);
+            const info = ty.arrayInfo(zcu);
             const field_values = .{
                 // len: comptime_int,
                 (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
@@ -18126,7 +18125,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Optional", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18135,7 +18134,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const field_values = .{
                 // child: type,
-                ty.optionalChild(mod).toIntern(),
+                ty.optionalChild(zcu).toIntern(),
             };
             return Air.internedToRef((try pt.intern(.{ .un = .{
                 .ty = type_info_ty.toIntern(),
@@ -18152,7 +18151,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Error", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18226,7 +18225,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     .ty = array_errors_ty.toIntern(),
                     .storage = .{ .elems = vals },
                 } });
-                const manyptr_errors_ty = slice_errors_ty.slicePtrFieldType(mod).toIntern();
+                const manyptr_errors_ty = slice_errors_ty.slicePtrFieldType(zcu).toIntern();
                 break :v try pt.intern(.{ .slice = .{
                     .ty = slice_errors_ty.toIntern(),
                     .ptr = try pt.intern(.{ .ptr = .{
@@ -18257,7 +18256,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "ErrorUnion", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18266,9 +18265,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const field_values = .{
                 // error_set: type,
-                ty.errorUnionSet(mod).toIntern(),
+                ty.errorUnionSet(zcu).toIntern(),
                 // payload: type,
-                ty.errorUnionPayload(mod).toIntern(),
+                ty.errorUnionPayload(zcu).toIntern(),
             };
             return Air.internedToRef((try pt.intern(.{ .un = .{
                 .ty = type_info_ty.toIntern(),
@@ -18286,7 +18285,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "EnumField", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18298,7 +18297,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const enum_type = ip.loadEnumType(ty.toIntern());
                 const value_val = if (enum_type.values.len > 0)
                     try ip.getCoercedInts(
-                        mod.gpa,
+                        zcu.gpa,
                         pt.tid,
                         ip.indexToKey(enum_type.values.get(ip)[tag_index]).int,
                         .comptime_int_type,
@@ -18361,7 +18360,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         .is_const = true,
                     },
                 })).toIntern();
-                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
+                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(zcu).toIntern();
                 break :v try pt.intern(.{ .slice = .{
                     .ty = slice_ty,
                     .ptr = try pt.intern(.{ .ptr = .{
@@ -18382,7 +18381,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Enum", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18413,7 +18412,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Union", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18424,7 +18423,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "UnionField", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18432,7 +18431,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
 
             try ty.resolveLayout(pt); // Getting alignment requires type layout
-            const union_obj = mod.typeToUnion(ty).?;
+            const union_obj = zcu.typeToUnion(ty).?;
             const tag_type = union_obj.loadTagType(ip);
             const layout = union_obj.flagsUnordered(ip).layout;
 
@@ -18467,7 +18466,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 };
 
                 const alignment = switch (layout) {
-                    .auto, .@"extern" => try pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema),
+                    .auto, .@"extern" => try Type.unionFieldNormalAlignmentAdvanced(
+                        union_obj,
+                        @intCast(field_index),
+                        .sema,
+                        pt.zcu,
+                        pt.tid,
+                    ),
                     .@"packed" => .none,
                 };
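
`pt.unionFieldNormalAlignmentAdvanced(...)` turns into a `Type` namespace function taking `pt.zcu` and `pt.tid` explicitly (the struct-field alignment change further down reshapes its signature the same way). A simplified before/after sketch, with stand-in types:

const Zcu = struct {};
const PerThread = struct { zcu: *Zcu, tid: u8 };
const Alignment = u8;

// Before (sketch): everything bundled behind the per-thread handle.
fn alignmentBefore(pt: PerThread, field_index: u32) Alignment {
    _ = pt;
    return @truncate(field_index); // placeholder result
}

// After (sketch): the dependencies are spelled out at the call site.
fn alignmentAfter(field_index: u32, zcu: *const Zcu, tid: u8) Alignment {
    _ = zcu;
    _ = tid;
    return @truncate(field_index); // placeholder result
}

test "same result, explicit deps" {
    var zcu: Zcu = .{};
    const pt: PerThread = .{ .zcu = &zcu, .tid = 0 };
    try @import("std").testing.expectEqual(alignmentBefore(pt, 3), alignmentAfter(3, &zcu, 0));
}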
 
@@ -18502,7 +18507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         .is_const = true,
                     },
                 })).toIntern();
-                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
+                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(zcu).toIntern();
                 break :v try pt.intern(.{ .slice = .{
                     .ty = slice_ty,
                     .ptr = try pt.intern(.{ .ptr = .{
@@ -18517,18 +18522,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 } });
             };
 
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod).toOptional());
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(zcu).toOptional());
 
             const enum_tag_ty_val = try pt.intern(.{ .opt = .{
                 .ty = (try pt.optionalType(.type_type)).toIntern(),
-                .val = if (ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none,
+                .val = if (ty.unionTagType(zcu)) |tag_ty| tag_ty.toIntern() else .none,
             } });
 
             const container_layout_ty = t: {
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    (try pt.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try pt.getBuiltinType("Type")).getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18560,7 +18565,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Struct", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18571,7 +18576,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "StructField", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18633,7 +18638,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                                 // is_comptime: bool,
                                 Value.makeBool(is_comptime).toIntern(),
                                 // alignment: comptime_int,
-                                (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(pt).toByteUnits() orelse 0)).toIntern(),
+                                (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(),
                             };
                             struct_field_val.* = try pt.intern(.{ .aggregate = .{
                                 .ty = struct_field_ty.toIntern(),
@@ -18686,11 +18691,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     const default_val_ptr = try sema.optRefValue(opt_default_val);
                     const alignment = switch (struct_type.layout) {
                         .@"packed" => .none,
-                        else => try pt.structFieldAlignmentAdvanced(
+                        else => try field_ty.structFieldAlignmentAdvanced(
                             struct_type.fieldAlign(ip, field_index),
-                            field_ty,
                             struct_type.layout,
                             .sema,
+                            pt.zcu,
+                            pt.tid,
                         ),
                     };
 
@@ -18729,7 +18735,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         .is_const = true,
                     },
                 })).toIntern();
-                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern();
+                const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(zcu).toIntern();
                 break :v try pt.intern(.{ .slice = .{
                     .ty = slice_ty,
                     .ptr = try pt.intern(.{ .ptr = .{
@@ -18744,12 +18750,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 } });
             };
 
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod));
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(zcu));
 
             const backing_integer_val = try pt.intern(.{ .opt = .{
                 .ty = (try pt.optionalType(.type_type)).toIntern(),
-                .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
-                    assert(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)).isInt(mod));
+                .val = if (zcu.typeToPackedStruct(ty)) |packed_struct| val: {
+                    assert(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)).isInt(zcu));
                     break :val packed_struct.backingIntTypeUnordered(ip);
                 } else .none,
             } });
@@ -18758,14 +18764,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    (try pt.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try pt.getBuiltinType("Type")).getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
                 break :t Type.fromInterned(ip.getNav(nav).status.resolved.val);
             };
 
-            const layout = ty.containerLayout(mod);
+            const layout = ty.containerLayout(zcu);
 
             const field_values = [_]InternPool.Index{
                 // layout: ContainerLayout,
@@ -18777,7 +18783,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // decls: []const Declaration,
                 decls_val,
                 // is_tuple: bool,
-                Value.makeBool(ty.isTuple(mod)).toIntern(),
+                Value.makeBool(ty.isTuple(zcu)).toIntern(),
             };
             return Air.internedToRef((try pt.intern(.{ .un = .{
                 .ty = type_info_ty.toIntern(),
@@ -18793,7 +18799,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const nav = try sema.namespaceLookup(
                     block,
                     src,
-                    type_info_ty.getNamespaceIndex(mod),
+                    type_info_ty.getNamespaceIndex(zcu),
                     try ip.getOrPutString(gpa, pt.tid, "Opaque", .no_embedded_nulls),
                 ) orelse @panic("std.builtin.Type is corrupt");
                 try sema.ensureNavResolved(src, nav);
@@ -18801,7 +18807,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
 
             try ty.resolveFields(pt);
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod));
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(zcu));
 
             const field_values = .{
                 // decls: []const Declaration,
@@ -19000,11 +19006,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
 
 fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (operand.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (operand.zigTypeTag(zcu)) {
         .ComptimeInt => return Type.comptime_int,
         .Int => {
-            const bits = operand.bitSize(pt);
+            const bits = operand.bitSize(zcu);
             const count = if (bits == 0)
                 0
             else blk: {
@@ -19018,10 +19024,10 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
             return pt.intType(.unsigned, count);
         },
         .Vector => {
-            const elem_ty = operand.elemType2(mod);
+            const elem_ty = operand.elemType2(zcu);
             const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
             return pt.vectorType(.{
-                .len = operand.vectorLen(mod),
+                .len = operand.vectorLen(zcu),
                 .child = log2_elem_ty.toIntern(),
             });
         },
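
For an N-bit integer, the shift-amount type computed here is an unsigned integer of ceil(log2(N)) bits (zero bits when N == 0). A runnable check of that arithmetic using std.math, independent of the compiler internals above:

const std = @import("std");

test "log2 int type width" {
    // u64 shifts take a u6: ceil(log2(64)) == 6.
    try std.testing.expectEqual(@as(u16, 6), std.math.log2_int_ceil(u16, 64));
    // Non-power-of-two widths round up: ceil(log2(48)) == 6 as well.
    try std.testing.expectEqual(@as(u16, 6), std.math.log2_int_ceil(u16, 48));
    // The language-level shift type agrees.
    try std.testing.expect(std.math.Log2Int(u64) == u6);
}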
@@ -19084,7 +19090,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
@@ -19092,7 +19098,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 
     const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
     if (try sema.resolveValue(operand)) |val| {
-        return if (val.isUndef(mod))
+        return if (val.isUndef(zcu))
             pt.undefRef(Type.bool)
         else if (val.toBool()) .bool_false else .bool_true;
     }
@@ -19110,7 +19116,7 @@ fn zirBoolBr(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
 
     const datas = sema.code.instructions.items(.data);
@@ -19134,7 +19140,7 @@ fn zirBoolBr(
         // is simply the rhs expression. Here we rely on there only being 1
         // break instruction (`break_inline`).
         const rhs_result = try sema.resolveInlineBody(parent_block, body, inst);
-        if (sema.typeOf(rhs_result).isNoReturn(mod)) {
+        if (sema.typeOf(rhs_result).isNoReturn(zcu)) {
             return rhs_result;
         }
         return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src);
@@ -19168,7 +19174,7 @@ fn zirBoolBr(
     _ = try lhs_block.addBr(block_inst, lhs_result);
 
     const rhs_result = try sema.resolveInlineBody(rhs_block, body, inst);
-    const rhs_noret = sema.typeOf(rhs_result).isNoReturn(mod);
+    const rhs_noret = sema.typeOf(rhs_result).isNoReturn(zcu);
     const coerced_rhs_result = if (!rhs_noret) rhs: {
         const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src);
         _ = try rhs_block.addBr(block_inst, coerced_result);
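
zirBoolBr lowers `and`/`or` into a block whose two sub-blocks each `br` their result, so the right-hand side is evaluated only when the left-hand side does not already decide the answer. The observable semantics, as a runnable sketch:

const std = @import("std");

test "`or` short-circuits at runtime" {
    var lhs = true;
    _ = &lhs; // keep lhs runtime-known for this sketch
    var rhs_evaluated = false;
    const result = lhs or blk: {
        rhs_evaluated = true;
        break :blk false;
    };
    try std.testing.expect(result);
    try std.testing.expect(!rhs_evaluated);
}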
@@ -19227,10 +19233,10 @@ fn finishCondBr(
 
 fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Optional, .Null, .Undefined => return,
-        .Pointer => if (ty.isPtrLikeOptional(mod)) return,
+        .Pointer => if (ty.isPtrLikeOptional(zcu)) return,
         else => {},
     }
     return sema.failWithExpectedOptionalType(block, src, ty);
@@ -19260,11 +19266,11 @@ fn zirIsNonNullPtr(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ptr = try sema.resolveInst(inst_data.operand);
-    try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod));
+    try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(zcu));
     if ((try sema.resolveValue(ptr)) == null) {
         return block.addUnOp(.is_non_null_ptr, ptr);
     }
@@ -19274,8 +19280,8 @@ fn zirIsNonNullPtr(
 
 fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .ErrorSet, .ErrorUnion, .Undefined => return,
         else => return sema.fail(block, src, "expected error union type, found '{}'", .{
             ty.fmt(pt),
@@ -19299,11 +19305,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ptr = try sema.resolveInst(inst_data.operand);
-    try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod));
+    try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(zcu));
     const loaded = try sema.analyzeLoad(block, src, ptr, src);
     return sema.analyzeIsNonErr(block, src, loaded);
 }
@@ -19327,7 +19333,7 @@ fn zirCondbr(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const cond_src = parent_block.src(.{ .node_offset_if_cond = inst_data.src_node });
     const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
@@ -19368,8 +19374,8 @@ fn zirCondbr(
         const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node;
         const err_operand = try sema.resolveInst(err_inst_data.operand);
         const operand_ty = sema.typeOf(err_operand);
-        assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
-        const result_ty = operand_ty.errorUnionSet(mod);
+        assert(operand_ty.zigTypeTag(zcu) == .ErrorUnion);
+        const result_ty = operand_ty.errorUnionSet(zcu);
         break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
     };
 
@@ -19403,8 +19409,8 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
     const err_union = try sema.resolveInst(extra.data.operand);
     const err_union_ty = sema.typeOf(err_union);
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
+    const zcu = pt.zcu;
+    if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
         return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
             err_union_ty.fmt(pt),
         });
@@ -19452,8 +19458,8 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
     const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src);
     const err_union_ty = sema.typeOf(err_union);
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
+    const zcu = pt.zcu;
+    if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
         return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
             err_union_ty.fmt(pt),
         });
@@ -19477,9 +19483,9 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
     try sema.analyzeBodyInner(&sub_block, body);
 
     const operand_ty = sema.typeOf(operand);
-    const ptr_info = operand_ty.ptrInfo(mod);
+    const ptr_info = operand_ty.ptrInfo(zcu);
     const res_ty = try pt.ptrTypeSema(.{
-        .child = err_union_ty.errorUnionPayload(mod).toIntern(),
+        .child = err_union_ty.errorUnionPayload(zcu).toIntern(),
         .flags = .{
             .is_const = ptr_info.flags.is_const,
             .is_volatile = ptr_info.flags.is_volatile,
@@ -19594,10 +19600,10 @@ fn zirRetErrValue(
     inst: Zir.Inst.Index,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
     const src = block.tokenOffset(inst_data.src_tok);
-    const err_name = try mod.intern_pool.getOrPutString(
+    const err_name = try zcu.intern_pool.getOrPutString(
         sema.gpa,
         pt.tid,
         inst_data.get(sema.code),
@@ -19622,7 +19628,7 @@ fn zirRetImplicit(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
     const r_brace_src = block.tokenOffset(inst_data.src_tok);
     if (block.inlining == null and sema.func_is_naked) {
@@ -19638,7 +19644,7 @@ fn zirRetImplicit(
 
     const operand = try sema.resolveInst(inst_data.operand);
     const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = 0 });
-    const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod);
+    const base_tag = sema.fn_ret_ty.baseZigTypeTag(zcu);
     if (base_tag == .NoReturn) {
         const msg = msg: {
             const msg = try sema.errMsg(ret_ty_src, "function declared '{}' implicitly returns", .{
@@ -19755,13 +19761,13 @@ fn retWithErrTracing(
 
 fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    return fn_ret_ty.isError(mod) and mod.comp.config.any_error_tracing;
+    const zcu = pt.zcu;
+    return fn_ret_ty.isError(zcu) and zcu.comp.config.any_error_tracing;
 }
 
 fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].save_err_ret_index;
 
     if (!block.ownerModule().error_tracing) return;
@@ -19772,7 +19778,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     const save_index = inst_data.operand == .none or b: {
         const operand = try sema.resolveInst(inst_data.operand);
         const operand_ty = sema.typeOf(operand);
-        break :b operand_ty.isError(mod);
+        break :b operand_ty.isError(zcu);
     };
 
     if (save_index)
@@ -19792,7 +19798,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: {
         var block = start_block;
@@ -19830,13 +19836,13 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
         if (is_non_error) return;
 
         const saved_index_val = try sema.resolveDefinedValue(start_block, src, saved_index);
-        const saved_index_int = saved_index_val.?.toUnsignedInt(pt);
+        const saved_index_int = saved_index_val.?.toUnsignedInt(zcu);
         assert(saved_index_int <= sema.comptime_err_ret_trace.items.len);
         sema.comptime_err_ret_trace.items.len = @intCast(saved_index_int);
         return;
     }
 
-    if (!mod.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).calls_or_awaits_errorable_fn) return;
+    if (!zcu.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).calls_or_awaits_errorable_fn) return;
     if (!start_block.ownerModule().error_tracing) return;
 
     assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere
@@ -19846,10 +19852,10 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
 
 fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
-    const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern();
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    assert(sema.fn_ret_ty.zigTypeTag(zcu) == .ErrorUnion);
+    const err_set_ty = sema.fn_ret_ty.errorUnionSet(zcu).toIntern();
     switch (err_set_ty) {
         .adhoc_inferred_error_set_type => {
             const ies = sema.fn_ret_ty_ies.?;
@@ -19867,11 +19873,11 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
 fn addToInferredErrorSetPtr(sema: *Sema, ies: *InferredErrorSet, op_ty: Type) !void {
     const arena = sema.arena;
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (op_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (op_ty.zigTypeTag(zcu)) {
         .ErrorSet => try ies.addErrorSet(op_ty, ip, arena),
-        .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, arena),
+        .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(zcu), ip, arena),
         else => {},
     }
 }
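
addToInferredErrorSet folds the error set of each returned operand into the enclosing function's inferred error set; addToInferredErrorSetPtr handles both bare error sets and error unions. At the language level this is the `!T` return type; a small runnable example:

const std = @import("std");

// `!u4` gives parseDigit an inferred error set; each `return error.X`
// adds X to it, which is what the Sema code above models.
fn parseDigit(c: u8) !u4 {
    if (c < '0' or c > '9') return error.NotADigit;
    return @intCast(c - '0');
}

test "inferred error set accumulates returned errors" {
    try std.testing.expectEqual(@as(u4, 7), try parseDigit('7'));
    try std.testing.expectError(error.NotADigit, parseDigit('x'));
}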
@@ -19887,8 +19893,8 @@ fn analyzeRet(
     // add the error tag to the inferred error set of the in-scope function, so
     // that the coercion below works correctly.
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
+    const zcu = pt.zcu;
+    if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(zcu) == .ErrorUnion) {
         try sema.addToInferredErrorSet(uncasted_operand);
     }
     const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, operand_src, .{ .is_ret = true }) catch |err| switch (err) {
@@ -19903,7 +19909,7 @@ fn analyzeRet(
             });
             inlining.comptime_result = operand;
 
-            if (sema.fn_ret_ty.isError(mod) and ret_val.getErrorName(mod) != .none) {
+            if (sema.fn_ret_ty.isError(zcu) and ret_val.getErrorName(zcu) != .none) {
                 try sema.comptime_err_ret_trace.append(src);
             }
             return error.ComptimeReturn;
@@ -19955,7 +19961,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].ptr_type;
     const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
     const elem_ty_src = block.src(.{ .node_offset_ptr_elem = extra.data.src_node });
@@ -19968,7 +19974,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const elem_ty = blk: {
         const air_inst = try sema.resolveInst(extra.data.elem_type);
         const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| {
-            if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) {
+            if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(zcu)) {
                 try sema.errNote(elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{});
             }
             return err;
@@ -19977,10 +19983,10 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         break :blk ty;
     };
 
-    if (elem_ty.zigTypeTag(mod) == .NoReturn)
+    if (elem_ty.zigTypeTag(zcu) == .NoReturn)
         return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});
 
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
 
     var extra_i = extra.end;
 
@@ -20003,14 +20009,14 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         });
         // Check if this happens to be the lazy alignment of our element type, in
         // which case we can make this 0 without resolving it.
-        switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .int => |int| switch (int.storage) {
                 .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none,
                 else => {},
             },
             else => {},
         }
-        const align_bytes = (try val.getUnsignedIntAdvanced(pt, .sema)).?;
+        const align_bytes = (try val.getUnsignedIntSema(pt)).?;
         break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes);
     } else .none;
 
@@ -20018,7 +20024,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
         extra_i += 1;
         break :blk try sema.resolveAddressSpace(block, addrspace_src, ref, .pointer);
-    } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic;
+    } else if (elem_ty.zigTypeTag(zcu) == .Fn and target.cpu.arch == .avr) .flash else .generic;
 
     const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: {
         const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
@@ -20044,7 +20050,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
                 elem_ty.fmt(pt), bit_offset, bit_offset - host_size * 8, host_size,
             });
         }
-        const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, .sema);
+        const elem_bit_size = try elem_ty.bitSizeSema(pt);
         if (elem_bit_size > host_size * 8 - bit_offset) {
             return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{
                 elem_ty.fmt(pt), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size,
@@ -20052,11 +20058,11 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         }
     }
 
-    if (elem_ty.zigTypeTag(mod) == .Fn) {
+    if (elem_ty.zigTypeTag(zcu) == .Fn) {
         if (inst_data.size != .One) {
             return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
         }
-    } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
+    } else if (inst_data.size == .Many and elem_ty.zigTypeTag(zcu) == .Opaque) {
         return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{});
     } else if (inst_data.size == .C) {
         if (!try sema.validateExternType(elem_ty, .other)) {
@@ -20071,7 +20077,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         }
-        if (elem_ty.zigTypeTag(mod) == .Opaque) {
+        if (elem_ty.zigTypeTag(zcu) == .Opaque) {
             return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{});
         }
     }
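
The packed-pointer diagnostics above encode a fit check: a field of `elem_bit_size` bits starting at `bit_offset` fits in a `host_size`-byte host integer iff `elem_bit_size <= host_size * 8 - bit_offset`. Worked through with the same names:

const std = @import("std");

test "packed field must fit its host integer" {
    const host_size: u16 = 4; // bytes, so 32 host bits
    const bit_offset: u16 = 28;
    const elem_bit_size: u16 = 5;
    // 5 > 32 - 28: the field ends 1 bit past the host integer, an error above.
    try std.testing.expect(elem_bit_size > host_size * 8 - bit_offset);
}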
@@ -20113,9 +20119,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     const ty_src = block.src(.{ .node_offset_init_ty = inst_data.src_node });
     const obj_ty = try sema.resolveType(block, ty_src, inst_data.operand);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    switch (obj_ty.zigTypeTag(mod)) {
+    switch (obj_ty.zigTypeTag(zcu)) {
         .Struct => return sema.structInitEmpty(block, obj_ty, src, src),
         .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty),
         .Void => return Air.internedToRef(Value.void.toIntern()),
@@ -20129,7 +20135,7 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty_operand = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) {
@@ -20138,21 +20144,21 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is
         else => |e| return e,
     };
     const init_ty = if (is_byref) ty: {
-        const ptr_ty = ty_operand.optEuBaseType(mod);
-        assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
-        if (!ptr_ty.isSlice(mod)) {
-            break :ty ptr_ty.childType(mod);
+        const ptr_ty = ty_operand.optEuBaseType(zcu);
+        assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+        if (!ptr_ty.isSlice(zcu)) {
+            break :ty ptr_ty.childType(zcu);
         }
         // To make `&.{}` a `[:s]T`, the init should be a `[0:s]T`.
         break :ty try pt.arrayType(.{
             .len = 0,
-            .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
-            .child = ptr_ty.childType(mod).toIntern(),
+            .sentinel = if (ptr_ty.sentinel(zcu)) |s| s.toIntern() else .none,
+            .child = ptr_ty.childType(zcu).toIntern(),
         });
     } else ty_operand;
-    const obj_ty = init_ty.optEuBaseType(mod);
+    const obj_ty = init_ty.optEuBaseType(zcu);
 
-    const empty_ref = switch (obj_ty.zigTypeTag(mod)) {
+    const empty_ref = switch (obj_ty.zigTypeTag(zcu)) {
         .Struct => try sema.structInitEmpty(block, obj_ty, src, src),
         .Array, .Vector => try sema.arrayInitEmpty(block, src, obj_ty),
         .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}),
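
The byref branch above builds a `[0:s]T` array type so that `&.{}` can coerce to a sentinel-terminated slice. Observable from user code:

const std = @import("std");

test "&.{} coerces to a sentinel-terminated slice" {
    const s: [:0]const u8 = &.{};
    try std.testing.expectEqual(@as(usize, 0), s.len);
    try std.testing.expectEqual(@as(u8, 0), s[s.len]); // the sentinel itself
}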
@@ -20176,13 +20182,13 @@ fn structInitEmpty(
     init_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     // This logic must be synchronized with that in `zirStructInit`.
     try struct_ty.resolveFields(pt);
 
     // The init values to use for the struct instance.
-    const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod));
+    const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(zcu));
     defer gpa.free(field_inits);
     @memset(field_inits, .none);
 
@@ -20191,10 +20197,10 @@ fn structInitEmpty(
 
 fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const arr_len = obj_ty.arrayLen(mod);
+    const zcu = pt.zcu;
+    const arr_len = obj_ty.arrayLen(zcu);
     if (arr_len != 0) {
-        if (obj_ty.zigTypeTag(mod) == .Array) {
+        if (obj_ty.zigTypeTag(zcu) == .Array) {
             return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
         } else {
             return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
@@ -20235,14 +20241,14 @@ fn unionInit(
     field_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
-    const field_ty = Type.fromInterned(mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
+    const field_ty = Type.fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
     const init = try sema.coerce(block, field_ty, uncasted_init, init_src);
 
     if (try sema.resolveValue(init)) |init_val| {
-        const tag_ty = union_ty.unionTagTypeHypothetical(mod);
+        const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
         const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
         return Air.internedToRef((try pt.intern(.{ .un = .{
             .ty = union_ty.toIntern(),
@@ -20269,8 +20275,8 @@ fn zirStructInit(
     const src = block.nodeOffset(inst_data.src_node);
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
     const first_field_type_data = zir_datas[@intFromEnum(first_item.field_type)].pl_node;
     const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
@@ -20281,26 +20287,26 @@ fn zirStructInit(
         },
         else => |e| return e,
     };
-    const resolved_ty = result_ty.optEuBaseType(mod);
+    const resolved_ty = result_ty.optEuBaseType(zcu);
     try resolved_ty.resolveLayout(pt);
 
-    if (resolved_ty.zigTypeTag(mod) == .Struct) {
+    if (resolved_ty.zigTypeTag(zcu) == .Struct) {
         // This logic must be synchronized with that in `zirStructInitEmpty`.
 
         // Maps field index to field_type index of where it was already initialized.
         // For making sure all fields are accounted for and no fields are duplicated.
-        const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod));
+        const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(zcu));
         defer gpa.free(found_fields);
 
         // The init values to use for the struct instance.
-        const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod));
+        const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(zcu));
         defer gpa.free(field_inits);
         @memset(field_inits, .none);
 
         var field_i: u32 = 0;
         var extra_index = extra.end;
 
-        const is_packed = resolved_ty.containerLayout(mod) == .@"packed";
+        const is_packed = resolved_ty.containerLayout(zcu) == .@"packed";
         while (field_i < extra.data.fields_len) : (field_i += 1) {
             const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
             extra_index = item.end;
@@ -20314,14 +20320,14 @@ fn zirStructInit(
                 sema.code.nullTerminatedString(field_type_extra.name_start),
                 .no_embedded_nulls,
             );
-            const field_index = if (resolved_ty.isTuple(mod))
+            const field_index = if (resolved_ty.isTuple(zcu))
                 try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src)
             else
                 try sema.structFieldIndex(block, resolved_ty, field_name, field_src);
             assert(field_inits[field_index] == .none);
             found_fields[field_index] = item.data.field_type;
             const uncoerced_init = try sema.resolveInst(item.data.init);
-            const field_ty = resolved_ty.structFieldType(field_index, mod);
+            const field_ty = resolved_ty.structFieldType(field_index, zcu);
             field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src);
             if (!is_packed) {
                 try resolved_ty.resolveStructFieldInits(pt);
@@ -20332,7 +20338,7 @@ fn zirStructInit(
                         });
                     };
 
-                    if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) {
+                    if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, zcu), zcu)) {
                         return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index);
                     }
                 }
@@ -20340,7 +20346,7 @@ fn zirStructInit(
         }
 
         return sema.finishStructInit(block, src, src, field_inits, resolved_ty, result_ty, is_ref);
-    } else if (resolved_ty.zigTypeTag(mod) == .Union) {
+    } else if (resolved_ty.zigTypeTag(zcu) == .Union) {
         if (extra.data.fields_len != 1) {
             return sema.fail(block, src, "union initialization expects exactly one field", .{});
         }
@@ -20357,11 +20363,11 @@ fn zirStructInit(
             .no_embedded_nulls,
         );
         const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
-        const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
+        const tag_ty = resolved_ty.unionTagTypeHypothetical(zcu);
         const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
-        const field_ty = Type.fromInterned(mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
+        const field_ty = Type.fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
 
-        if (field_ty.zigTypeTag(mod) == .NoReturn) {
+        if (field_ty.zigTypeTag(zcu) == .NoReturn) {
             return sema.failWithOwnedErrorMsg(block, msg: {
                 const msg = try sema.errMsg(src, "cannot initialize 'noreturn' field of union", .{});
                 errdefer msg.destroy(sema.gpa);
@@ -20388,7 +20394,7 @@ fn zirStructInit(
             return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
         }
 
-        if (try sema.typeRequiresComptime(resolved_ty)) {
+        if (try resolved_ty.comptimeOnlySema(pt)) {
             return sema.failWithNeededComptime(block, field_src, .{
                 .needed_comptime_reason = "initializer of comptime only union must be comptime-known",
             });
@@ -20397,7 +20403,7 @@ fn zirStructInit(
         try sema.validateRuntimeValue(block, field_src, init_inst);
 
         if (is_ref) {
-            const target = mod.getTarget();
+            const target = zcu.getTarget();
             const alloc_ty = try pt.ptrTypeSema(.{
                 .child = result_ty.toIntern(),
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
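
`sema.typeRequiresComptime(ty)` becomes `ty.comptimeOnlySema(pt)`, moving the query onto `Type` next to the other resolution helpers. What a comptime-only union means at the language level, as a runnable sketch:

// A union (or struct) with a `type` field can only hold comptime-known
// values; initializing one from a runtime value is rejected, which is the
// failure path guarded above.
const OnlyComptime = union(enum) { t: type, n: comptime_int };

test "comptime-only values live at comptime" {
    comptime {
        const v: OnlyComptime = .{ .t = u8 };
        _ = v;
    }
}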
@@ -20429,10 +20435,10 @@ fn finishStructInit(
     is_ref: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
-    var root_msg: ?*Module.ErrorMsg = null;
+    var root_msg: ?*Zcu.ErrorMsg = null;
     errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
     switch (ip.indexToKey(struct_ty.toIntern())) {
@@ -20545,7 +20551,7 @@ fn finishStructInit(
         return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
     };
 
-    if (try sema.typeRequiresComptime(struct_ty)) {
+    if (try struct_ty.comptimeOnlySema(pt)) {
         return sema.failWithNeededComptime(block, block.src(.{ .init_elem = .{
             .init_node_offset = init_src.offset.node_offset.x,
             .elem_index = @intCast(runtime_index),
@@ -20560,7 +20566,7 @@ fn finishStructInit(
 
     if (is_ref) {
         try struct_ty.resolveLayout(pt);
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
         const alloc_ty = try pt.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
@@ -20612,9 +20618,9 @@ fn structInitAnon(
     is_ref: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const zir_datas = sema.code.instructions.items(.data);
 
     const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len);
@@ -20642,11 +20648,11 @@ fn structInitAnon(
                 },
             };
 
-            field_name.* = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
+            field_name.* = try zcu.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
 
             const init = try sema.resolveInst(item.data.init);
             field_ty.* = sema.typeOf(init).toIntern();
-            if (Type.fromInterned(field_ty.*).zigTypeTag(mod) == .Opaque) {
+            if (Type.fromInterned(field_ty.*).zigTypeTag(zcu) == .Opaque) {
                 const msg = msg: {
                     const field_src = block.src(.{ .init_elem = .{
                         .init_node_offset = src.offset.node_offset.x,
@@ -20690,7 +20696,7 @@ fn structInitAnon(
     } }));
 
     if (is_ref) {
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
         const alloc_ty = try pt.ptrTypeSema(.{
             .child = tuple_ty,
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
@@ -20740,7 +20746,7 @@ fn zirArrayInit(
     is_ref: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
@@ -20756,14 +20762,14 @@ fn zirArrayInit(
         },
         else => |e| return e,
     };
-    const array_ty = result_ty.optEuBaseType(mod);
-    const is_tuple = array_ty.zigTypeTag(mod) == .Struct;
-    const sentinel_val = array_ty.sentinel(mod);
+    const array_ty = result_ty.optEuBaseType(zcu);
+    const is_tuple = array_ty.zigTypeTag(zcu) == .Struct;
+    const sentinel_val = array_ty.sentinel(zcu);
 
-    var root_msg: ?*Module.ErrorMsg = null;
+    var root_msg: ?*Zcu.ErrorMsg = null;
     errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
-    const final_len = try sema.usizeCast(block, src, array_ty.arrayLenIncludingSentinel(mod));
+    const final_len = try sema.usizeCast(block, src, array_ty.arrayLenIncludingSentinel(zcu));
     const resolved_args = try gpa.alloc(Air.Inst.Ref, final_len);
     defer gpa.free(resolved_args);
     for (resolved_args, 0..) |*dest, i| {
@@ -20773,7 +20779,7 @@ fn zirArrayInit(
         } });
         // Fewer inits than needed.
         if (i + 2 > args.len) if (is_tuple) {
-            const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
+            const default_val = array_ty.structFieldDefaultValue(i, zcu).toIntern();
             if (default_val == .unreachable_value) {
                 const template = "missing tuple field with index {d}";
                 if (root_msg) |msg| {
@@ -20793,12 +20799,12 @@ fn zirArrayInit(
         const arg = args[i + 1];
         const resolved_arg = try sema.resolveInst(arg);
         const elem_ty = if (is_tuple)
-            array_ty.structFieldType(i, mod)
+            array_ty.structFieldType(i, zcu)
         else
-            array_ty.elemType2(mod);
+            array_ty.elemType2(zcu);
         dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
         if (is_tuple) {
-            if (array_ty.structFieldIsComptime(i, mod))
+            if (array_ty.structFieldIsComptime(i, zcu))
                 try array_ty.resolveStructFieldInits(pt);
             if (try array_ty.structFieldValueComptime(pt, i)) |field_val| {
                 const init_val = try sema.resolveValue(dest.*) orelse {
@@ -20806,7 +20812,7 @@ fn zirArrayInit(
                         .needed_comptime_reason = "value stored in comptime field must be comptime-known",
                     });
                 };
-                if (!field_val.eql(init_val, elem_ty, mod)) {
+                if (!field_val.eql(init_val, elem_ty, zcu)) {
                     return sema.failWithInvalidComptimeFieldStore(block, elem_src, array_ty, i);
                 }
             }
@@ -20845,7 +20851,7 @@ fn zirArrayInit(
     } }));
 
     if (is_ref) {
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
         const alloc_ty = try pt.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
@@ -20856,7 +20862,7 @@ fn zirArrayInit(
         if (is_tuple) {
             for (resolved_args, 0..) |arg, i| {
                 const elem_ptr_ty = try pt.ptrTypeSema(.{
-                    .child = array_ty.structFieldType(i, mod).toIntern(),
+                    .child = array_ty.structFieldType(i, zcu).toIntern(),
                     .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
                 });
                 const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
@@ -20869,7 +20875,7 @@ fn zirArrayInit(
         }
 
         const elem_ptr_ty = try pt.ptrTypeSema(.{
-            .child = array_ty.elemType2(mod).toIntern(),
+            .child = array_ty.elemType2(zcu).toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
         const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
@@ -20906,9 +20912,9 @@ fn arrayInitAnon(
     is_ref: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     const types = try sema.arena.alloc(InternPool.Index, operands.len);
     const values = try sema.arena.alloc(InternPool.Index, operands.len);
@@ -20919,7 +20925,7 @@ fn arrayInitAnon(
             const operand_src = src; // TODO better source location
             const elem = try sema.resolveInst(operand);
             types[i] = sema.typeOf(elem).toIntern();
-            if (Type.fromInterned(types[i]).zigTypeTag(mod) == .Opaque) {
+            if (Type.fromInterned(types[i]).zigTypeTag(zcu) == .Opaque) {
                 const msg = msg: {
                     const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                     errdefer msg.destroy(gpa);
@@ -21003,8 +21009,8 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 
 fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
     const ty_src = block.nodeOffset(inst_data.src_node);
@@ -21017,7 +21023,7 @@ fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
         error.GenericPoison => return .generic_poison_type,
         else => |e| return e,
     };
-    const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(mod);
+    const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(zcu);
     const zir_field_name = sema.code.nullTerminatedString(extra.name_start);
     const field_name = try ip.getOrPutString(sema.gpa, pt.tid, zir_field_name, .no_embedded_nulls);
     return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src);
@@ -21032,12 +21038,12 @@ fn fieldType(
     ty_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     var cur_ty = aggregate_ty;
     while (true) {
         try cur_ty.resolveFields(pt);
-        switch (cur_ty.zigTypeTag(mod)) {
+        switch (cur_ty.zigTypeTag(zcu)) {
             .Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
                 .anon_struct_type => |anon_struct| {
                     const field_index = if (anon_struct.names.len == 0)
@@ -21056,7 +21062,7 @@ fn fieldType(
                 else => unreachable,
             },
             .Union => {
-                const union_obj = mod.typeToUnion(cur_ty).?;
+                const union_obj = zcu.typeToUnion(cur_ty).?;
                 const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
                     return sema.failWithBadUnionFieldAccess(block, cur_ty, union_obj, field_src, field_name);
                 const field_ty = union_obj.field_types.get(ip)[field_index];
@@ -21069,7 +21075,7 @@ fn fieldType(
                 continue;
             },
             .ErrorUnion => {
-                cur_ty = cur_ty.errorUnionPayload(mod);
+                cur_ty = cur_ty.errorUnionPayload(zcu);
                 continue;
             },
             else => {},
@@ -21086,8 +21092,8 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
 
 fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const stack_trace_ty = try pt.getBuiltinType("StackTrace");
     try stack_trace_ty.resolveFields(pt);
     const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
@@ -21115,42 +21121,42 @@ fn zirFrame(
 }
 
 fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
-    const pt = sema.pt;
+    const zcu = sema.pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    if (ty.isNoReturn(pt.zcu)) {
-        return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(pt)});
+    if (ty.isNoReturn(zcu)) {
+        return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.pt)});
     }
-    const val = try ty.lazyAbiAlignment(pt);
+    const val = try ty.lazyAbiAlignment(sema.pt);
     return Air.internedToRef(val.toIntern());
 }
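// A minimal sketch of the convention the `mod` -> `zcu` rename tracks, assuming
// `ty: Type` and `pt: Zcu.PerThread` as in the surrounding functions (the helper
// name below is illustrative, not part of Sema):
fn sketchAlignOf(ty: Type, pt: Zcu.PerThread) !Value {
    const zcu = pt.zcu;
    // Pure queries over already-resolved state need only `*Zcu`:
    std.debug.assert(!ty.isNoReturn(zcu));
    // Anything that may resolve or intern still takes the `PerThread` handle:
    return ty.lazyAbiAlignment(pt);
}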
 
 fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
-    const operand_scalar_ty = operand_ty.scalarType(mod);
+    const is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
+    const operand_scalar_ty = operand_ty.scalarType(zcu);
     if (operand_scalar_ty.toIntern() != .bool_type) {
-        return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.zigTypeTag(mod)});
+        return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.zigTypeTag(zcu)});
     }
     if (try sema.resolveValue(operand)) |val| {
         if (!is_vector) {
-            if (val.isUndef(mod)) return pt.undefRef(Type.u1);
+            if (val.isUndef(zcu)) return pt.undefRef(Type.u1);
             if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern());
             return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
         }
-        const len = operand_ty.vectorLen(mod);
+        const len = operand_ty.vectorLen(zcu);
         const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len });
-        if (val.isUndef(mod)) return pt.undefRef(dest_ty);
+        if (val.isUndef(zcu)) return pt.undefRef(dest_ty);
         const new_elems = try sema.arena.alloc(InternPool.Index, len);
         for (new_elems, 0..) |*new_elem, i| {
             const old_elem = try val.elemValue(pt, i);
-            const new_val = if (old_elem.isUndef(mod))
+            const new_val = if (old_elem.isUndef(zcu))
                 try pt.undefValue(Type.u1)
             else if (old_elem.toBool())
                 try pt.intValue(Type.u1, 1)
@@ -21166,7 +21172,7 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     if (!is_vector) {
         return block.addUnOp(.int_from_bool, operand);
     }
-    const len = operand_ty.vectorLen(mod);
+    const len = operand_ty.vectorLen(zcu);
     const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len });
     const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
     for (new_elems, 0..) |*new_elem, i| {
@@ -21199,16 +21205,16 @@ fn zirAbs(
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const operand_ty = sema.typeOf(operand);
-    const scalar_ty = operand_ty.scalarType(mod);
+    const scalar_ty = operand_ty.scalarType(zcu);
 
-    const result_ty = switch (scalar_ty.zigTypeTag(mod)) {
+    const result_ty = switch (scalar_ty.zigTypeTag(zcu)) {
         .ComptimeFloat, .Float, .ComptimeInt => operand_ty,
-        .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(pt) else return operand,
+        .Int => if (scalar_ty.isSignedInt(zcu)) try operand_ty.toUnsigned(pt) else return operand,
         else => return sema.fail(
             block,
             operand_src,
@@ -21230,12 +21236,12 @@ fn maybeConstantUnaryMath(
     comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value,
 ) CompileError!?Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (result_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (result_ty.zigTypeTag(zcu)) {
         .Vector => if (try sema.resolveValue(operand)) |val| {
-            const scalar_ty = result_ty.scalarType(mod);
-            const vec_len = result_ty.vectorLen(mod);
-            if (val.isUndef(mod))
+            const scalar_ty = result_ty.scalarType(zcu);
+            const vec_len = result_ty.vectorLen(zcu);
+            if (val.isUndef(zcu))
                 return try pt.undefRef(result_ty);
 
             const elems = try sema.arena.alloc(InternPool.Index, vec_len);
@@ -21249,7 +21255,7 @@ fn maybeConstantUnaryMath(
             } })));
         },
         else => if (try sema.resolveValue(operand)) |operand_val| {
-            if (operand_val.isUndef(mod))
+            if (operand_val.isUndef(zcu))
                 return try pt.undefRef(result_ty);
             const result_val = try eval(operand_val, result_ty, sema.arena, pt);
             return Air.internedToRef(result_val.toIntern());
@@ -21269,14 +21275,14 @@ fn zirUnaryMath(
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const operand_ty = sema.typeOf(operand);
-    const scalar_ty = operand_ty.scalarType(mod);
+    const scalar_ty = operand_ty.scalarType(zcu);
 
-    switch (scalar_ty.zigTypeTag(mod)) {
+    switch (scalar_ty.zigTypeTag(zcu)) {
         .ComptimeFloat, .Float => {},
         else => return sema.fail(
             block,
@@ -21359,9 +21365,9 @@ fn zirReify(
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
     const extra = sema.code.extraData(Zir.Inst.Reify, extended.operand).data;
     const tracked_inst = try block.trackZir(inst);
@@ -21388,7 +21394,7 @@ fn zirReify(
     if (try sema.anyUndef(block, operand_src, Value.fromInterned(union_val.val))) {
         return sema.failWithUseOfUndef(block, operand_src);
     }
-    const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), mod).?;
+    const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), zcu).?;
     switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
         .Type => return .type_type,
         .Void => return .void_type,
@@ -21411,7 +21417,7 @@ fn zirReify(
                 struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls)).?,
             );
 
-            const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
+            const signedness = zcu.toEnum(std.builtin.Signedness, signedness_val);
             const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt));
             const ty = try pt.intType(signedness, bits);
             return Air.internedToRef(ty.toIntern());
@@ -21495,7 +21501,7 @@ fn zirReify(
                 return sema.fail(block, src, "alignment must fit in 'u32'", .{});
             }
 
-            const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(pt, .sema)).?;
+            const alignment_val_int = try alignment_val.toUnsignedIntSema(pt);
             if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
                 return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int});
             }
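// The pattern in this hunk recurs throughout the commit: strategy-parameterized
// `*Advanced` calls such as `getUnsignedIntAdvanced(pt, .sema)` collapse into
// dedicated `*Sema` helpers like `toUnsignedIntSema(pt)`; the same happens to
// `orderAgainstZeroSema`, `ptrAlignmentSema`, `hasRuntimeBitsSema`, and
// `comptimeOnlySema` elsewhere in this diff. All of them keep taking `pt`,
// which carries the state needed to resolve types on demand.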
@@ -21506,14 +21512,14 @@ fn zirReify(
                 try elem_ty.resolveLayout(pt);
             }
 
-            const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
+            const ptr_size = zcu.toEnum(std.builtin.Type.Pointer.Size, size_val);
 
             const actual_sentinel: InternPool.Index = s: {
-                if (!sentinel_val.isNull(mod)) {
+                if (!sentinel_val.isNull(zcu)) {
                     if (ptr_size == .One or ptr_size == .C) {
                         return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
                     }
-                    const sentinel_ptr_val = sentinel_val.optionalValue(mod).?;
+                    const sentinel_ptr_val = sentinel_val.optionalValue(zcu).?;
                     const ptr_ty = try pt.singleMutPtrType(elem_ty);
                     const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
                     break :s sent_val.toIntern();
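// Sketch of the sentinel handling above: `std.builtin.Type` carries a sentinel
// as an optional type-erased pointer rather than a bare value, so reification
// comptime-dereferences it through a single-item pointer to the element type
// before the sentinel value can be interned into the new pointer type.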
@@ -21521,13 +21527,13 @@ fn zirReify(
                 break :s .none;
             };
 
-            if (elem_ty.zigTypeTag(mod) == .NoReturn) {
+            if (elem_ty.zigTypeTag(zcu) == .NoReturn) {
                 return sema.fail(block, src, "pointer to noreturn not allowed", .{});
-            } else if (elem_ty.zigTypeTag(mod) == .Fn) {
+            } else if (elem_ty.zigTypeTag(zcu) == .Fn) {
                 if (ptr_size != .One) {
                     return sema.fail(block, src, "function pointers must be single pointers", .{});
                 }
-            } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
+            } else if (ptr_size == .Many and elem_ty.zigTypeTag(zcu) == .Opaque) {
                 return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{});
             } else if (ptr_size == .C) {
                 if (!try sema.validateExternType(elem_ty, .other)) {
@@ -21542,7 +21548,7 @@ fn zirReify(
                     };
                     return sema.failWithOwnedErrorMsg(block, msg);
                 }
-                if (elem_ty.zigTypeTag(mod) == .Opaque) {
+                if (elem_ty.zigTypeTag(zcu) == .Opaque) {
                     return sema.fail(block, src, "C pointers cannot point to opaque types", .{});
                 }
             }
@@ -21555,7 +21561,7 @@ fn zirReify(
                     .is_const = is_const_val.toBool(),
                     .is_volatile = is_volatile_val.toBool(),
                     .alignment = abi_align,
-                    .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
+                    .address_space = zcu.toEnum(std.builtin.AddressSpace, address_space_val),
                     .is_allowzero = is_allowzero_val.toBool(),
                 },
             });
@@ -21578,7 +21584,7 @@ fn zirReify(
 
             const len = try len_val.toUnsignedIntSema(pt);
             const child_ty = child_val.toType();
-            const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
+            const sentinel = if (sentinel_val.optionalValue(zcu)) |p| blk: {
                 const ptr_ty = try pt.singleMutPtrType(child_ty);
                 break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?;
             } else null;
@@ -21616,7 +21622,7 @@ fn zirReify(
             const error_set_ty = error_set_val.toType();
             const payload_ty = payload_val.toType();
 
-            if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
+            if (error_set_ty.zigTypeTag(zcu) != .ErrorSet) {
                 return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
             }
 
@@ -21624,14 +21630,14 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .ErrorSet => {
-            const payload_val = Value.fromInterned(union_val.val).optionalValue(mod) orelse
+            const payload_val = Value.fromInterned(union_val.val).optionalValue(zcu) orelse
                 return Air.internedToRef(Type.anyerror.toIntern());
 
             const names_val = try sema.derefSliceAsArray(block, src, payload_val, .{
                 .needed_comptime_reason = "error set contents must be comptime-known",
             });
 
-            const len = try sema.usizeCast(block, src, names_val.typeOf(mod).arrayLen(mod));
+            const len = try sema.usizeCast(block, src, names_val.typeOf(zcu).arrayLen(zcu));
             var names: InferredErrorSet.NameMap = .{};
             try names.ensureUnusedCapacity(sema.arena, len);
             for (0..len) |i| {
@@ -21680,14 +21686,14 @@ fn zirReify(
                 try ip.getOrPutString(gpa, pt.tid, "is_tuple", .no_embedded_nulls),
             ).?);
 
-            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
+            const layout = zcu.toEnum(std.builtin.Type.ContainerLayout, layout_val);
 
             // Decls
             if (try decls_val.sliceLen(pt) > 0) {
                 return sema.fail(block, src, "reified structs must have no decls", .{});
             }
 
-            if (layout != .@"packed" and !backing_integer_val.isNull(mod)) {
+            if (layout != .@"packed" and !backing_integer_val.isNull(zcu)) {
                 return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
             }
 
@@ -21762,8 +21768,8 @@ fn zirReify(
             const new_namespace_index = try pt.createNamespace(.{
                 .parent = block.namespace.toOptional(),
                 .owner_type = wip_ty.index,
-                .file_scope = block.getFileScopeIndex(mod),
-                .generation = mod.generation,
+                .file_scope = block.getFileScopeIndex(zcu),
+                .generation = zcu.generation,
             });
 
             try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -21791,7 +21797,7 @@ fn zirReify(
             if (try decls_val.sliceLen(pt) > 0) {
                 return sema.fail(block, src, "reified unions must have no decls", .{});
             }
-            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
+            const layout = zcu.toEnum(std.builtin.Type.ContainerLayout, layout_val);
 
             const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{
                 .needed_comptime_reason = "union fields must be comptime-known",
@@ -21828,19 +21834,19 @@ fn zirReify(
             }
 
             const is_var_args = is_var_args_val.toBool();
-            const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
+            const cc = zcu.toEnum(std.builtin.CallingConvention, calling_convention_val);
             if (is_var_args) {
                 try sema.checkCallConvSupportsVarArgs(block, src, cc);
             }
 
-            const return_type = return_type_val.optionalValue(mod) orelse
+            const return_type = return_type_val.optionalValue(zcu) orelse
                 return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
 
             const params_val = try sema.derefSliceAsArray(block, operand_src, params_slice_val, .{
                 .needed_comptime_reason = "function parameters must be comptime-known",
             });
 
-            const args_len = try sema.usizeCast(block, src, params_val.typeOf(mod).arrayLen(mod));
+            const args_len = try sema.usizeCast(block, src, params_val.typeOf(zcu).arrayLen(zcu));
             const param_types = try sema.arena.alloc(InternPool.Index, args_len);
 
             var noalias_bits: u32 = 0;
@@ -21864,12 +21870,12 @@ fn zirReify(
                     return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
                 }
 
-                const param_type_val = opt_param_type_val.optionalValue(mod) orelse
+                const param_type_val = opt_param_type_val.optionalValue(zcu) orelse
                     return sema.fail(block, src, "Type.Fn.Param.type must be non-null for @Type", .{});
                 param_type.* = param_type_val.toIntern();
 
                 if (param_is_noalias_val.toBool()) {
-                    if (!Type.fromInterned(param_type.*).isPtrAtRuntime(mod)) {
+                    if (!Type.fromInterned(param_type.*).isPtrAtRuntime(zcu)) {
                         return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
                     }
                     noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
@@ -21901,13 +21907,13 @@ fn reifyEnum(
     name_strategy: Zir.Inst.NameStrategy,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     // This logic must stay in sync with the structure of `std.builtin.Type.Enum` - search for `fieldValue`.
 
-    const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod));
+    const fields_len: u32 = @intCast(fields_val.typeOf(zcu).arrayLen(zcu));
 
     // The validation work here is non-trivial, and it's possible the type already exists.
     // So in this first pass, let's just construct a hash to optimize for this case. If the
@@ -21957,7 +21963,7 @@ fn reifyEnum(
     var done = false;
     errdefer if (!done) wip_ty.cancel(ip, pt.tid);
 
-    if (tag_ty.zigTypeTag(mod) != .Int) {
+    if (tag_ty.zigTypeTag(zcu) != .Int) {
         return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
     }
 
@@ -21972,8 +21978,8 @@ fn reifyEnum(
     const new_namespace_index = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
 
     const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
@@ -22023,14 +22029,14 @@ fn reifyEnum(
         }
     }
 
-    if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(pt)) {
+    if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(zcu)) {
         return sema.fail(block, src, "non-exhaustive enum specified every value", .{});
     }
 
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     return Air.internedToRef(wip_ty.index);
 }
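// Gating sketch for the `codegen_type` jobs queued by the reify functions: no
// job is queued when the LLVM backend is in use or the owning module is built
// with `strip`, since only the self-hosted backends consume these type-codegen
// jobs (presumably to emit debug information for the reified type).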
@@ -22046,13 +22052,13 @@ fn reifyUnion(
     name_strategy: Zir.Inst.NameStrategy,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     // This logic must stay in sync with the structure of `std.builtin.Type.Union` - search for `fieldValue`.
 
-    const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod));
+    const fields_len: u32 = @intCast(fields_val.typeOf(zcu).arrayLen(zcu));
 
     // The validation work here is non-trivial, and it's possible the type already exists.
     // So in this first pass, let's just construct a hash to optimize for this case. If the
@@ -22084,7 +22090,7 @@ fn reifyUnion(
             field_align_val.toIntern(),
         });
 
-        if (field_align_val.toUnsignedInt(pt) != 0) {
+        if (field_align_val.toUnsignedInt(zcu) != 0) {
             any_aligns = true;
         }
     }
@@ -22095,7 +22101,7 @@ fn reifyUnion(
         .flags = .{
             .layout = layout,
             .status = .none,
-            .runtime_tag = if (opt_tag_type_val.optionalValue(mod) != null)
+            .runtime_tag = if (opt_tag_type_val.optionalValue(zcu) != null)
                 .tagged
             else if (layout != .auto)
                 .none
@@ -22139,7 +22145,7 @@ fn reifyUnion(
     const field_types = try sema.arena.alloc(InternPool.Index, fields_len);
     const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined;
 
-    const enum_tag_ty, const has_explicit_tag = if (opt_tag_type_val.optionalValue(mod)) |tag_type_val| tag_ty: {
+    const enum_tag_ty, const has_explicit_tag = if (opt_tag_type_val.optionalValue(zcu)) |tag_type_val| tag_ty: {
         switch (ip.indexToKey(tag_type_val.toIntern())) {
             .enum_type => {},
             else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
@@ -22147,7 +22153,7 @@ fn reifyUnion(
         const enum_tag_ty = tag_type_val.toType();
 
         // We simply track which fields of the tag type have been seen.
-        const tag_ty_fields_len = enum_tag_ty.enumFieldCount(mod);
+        const tag_ty_fields_len = enum_tag_ty.enumFieldCount(zcu);
         var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len);
 
         for (field_types, 0..) |*field_ty, field_idx| {
@@ -22159,7 +22165,7 @@ fn reifyUnion(
             // Don't pass a reason; the first loop acts as an assertion that this is valid.
             const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined);
 
-            const enum_index = enum_tag_ty.enumFieldIndex(field_name, mod) orelse {
+            const enum_index = enum_tag_ty.enumFieldIndex(field_name, zcu) orelse {
                 // TODO: better source location
                 return sema.fail(block, src, "no field named '{}' in enum '{}'", .{
                     field_name.fmt(ip), enum_tag_ty.fmt(pt),
@@ -22187,7 +22193,7 @@ fn reifyUnion(
             errdefer msg.destroy(gpa);
             var it = seen_tags.iterator(.{ .kind = .unset });
             while (it.next()) |enum_index| {
-                const field_name = enum_tag_ty.enumFieldName(enum_index, mod);
+                const field_name = enum_tag_ty.enumFieldName(enum_index, zcu);
                 try sema.addFieldErrNote(enum_tag_ty, enum_index, msg, "field '{}' missing, declared here", .{
                     field_name.fmt(ip),
                 });
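// Coverage sketch for the explicit-tag case above: `seen_tags` holds one bit per
// field of the enum tag type; each union field sets the bit at its matching
// `enum_index`, so iterating the still-unset bits afterwards yields exactly the
// enum tags that have no corresponding union field.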
@@ -22234,7 +22240,7 @@ fn reifyUnion(
 
     for (field_types) |field_ty_ip| {
         const field_ty = Type.fromInterned(field_ty_ip);
-        if (field_ty.zigTypeTag(mod) == .Opaque) {
+        if (field_ty.zigTypeTag(zcu) == .Opaque) {
             return sema.failWithOwnedErrorMsg(block, msg: {
                 const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
                 errdefer msg.destroy(gpa);
@@ -22277,17 +22283,17 @@ fn reifyUnion(
     const new_namespace_index = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
 
     const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
 
-    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
+    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -22306,13 +22312,13 @@ fn reifyStruct(
     is_tuple: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     // This logic must stay in sync with the structure of `std.builtin.Type.Struct` - search for `fieldValue`.
 
-    const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod));
+    const fields_len: u32 = @intCast(fields_val.typeOf(zcu).arrayLen(zcu));
 
     // The validation work here is non-trivial, and it's possible the type already exists.
     // So in this first pass, let's just construct a hash to optimize for this case. If the
@@ -22343,7 +22349,7 @@ fn reifyStruct(
             .needed_comptime_reason = "struct field name must be comptime-known",
         });
         const field_is_comptime = field_is_comptime_val.toBool();
-        const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(mod)) |ptr_val| d: {
+        const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(zcu)) |ptr_val| d: {
             const ptr_ty = try pt.singleConstPtrType(field_type_val.toType());
             // We do the deref here so that we don't need to check for this error case later on.
             const val = try sema.pointerDeref(block, src, ptr_val, ptr_ty) orelse return sema.failWithNeededComptime(
@@ -22365,7 +22371,7 @@ fn reifyStruct(
 
         if (field_is_comptime) any_comptime_fields = true;
         if (field_default_value != .none) any_default_inits = true;
-        switch (try field_alignment_val.orderAgainstZeroAdvanced(pt, .sema)) {
+        switch (try field_alignment_val.orderAgainstZeroSema(pt)) {
             .eq => {},
             .gt => any_aligned_fields = true,
             .lt => unreachable,
@@ -22475,7 +22481,7 @@ fn reifyStruct(
 
         const field_default: InternPool.Index = d: {
             if (!any_default_inits) break :d .none;
-            const ptr_val = field_default_value_val.optionalValue(mod) orelse break :d .none;
+            const ptr_val = field_default_value_val.optionalValue(zcu) orelse break :d .none;
             const ptr_ty = try pt.singleConstPtrType(field_ty);
             // Asserted comptime-dereferenceable above.
             const val = (try sema.pointerDeref(block, src, ptr_val, ptr_ty)).?;
@@ -22492,7 +22498,7 @@ fn reifyStruct(
             struct_type.field_inits.get(ip)[field_idx] = field_default;
         }
 
-        if (field_ty.zigTypeTag(mod) == .Opaque) {
+        if (field_ty.zigTypeTag(zcu) == .Opaque) {
             return sema.failWithOwnedErrorMsg(block, msg: {
                 const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                 errdefer msg.destroy(gpa);
@@ -22501,7 +22507,7 @@ fn reifyStruct(
                 break :msg msg;
             });
         }
-        if (field_ty.zigTypeTag(mod) == .NoReturn) {
+        if (field_ty.zigTypeTag(zcu) == .NoReturn) {
             return sema.failWithOwnedErrorMsg(block, msg: {
                 const msg = try sema.errMsg(src, "struct fields cannot be 'noreturn'", .{});
                 errdefer msg.destroy(gpa);
@@ -22545,10 +22551,10 @@ fn reifyStruct(
                 },
                 else => return err,
             };
-            fields_bit_sum += field_ty.bitSize(pt);
+            fields_bit_sum += field_ty.bitSize(zcu);
         }
 
-        if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| {
+        if (opt_backing_int_val.optionalValue(zcu)) |backing_int_val| {
             const backing_int_ty = backing_int_val.toType();
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
             struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
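// Packed-layout sketch: the loop above accumulates `fields_bit_sum`, and an
// explicit `backing_integer` must be an integer type of exactly that many bits
// (enforced by `checkBackingIntType`); absent an explicit type, Zig's usual
// packed-struct rule implies an unsigned integer of that width.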
@@ -22561,17 +22567,17 @@ fn reifyStruct(
     const new_namespace_index = try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .owner_type = wip_ty.index,
-        .file_scope = block.getFileScopeIndex(mod),
-        .generation = mod.generation,
+        .file_scope = block.getFileScopeIndex(zcu),
+        .generation = zcu.generation,
     });
 
     const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
 
-    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
+    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
     codegen_type: {
-        if (mod.comp.config.use_llvm) break :codegen_type;
+        if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
-        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -22649,8 +22655,8 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
 
 fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -22674,7 +22680,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 
 fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -22684,10 +22690,10 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     const operand_ty = sema.typeOf(operand);
 
     try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
-    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
+    const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
 
-    const dest_scalar_ty = dest_ty.scalarType(mod);
-    const operand_scalar_ty = operand_ty.scalarType(mod);
+    const dest_scalar_ty = dest_ty.scalarType(zcu);
+    const operand_scalar_ty = operand_ty.scalarType(zcu);
 
     _ = try sema.checkIntType(block, src, dest_scalar_ty);
     try sema.checkFloatType(block, operand_src, operand_scalar_ty);
@@ -22695,14 +22701,14 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     if (try sema.resolveValue(operand)) |operand_val| {
         const result_val = try sema.intFromFloat(block, operand_src, operand_val, operand_ty, dest_ty, .truncate);
         return Air.internedToRef(result_val.toIntern());
-    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
+    } else if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
         return sema.failWithNeededComptime(block, operand_src, .{
             .needed_comptime_reason = "value being cast to 'comptime_int' must be comptime-known",
         });
     }
 
     try sema.requireRuntimeBlock(block, src, operand_src);
-    if (dest_scalar_ty.intInfo(mod).bits == 0) {
+    if (dest_scalar_ty.intInfo(zcu).bits == 0) {
         if (!is_vector) {
             if (block.wantSafety()) {
                 const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try pt.floatValue(operand_ty, 0.0)).toIntern()));
@@ -22711,7 +22717,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
             return Air.internedToRef((try pt.intValue(dest_ty, 0)).toIntern());
         }
         if (block.wantSafety()) {
-            const len = dest_ty.vectorLen(mod);
+            const len = dest_ty.vectorLen(zcu);
             for (0..len) |i| {
                 const idx_ref = try pt.intRef(Type.usize, i);
                 const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
@@ -22736,7 +22742,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
         }
         return result;
     }
-    const len = dest_ty.vectorLen(mod);
+    const len = dest_ty.vectorLen(zcu);
     const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
     for (new_elems, 0..) |*new_elem, i| {
         const idx_ref = try pt.intRef(Type.usize, i);
@@ -22757,7 +22763,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 
 fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -22767,10 +22773,10 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     const operand_ty = sema.typeOf(operand);
 
     try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
-    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
+    const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
 
-    const dest_scalar_ty = dest_ty.scalarType(mod);
-    const operand_scalar_ty = operand_ty.scalarType(mod);
+    const dest_scalar_ty = dest_ty.scalarType(zcu);
+    const operand_scalar_ty = operand_ty.scalarType(zcu);
 
     try sema.checkFloatType(block, src, dest_scalar_ty);
     _ = try sema.checkIntType(block, operand_src, operand_scalar_ty);
@@ -22778,7 +22784,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     if (try sema.resolveValue(operand)) |operand_val| {
         const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, pt, .sema);
         return Air.internedToRef(result_val.toIntern());
-    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) {
+    } else if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeFloat) {
         return sema.failWithNeededComptime(block, operand_src, .{
             .needed_comptime_reason = "value being cast to 'comptime_float' must be comptime-known",
         });
@@ -22788,7 +22794,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     if (!is_vector) {
         return block.addTyOp(.float_from_int, dest_ty, operand);
     }
-    const len = operand_ty.vectorLen(mod);
+    const len = operand_ty.vectorLen(zcu);
     const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
     for (new_elems, 0..) |*new_elem, i| {
         const idx_ref = try pt.intRef(Type.usize, i);
@@ -22800,7 +22806,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 
 fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
 
@@ -22813,21 +22819,21 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrFromInt");
     try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src);
 
-    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
+    const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
     const operand_ty = if (is_vector) operand_ty: {
-        const len = dest_ty.vectorLen(mod);
+        const len = dest_ty.vectorLen(zcu);
         break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len });
     } else Type.usize;
 
     const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src);
 
-    const ptr_ty = dest_ty.scalarType(mod);
+    const ptr_ty = dest_ty.scalarType(zcu);
     try sema.checkPtrType(block, src, ptr_ty, true);
 
-    const elem_ty = ptr_ty.elemType2(mod);
-    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(pt, .sema);
+    const elem_ty = ptr_ty.elemType2(zcu);
+    const ptr_align = try ptr_ty.ptrAlignmentSema(pt);
 
-    if (ptr_ty.isSlice(mod)) {
+    if (ptr_ty.isSlice(zcu)) {
         const msg = msg: {
             const msg = try sema.errMsg(src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -22842,7 +22848,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             const ptr_val = try sema.ptrFromIntVal(block, operand_src, val, ptr_ty, ptr_align);
             return Air.internedToRef(ptr_val.toIntern());
         }
-        const len = dest_ty.vectorLen(mod);
+        const len = dest_ty.vectorLen(zcu);
         const new_elems = try sema.arena.alloc(InternPool.Index, len);
         for (new_elems, 0..) |*new_elem, i| {
             const elem = try val.elemValue(pt, i);
@@ -22854,7 +22860,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             .storage = .{ .elems = new_elems },
         } }));
     }
-    if (try sema.typeRequiresComptime(ptr_ty)) {
+    if (try ptr_ty.comptimeOnlySema(pt)) {
         return sema.failWithOwnedErrorMsg(block, msg: {
             const msg = try sema.errMsg(src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(pt)});
             errdefer msg.destroy(sema.gpa);
@@ -22865,8 +22871,8 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     }
     try sema.requireRuntimeBlock(block, src, operand_src);
     if (!is_vector) {
-        if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
-            if (!ptr_ty.isAllowzeroPtr(mod)) {
+        if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .Fn)) {
+            if (!ptr_ty.isAllowzeroPtr(zcu)) {
                 const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
                 try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
             }
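// Safety sketch for the scalar path above: when the block wants safety and the
// pointee has runtime bits (or is a function type), a `cmp_neq` against
// `.zero_usize` guards non-`allowzero` destinations, so a zero operand trips the
// `.cast_to_null` safety panic instead of yielding a bogus pointer.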
@@ -22881,12 +22887,12 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         return block.addBitCast(dest_ty, operand_coerced);
     }
 
-    const len = dest_ty.vectorLen(mod);
-    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
+    const len = dest_ty.vectorLen(zcu);
+    if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .Fn)) {
         for (0..len) |i| {
             const idx_ref = try pt.intRef(Type.usize, i);
             const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
-            if (!ptr_ty.isAllowzeroPtr(mod)) {
+            if (!ptr_ty.isAllowzeroPtr(zcu)) {
                 const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
                 try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
             }
@@ -22943,16 +22949,16 @@ fn ptrFromIntVal(
 
 fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src = block.nodeOffset(extra.node);
     const operand_src = block.builtinCallArgSrc(extra.node, 0);
     const base_dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_opt, "@errorCast");
     const operand = try sema.resolveInst(extra.rhs);
     const base_operand_ty = sema.typeOf(operand);
-    const dest_tag = base_dest_ty.zigTypeTag(mod);
-    const operand_tag = base_operand_ty.zigTypeTag(mod);
+    const dest_tag = base_dest_ty.zigTypeTag(zcu);
+    const operand_tag = base_operand_ty.zigTypeTag(zcu);
 
     if (dest_tag != .ErrorSet and dest_tag != .ErrorUnion) {
         return sema.fail(block, src, "expected error set or error union type, found '{s}'", .{@tagName(dest_tag)});
@@ -22964,13 +22970,13 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
         return sema.fail(block, src, "cannot cast an error union type to error set", .{});
     }
     if (dest_tag == .ErrorUnion and operand_tag == .ErrorUnion and
-        base_dest_ty.errorUnionPayload(mod).toIntern() != base_operand_ty.errorUnionPayload(mod).toIntern())
+        base_dest_ty.errorUnionPayload(zcu).toIntern() != base_operand_ty.errorUnionPayload(zcu).toIntern())
     {
         return sema.failWithOwnedErrorMsg(block, msg: {
             const msg = try sema.errMsg(src, "payload types of error unions must match", .{});
             errdefer msg.destroy(sema.gpa);
-            const dest_ty = base_dest_ty.errorUnionPayload(mod);
-            const operand_ty = base_operand_ty.errorUnionPayload(mod);
+            const dest_ty = base_dest_ty.errorUnionPayload(zcu);
+            const operand_ty = base_operand_ty.errorUnionPayload(zcu);
             try sema.errNote(src, msg, "destination payload is '{}'", .{dest_ty.fmt(pt)});
             try sema.errNote(src, msg, "operand payload is '{}'", .{operand_ty.fmt(pt)});
             try addDeclaredHereNote(sema, msg, dest_ty);
@@ -22978,19 +22984,19 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
             break :msg msg;
         });
     }
-    const dest_ty = if (dest_tag == .ErrorUnion) base_dest_ty.errorUnionSet(mod) else base_dest_ty;
-    const operand_ty = if (operand_tag == .ErrorUnion) base_operand_ty.errorUnionSet(mod) else base_operand_ty;
+    const dest_ty = if (dest_tag == .ErrorUnion) base_dest_ty.errorUnionSet(zcu) else base_dest_ty;
+    const operand_ty = if (operand_tag == .ErrorUnion) base_operand_ty.errorUnionSet(zcu) else base_operand_ty;
 
     // operand must be defined since it can be an invalid error value
     const maybe_operand_val = try sema.resolveDefinedValue(block, operand_src, operand);
 
     const disjoint = disjoint: {
         // Try avoiding resolving inferred error sets if we can
-        if (!dest_ty.isAnyError(mod) and dest_ty.errorSetIsEmpty(mod)) break :disjoint true;
-        if (!operand_ty.isAnyError(mod) and operand_ty.errorSetIsEmpty(mod)) break :disjoint true;
-        if (dest_ty.isAnyError(mod)) break :disjoint false;
-        if (operand_ty.isAnyError(mod)) break :disjoint false;
-        const dest_err_names = dest_ty.errorSetNames(mod);
+        if (!dest_ty.isAnyError(zcu) and dest_ty.errorSetIsEmpty(zcu)) break :disjoint true;
+        if (!operand_ty.isAnyError(zcu) and operand_ty.errorSetIsEmpty(zcu)) break :disjoint true;
+        if (dest_ty.isAnyError(zcu)) break :disjoint false;
+        if (operand_ty.isAnyError(zcu)) break :disjoint false;
+        const dest_err_names = dest_ty.errorSetNames(zcu);
         for (0..dest_err_names.len) |dest_err_index| {
             if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_names.get(ip)[dest_err_index]))
                 break :disjoint false;
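// Disjointness sketch: the cast is rejected outright only when the two error
// sets provably share no members. An empty concrete set is disjoint from
// everything, `anyerror` from nothing; otherwise each name in the destination
// set is probed against the operand set via `Type.errorSetHasFieldIp`, and a
// single hit means the sets overlap.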
@@ -23018,8 +23024,8 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
     }
 
     if (maybe_operand_val) |val| {
-        if (!dest_ty.isAnyError(mod)) check: {
-            const operand_val = mod.intern_pool.indexToKey(val.toIntern());
+        if (!dest_ty.isAnyError(zcu)) check: {
+            const operand_val = zcu.intern_pool.indexToKey(val.toIntern());
             var error_name: InternPool.NullTerminatedString = undefined;
             if (operand_tag == .ErrorUnion) {
                 if (operand_val.error_union.val != .err_name) break :check;
@@ -23039,9 +23045,9 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
 
     try sema.requireRuntimeBlock(block, src, operand_src);
     const err_int_ty = try pt.errorIntType();
-    if (block.wantSafety() and !dest_ty.isAnyError(mod) and
+    if (block.wantSafety() and !dest_ty.isAnyError(zcu) and
         dest_ty.toIntern() != .adhoc_inferred_error_set_type and
-        mod.backendSupportsFeature(.error_set_has_value))
+        zcu.backendSupportsFeature(.error_set_has_value))
     {
         if (dest_tag == .ErrorUnion) {
             const err_code = try sema.analyzeErrUnionCode(block, operand_src, operand);
@@ -23116,23 +23122,23 @@ fn ptrCastFull(
     operation: []const u8,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
 
     try sema.checkPtrType(block, src, dest_ty, true);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
 
-    const src_info = operand_ty.ptrInfo(mod);
-    const dest_info = dest_ty.ptrInfo(mod);
+    const src_info = operand_ty.ptrInfo(zcu);
+    const dest_info = dest_ty.ptrInfo(zcu);
 
     try Type.fromInterned(src_info.child).resolveLayout(pt);
     try Type.fromInterned(dest_info.child).resolveLayout(pt);
 
     const src_slice_like = src_info.flags.size == .Slice or
-        (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array);
+        (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(zcu) == .Array);
 
     const dest_slice_like = dest_info.flags.size == .Slice or
-        (dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Array);
+        (dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .Array);
 
     if (dest_info.flags.size == .Slice and !src_slice_like) {
         return sema.fail(block, src, "illegal pointer cast to slice", .{});
@@ -23140,12 +23146,12 @@ fn ptrCastFull(
 
     if (dest_info.flags.size == .Slice) {
         const src_elem_size = switch (src_info.flags.size) {
-            .Slice => Type.fromInterned(src_info.child).abiSize(pt),
+            .Slice => Type.fromInterned(src_info.child).abiSize(zcu),
             // pointer to array
-            .One => Type.fromInterned(src_info.child).childType(mod).abiSize(pt),
+            .One => Type.fromInterned(src_info.child).childType(zcu).abiSize(zcu),
             else => unreachable,
         };
-        const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(pt);
+        const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(zcu);
         if (src_elem_size != dest_elem_size) {
             return sema.fail(block, src, "TODO: implement {s} between slices changing the length", .{operation});
         }
@@ -23167,7 +23173,7 @@ fn ptrCastFull(
                 errdefer msg.destroy(sema.gpa);
                 if (dest_info.flags.size == .Many and
                     (src_info.flags.size == .Slice or
-                    (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array)))
+                    (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(zcu) == .Array)))
                 {
                     try sema.errNote(src, msg, "use 'ptr' field to convert slice to many pointer", .{});
                 } else {
@@ -23180,7 +23186,7 @@ fn ptrCastFull(
         check_child: {
             const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: {
                 // *[n]T -> []T
-                break :blk Type.fromInterned(src_info.child).childType(mod);
+                break :blk Type.fromInterned(src_info.child).childType(zcu);
             } else Type.fromInterned(src_info.child);
 
             const dest_child = Type.fromInterned(dest_info.child);
@@ -23190,7 +23196,7 @@ fn ptrCastFull(
                 dest_child,
                 src_child,
                 !dest_info.flags.is_const,
-                mod.getTarget(),
+                zcu.getTarget(),
                 src,
                 operand_src,
                 null,
@@ -23211,14 +23217,14 @@ fn ptrCastFull(
             if (dest_info.sentinel == .none) break :check_sent;
             if (src_info.flags.size == .C) break :check_sent;
             if (src_info.sentinel != .none) {
-                const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child);
+                const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child);
                 if (dest_info.sentinel == coerced_sent) break :check_sent;
             }
             if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) {
                // *[n:x]T -> []T
                 const arr_ty = Type.fromInterned(src_info.child);
-                if (arr_ty.sentinel(mod)) |src_sentinel| {
-                    const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child);
+                if (arr_ty.sentinel(zcu)) |src_sentinel| {
+                    const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child);
                     if (dest_info.sentinel == coerced_sent) break :check_sent;
                 }
             }
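
At the language level, this check is what rejects sentinel-changing casts: the source sentinel (from the pointer itself or from the pointed-to array) is coerced to the destination element type, and since interned values are canonical, index equality means value equality. A usage sketch of what it permits and rejects:

    const buf: *const [3:0]u8 = "abc";
    const ok: [:0]const u8 = @ptrCast(buf); // array sentinel 0 coerces to the dest sentinel
    // const bad: [:1]const u8 = @ptrCast(buf); // rejected: sentinels do not match
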
@@ -23264,8 +23270,8 @@ fn ptrCastFull(
         }
 
         check_allowzero: {
-            const src_allows_zero = operand_ty.ptrAllowsZero(mod);
-            const dest_allows_zero = dest_ty.ptrAllowsZero(mod);
+            const src_allows_zero = operand_ty.ptrAllowsZero(zcu);
+            const dest_allows_zero = dest_ty.ptrAllowsZero(zcu);
             if (!src_allows_zero) break :check_allowzero;
             if (dest_allows_zero) break :check_allowzero;
 
@@ -23286,12 +23292,12 @@ fn ptrCastFull(
     const src_align = if (src_info.flags.alignment != .none)
         src_info.flags.alignment
     else
-        Type.fromInterned(src_info.child).abiAlignment(pt);
+        Type.fromInterned(src_info.child).abiAlignment(zcu);
 
     const dest_align = if (dest_info.flags.alignment != .none)
         dest_info.flags.alignment
     else
-        Type.fromInterned(dest_info.child).abiAlignment(pt);
+        Type.fromInterned(dest_info.child).abiAlignment(zcu);
 
     if (!flags.align_cast) {
         if (dest_align.compare(.gt, src_align)) {
@@ -23327,7 +23333,7 @@ fn ptrCastFull(
         }
     } else {
         // Some address space casts are always disallowed
-        if (!target_util.addrSpaceCastIsValid(mod.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) {
+        if (!target_util.addrSpaceCastIsValid(zcu.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) {
             return sema.failWithOwnedErrorMsg(block, msg: {
                 const msg = try sema.errMsg(src, "invalid address space cast", .{});
                 errdefer msg.destroy(sema.gpa);
@@ -23363,7 +23369,7 @@ fn ptrCastFull(
     }
 
     const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) ptr: {
-        if (operand_ty.zigTypeTag(mod) == .Optional) {
+        if (operand_ty.zigTypeTag(zcu) == .Optional) {
             break :ptr try sema.analyzeOptionalSlicePtr(block, operand_src, operand, operand_ty);
         } else {
             break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty);
@@ -23375,7 +23381,7 @@ fn ptrCastFull(
         var info = dest_info;
         info.flags.size = .Many;
         const ty = try pt.ptrTypeSema(info);
-        if (dest_ty.zigTypeTag(mod) == .Optional) {
+        if (dest_ty.zigTypeTag(zcu) == .Optional) {
             break :blk try pt.optionalType(ty.toIntern());
         } else {
             break :blk ty;
@@ -23385,14 +23391,14 @@ fn ptrCastFull(
     // Cannot do @addrSpaceCast at comptime
     if (!flags.addrspace_cast) {
         if (try sema.resolveValue(ptr)) |ptr_val| {
-            if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isUndef(mod)) {
+            if (!dest_ty.ptrAllowsZero(zcu) and ptr_val.isUndef(zcu)) {
                 return sema.failWithUseOfUndef(block, operand_src);
             }
-            if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) {
+            if (!dest_ty.ptrAllowsZero(zcu) and ptr_val.isNull(zcu)) {
                 return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)});
             }
             if (dest_align.compare(.gt, src_align)) {
-                if (try ptr_val.getUnsignedIntAdvanced(pt, .sema)) |addr| {
+                if (try ptr_val.getUnsignedIntSema(pt)) |addr| {
                     if (!dest_align.check(addr)) {
                         return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
                             addr,
@@ -23402,20 +23408,20 @@ fn ptrCastFull(
                 }
             }
             if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
-                if (ptr_val.isUndef(mod)) return pt.undefRef(dest_ty);
-                const arr_len = try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod));
-                const ptr_val_key = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
+                if (ptr_val.isUndef(zcu)) return pt.undefRef(dest_ty);
+                const arr_len = try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(zcu));
+                const ptr_val_key = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
                 return Air.internedToRef((try pt.intern(.{ .slice = .{
                     .ty = dest_ty.toIntern(),
                     .ptr = try pt.intern(.{ .ptr = .{
-                        .ty = dest_ty.slicePtrFieldType(mod).toIntern(),
+                        .ty = dest_ty.slicePtrFieldType(zcu).toIntern(),
                         .base_addr = ptr_val_key.base_addr,
                         .byte_offset = ptr_val_key.byte_offset,
                     } }),
                     .len = arr_len.toIntern(),
                 } })));
             } else {
-                assert(dest_ptr_ty.eql(dest_ty, mod));
+                assert(dest_ptr_ty.eql(dest_ty, zcu));
                 return Air.internedToRef((try pt.getCoerced(ptr_val, dest_ty)).toIntern());
             }
         }
@@ -23424,8 +23430,8 @@ fn ptrCastFull(
     try sema.requireRuntimeBlock(block, src, null);
     try sema.validateRuntimeValue(block, operand_src, ptr);
 
-    if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
-        (try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)) or Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Fn))
+    if (block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu) and
+        (try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt) or Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .Fn))
     {
         const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
         const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
@@ -23439,7 +23445,7 @@ fn ptrCastFull(
 
     if (block.wantSafety() and
         dest_align.compare(.gt, src_align) and
-        try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
+        try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt))
     {
         const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
         const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
@@ -23460,7 +23466,7 @@ fn ptrCastFull(
         var intermediate_info = src_info;
         intermediate_info.flags.address_space = dest_info.flags.address_space;
         const intermediate_ptr_ty = try pt.ptrTypeSema(intermediate_info);
-        const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
+        const intermediate_ty = if (dest_ptr_ty.zigTypeTag(zcu) == .Optional) blk: {
             break :blk try pt.optionalType(intermediate_ptr_ty.toIntern());
         } else intermediate_ptr_ty;
         const intermediate = try block.addInst(.{
@@ -23470,7 +23476,7 @@ fn ptrCastFull(
                 .operand = ptr,
             } },
         });
-        if (intermediate_ty.eql(dest_ptr_ty, mod)) {
+        if (intermediate_ty.eql(dest_ptr_ty, zcu)) {
             // We only changed the address space, so no need for a bitcast
             break :ptr intermediate;
         }
@@ -23482,7 +23488,7 @@ fn ptrCastFull(
     if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
         // We have to construct a slice using the operand's child's array length
         // Note that we know from the check at the start of the function that operand_ty is slice-like
-        const arr_len = Air.internedToRef((try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern());
+        const arr_len = Air.internedToRef((try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(zcu))).toIntern());
         return block.addInst(.{
             .tag = .slice,
             .data = .{ .ty_pl = .{
@@ -23494,7 +23500,7 @@ fn ptrCastFull(
             } },
         });
     } else {
-        assert(dest_ptr_ty.eql(dest_ty, mod));
+        assert(dest_ptr_ty.eql(dest_ty, zcu));
         try sema.checkKnownAllocPtr(block, operand, result_ptr);
         return result_ptr;
     }
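
The slice branch above is what gives a pointer-to-array cast a well-defined length: the result's `len` comes from the operand's array type, not from the destination. A minimal usage sketch:

    var arr: [4]u8 = .{ 1, 2, 3, 4 };
    const s: []u8 = @ptrCast(&arr); // s.len == 4, taken from the operand's child array type
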
@@ -23502,7 +23508,7 @@ fn ptrCastFull(
 
 fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
     const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
@@ -23512,13 +23518,13 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     const operand_ty = sema.typeOf(operand);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
 
-    var ptr_info = operand_ty.ptrInfo(mod);
+    var ptr_info = operand_ty.ptrInfo(zcu);
     if (flags.const_cast) ptr_info.flags.is_const = false;
     if (flags.volatile_cast) ptr_info.flags.is_volatile = false;
 
     const dest_ty = blk: {
         const dest_ty = try pt.ptrTypeSema(ptr_info);
-        if (operand_ty.zigTypeTag(mod) == .Optional) {
+        if (operand_ty.zigTypeTag(zcu) == .Optional) {
             break :blk try pt.optionalType(dest_ty.toIntern());
         }
         break :blk dest_ty;
@@ -23536,7 +23542,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
 
 fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -23547,24 +23553,24 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const operand_ty = sema.typeOf(operand);
     const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
 
-    const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector;
-    const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector;
+    const operand_is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
+    const dest_is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
     if (operand_is_vector != dest_is_vector) {
         return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), operand_ty.fmt(pt) });
     }
 
-    if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
+    if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
         return sema.coerce(block, dest_ty, operand, operand_src);
     }
 
-    const dest_info = dest_scalar_ty.intInfo(mod);
+    const dest_info = dest_scalar_ty.intInfo(zcu);
 
     if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
         return Air.internedToRef(val.toIntern());
     }
 
-    if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
-        const operand_info = operand_ty.intInfo(mod);
+    if (operand_scalar_ty.zigTypeTag(zcu) != .ComptimeInt) {
+        const operand_info = operand_ty.intInfo(zcu);
         if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
             return Air.internedToRef(val.toIntern());
         }
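
The classification above only decides whether a comptime fold is possible; the fold itself (below) calls `intTrunc`, which keeps the low `bits` of the operand. Behaviorally:

    const x: u16 = 0xABCD;
    const y: u8 = @truncate(x); // y == 0xCD: only the low 8 bits survive
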
@@ -23595,14 +23601,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
 
     if (try sema.resolveValueIntable(operand)) |val| {
-        if (val.isUndef(mod)) return pt.undefRef(dest_ty);
+        if (val.isUndef(zcu)) return pt.undefRef(dest_ty);
         if (!dest_is_vector) {
             return Air.internedToRef((try pt.getCoerced(
                 try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, pt),
                 dest_ty,
             )).toIntern());
         }
-        const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
+        const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(zcu));
         for (elems, 0..) |*elem, i| {
             const elem_val = try val.elemValue(pt, i);
             const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, pt);
@@ -23623,38 +23629,38 @@ fn zirBitCount(
     block: *Block,
     inst: Zir.Inst.Index,
     air_tag: Air.Inst.Tag,
-    comptime comptimeOp: fn (val: Value, ty: Type, pt: Zcu.PerThread) u64,
+    comptime comptimeOp: fn (val: Value, ty: Type, zcu: *Zcu) u64,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
     _ = try sema.checkIntOrVector(block, operand, operand_src);
-    const bits = operand_ty.intInfo(mod).bits;
+    const bits = operand_ty.intInfo(zcu).bits;
 
     if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
         return Air.internedToRef(val.toIntern());
     }
 
     const result_scalar_ty = try pt.smallestUnsignedInt(bits);
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .Vector => {
-            const vec_len = operand_ty.vectorLen(mod);
+            const vec_len = operand_ty.vectorLen(zcu);
             const result_ty = try pt.vectorType(.{
                 .len = vec_len,
                 .child = result_scalar_ty.toIntern(),
             });
             if (try sema.resolveValue(operand)) |val| {
-                if (val.isUndef(mod)) return pt.undefRef(result_ty);
+                if (val.isUndef(zcu)) return pt.undefRef(result_ty);
 
                 const elems = try sema.arena.alloc(InternPool.Index, vec_len);
-                const scalar_ty = operand_ty.scalarType(mod);
+                const scalar_ty = operand_ty.scalarType(zcu);
                 for (elems, 0..) |*elem, i| {
                     const elem_val = try val.elemValue(pt, i);
-                    const count = comptimeOp(elem_val, scalar_ty, pt);
+                    const count = comptimeOp(elem_val, scalar_ty, zcu);
                     elem.* = (try pt.intValue(result_scalar_ty, count)).toIntern();
                 }
                 return Air.internedToRef((try pt.intern(.{ .aggregate = .{
@@ -23668,8 +23674,8 @@ fn zirBitCount(
         },
         .Int => {
             if (try sema.resolveValueResolveLazy(operand)) |val| {
-                if (val.isUndef(mod)) return pt.undefRef(result_scalar_ty);
-                return pt.intRef(result_scalar_ty, comptimeOp(val, operand_ty, pt));
+                if (val.isUndef(zcu)) return pt.undefRef(result_scalar_ty);
+                return pt.intRef(result_scalar_ty, comptimeOp(val, operand_ty, zcu));
             } else {
                 try sema.requireRuntimeBlock(block, src, operand_src);
                 return block.addTyOp(air_tag, result_scalar_ty, operand);
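
`zirBitCount` backs @popCount, @clz, and @ctz; each caller supplies the comptime evaluation as the `comptimeOp` callback, whose signature this commit changes from `Zcu.PerThread` to `*Zcu`. A hypothetical callback matching the new signature (assuming `Value.popCount` keeps the same shape):

    fn comptimePopCount(val: Value, ty: Type, zcu: *Zcu) u64 {
        // Count set bits in an already-resolved integer value.
        return val.popCount(ty, zcu);
    }
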
@@ -23681,14 +23687,14 @@ fn zirBitCount(
 
 fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
     const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
-    const bits = scalar_ty.intInfo(mod).bits;
+    const bits = scalar_ty.intInfo(zcu).bits;
     if (bits % 8 != 0) {
         return sema.fail(
             block,
@@ -23702,10 +23708,10 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         return Air.internedToRef(val.toIntern());
     }
 
-    switch (operand_ty.zigTypeTag(mod)) {
+    switch (operand_ty.zigTypeTag(zcu)) {
         .Int => {
             const runtime_src = if (try sema.resolveValue(operand)) |val| {
-                if (val.isUndef(mod)) return pt.undefRef(operand_ty);
+                if (val.isUndef(zcu)) return pt.undefRef(operand_ty);
                 const result_val = try val.byteSwap(operand_ty, pt, sema.arena);
                 return Air.internedToRef(result_val.toIntern());
             } else operand_src;
@@ -23715,10 +23721,10 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Vector => {
             const runtime_src = if (try sema.resolveValue(operand)) |val| {
-                if (val.isUndef(mod))
+                if (val.isUndef(zcu))
                     return pt.undefRef(operand_ty);
 
-                const vec_len = operand_ty.vectorLen(mod);
+                const vec_len = operand_ty.vectorLen(zcu);
                 const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                 for (elems, 0..) |*elem, i| {
                     const elem_val = try val.elemValue(pt, i);
@@ -23750,11 +23756,11 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     }
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (operand_ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (operand_ty.zigTypeTag(zcu)) {
         .Int => {
             const runtime_src = if (try sema.resolveValue(operand)) |val| {
-                if (val.isUndef(mod)) return pt.undefRef(operand_ty);
+                if (val.isUndef(zcu)) return pt.undefRef(operand_ty);
                 const result_val = try val.bitReverse(operand_ty, pt, sema.arena);
                 return Air.internedToRef(result_val.toIntern());
             } else operand_src;
@@ -23764,10 +23770,10 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         },
         .Vector => {
             const runtime_src = if (try sema.resolveValue(operand)) |val| {
-                if (val.isUndef(mod))
+                if (val.isUndef(zcu))
                     return pt.undefRef(operand_ty);
 
-                const vec_len = operand_ty.vectorLen(mod);
+                const vec_len = operand_ty.vectorLen(zcu);
                 const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                 for (elems, 0..) |*elem, i| {
                     const elem_val = try val.elemValue(pt, i);
@@ -23810,26 +23816,26 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     });
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     try ty.resolveLayout(pt);
-    switch (ty.zigTypeTag(mod)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Struct => {},
         else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(pt)}),
     }
 
-    const field_index = if (ty.isTuple(mod)) blk: {
+    const field_index = if (ty.isTuple(zcu)) blk: {
         if (field_name.eqlSlice("len", ip)) {
             return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
         }
         break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
     } else try sema.structFieldIndex(block, ty, field_name, rhs_src);
 
-    if (ty.structFieldIsComptime(field_index, mod)) {
+    if (ty.structFieldIsComptime(field_index, zcu)) {
         return sema.fail(block, src, "no offset available for comptime field", .{});
     }
 
-    switch (ty.containerLayout(mod)) {
+    switch (ty.containerLayout(zcu)) {
         .@"packed" => {
             var bit_sum: u64 = 0;
             const struct_type = ip.loadStructType(ty.toIntern());
@@ -23838,17 +23844,17 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
                     return bit_sum;
                 }
                 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-                bit_sum += field_ty.bitSize(pt);
+                bit_sum += field_ty.bitSize(zcu);
             } else unreachable;
         },
-        else => return ty.structFieldOffset(field_index, pt) * 8,
+        else => return ty.structFieldOffset(field_index, zcu) * 8,
     }
 }
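
Concretely, the two branches mean a packed struct reports exact bit positions (the running `bit_sum`) while extern and auto layouts report the byte offset scaled by 8. A small illustration:

    const std = @import("std");
    const P = packed struct { a: u3, b: u5 };
    comptime {
        // b starts after a's 3 bits, matching the bit_sum loop above.
        std.debug.assert(@bitOffsetOf(P, "b") == 3);
    }
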
 
 fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Struct, .Enum, .Union, .Opaque => return,
         else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(pt)}),
     }
@@ -23857,8 +23863,8 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com
 /// Returns `true` if the type was a comptime_int.
 fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (try ty.zigTypeTagOrPoison(mod)) {
+    const zcu = pt.zcu;
+    switch (try ty.zigTypeTagOrPoison(zcu)) {
         .ComptimeInt => return true,
         .Int => return false,
         else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(pt)}),
@@ -23872,9 +23878,9 @@ fn checkInvalidPtrIntArithmetic(
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (try ty.zigTypeTagOrPoison(mod)) {
-        .Pointer => switch (ty.ptrSize(mod)) {
+    const zcu = pt.zcu;
+    switch (try ty.zigTypeTagOrPoison(zcu)) {
+        .Pointer => switch (ty.ptrSize(zcu)) {
             .One, .Slice => return,
             .Many, .C => return sema.failWithInvalidPtrArithmetic(block, src, "pointer-integer", "addition and subtraction"),
         },
@@ -23908,8 +23914,8 @@ fn checkPtrOperand(
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Pointer => return,
         .Fn => {
             const msg = msg: {
@@ -23926,7 +23932,7 @@ fn checkPtrOperand(
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         },
-        .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
+        .Optional => if (ty.childType(zcu).zigTypeTag(zcu) == .Pointer) return,
         else => {},
     }
     return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
@@ -23940,9 +23946,9 @@ fn checkPtrType(
     allow_slice: bool,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
-        .Pointer => if (allow_slice or !ty.isSlice(mod)) return,
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
+        .Pointer => if (allow_slice or !ty.isSlice(zcu)) return,
         .Fn => {
             const msg = msg: {
                 const msg = try sema.errMsg(
@@ -23958,7 +23964,7 @@ fn checkPtrType(
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         },
-        .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
+        .Optional => if (ty.childType(zcu).zigTypeTag(zcu) == .Pointer) return,
         else => {},
     }
     return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
@@ -23971,10 +23977,10 @@ fn checkVectorElemType(
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Int, .Float, .Bool => return,
-        .Optional, .Pointer => if (ty.isPtrAtRuntime(mod)) return,
+        .Optional, .Pointer => if (ty.isPtrAtRuntime(zcu)) return,
         else => {},
     }
     return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(pt)});
@@ -23987,8 +23993,8 @@ fn checkFloatType(
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .ComptimeInt, .ComptimeFloat, .Float => {},
         else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(pt)}),
     }
@@ -24001,10 +24007,10 @@ fn checkNumericType(
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
-        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+        .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
             .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
             else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
         },
@@ -24023,9 +24029,9 @@ fn checkAtomicPtrOperand(
     ptr_const: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
-    const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
+    const zcu = pt.zcu;
+    var diag: Zcu.AtomicPtrAlignmentDiagnostics = .{};
+    const alignment = zcu.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         error.FloatTooBig => return sema.fail(
             block,
@@ -24056,8 +24062,8 @@ fn checkAtomicPtrOperand(
     };
 
     const ptr_ty = sema.typeOf(ptr);
-    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
-        .Pointer => ptr_ty.ptrInfo(mod),
+    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(zcu)) {
+        .Pointer => ptr_ty.ptrInfo(zcu),
         else => {
             const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
@@ -24095,13 +24101,13 @@ fn checkIntOrVector(
     operand_src: LazySrcLoc,
 ) CompileError!Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
-    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
+    switch (try operand_ty.zigTypeTagOrPoison(zcu)) {
         .Int => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType(mod);
-            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
+            const elem_ty = operand_ty.childType(zcu);
+            switch (try elem_ty.zigTypeTagOrPoison(zcu)) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                     elem_ty.fmt(pt),
@@ -24121,12 +24127,12 @@ fn checkIntOrVectorAllowComptime(
     operand_src: LazySrcLoc,
 ) CompileError!Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
+    const zcu = pt.zcu;
+    switch (try operand_ty.zigTypeTagOrPoison(zcu)) {
         .Int, .ComptimeInt => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType(mod);
-            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
+            const elem_ty = operand_ty.childType(zcu);
+            switch (try elem_ty.zigTypeTagOrPoison(zcu)) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                     elem_ty.fmt(pt),
@@ -24162,12 +24168,12 @@ fn checkSimdBinOp(
     rhs_src: LazySrcLoc,
 ) CompileError!SimdBinOp {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = sema.typeOf(uncasted_lhs);
     const rhs_ty = sema.typeOf(uncasted_rhs);
 
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    const vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
+    const vec_len: ?usize = if (lhs_ty.zigTypeTag(zcu) == .Vector) lhs_ty.vectorLen(zcu) else null;
     const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
@@ -24181,7 +24187,7 @@ fn checkSimdBinOp(
         .lhs_val = try sema.resolveValue(lhs),
         .rhs_val = try sema.resolveValue(rhs),
         .result_ty = result_ty,
-        .scalar_ty = result_ty.scalarType(mod),
+        .scalar_ty = result_ty.scalarType(zcu),
     };
 }
 
@@ -24195,9 +24201,9 @@ fn checkVectorizableBinaryOperands(
     rhs_src: LazySrcLoc,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
+    const zcu = pt.zcu;
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
     if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;
 
     const lhs_is_vector = switch (lhs_zig_ty_tag) {
@@ -24210,8 +24216,8 @@ fn checkVectorizableBinaryOperands(
     };
 
     if (lhs_is_vector and rhs_is_vector) {
-        const lhs_len = lhs_ty.arrayLen(mod);
-        const rhs_len = rhs_ty.arrayLen(mod);
+        const lhs_len = lhs_ty.arrayLen(zcu);
+        const rhs_len = rhs_ty.arrayLen(zcu);
         if (lhs_len != rhs_len) {
             const msg = msg: {
                 const msg = try sema.errMsg(src, "vector length mismatch", .{});
@@ -24246,11 +24252,11 @@ fn resolveExportOptions(
     block: *Block,
     src: LazySrcLoc,
     zir_ref: Zir.Inst.Ref,
-) CompileError!Module.Export.Options {
+) CompileError!Zcu.Export.Options {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const export_options_ty = try pt.getBuiltinType("ExportOptions");
     const air_ref = try sema.resolveInst(zir_ref);
     const options = try sema.coerce(block, export_options_ty, air_ref, src);
@@ -24269,13 +24275,13 @@ fn resolveExportOptions(
     const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{
         .needed_comptime_reason = "linkage of exported value must be comptime-known",
     });
-    const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
+    const linkage = zcu.toEnum(std.builtin.GlobalLinkage, linkage_val);
 
     const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "section", .no_embedded_nulls), section_src);
     const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{
         .needed_comptime_reason = "linksection of exported value must be comptime-known",
     });
-    const section = if (section_opt_val.optionalValue(mod)) |section_val|
+    const section = if (section_opt_val.optionalValue(zcu)) |section_val|
         try sema.toConstString(block, section_src, Air.internedToRef(section_val.toIntern()), .{
             .needed_comptime_reason = "linksection of exported value must be comptime-known",
         })
@@ -24286,7 +24292,7 @@ fn resolveExportOptions(
     const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{
         .needed_comptime_reason = "visibility of exported value must be comptime-known",
     });
-    const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val);
+    const visibility = zcu.toEnum(std.builtin.SymbolVisibility, visibility_val);
 
     if (name.len < 1) {
         return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
@@ -24349,7 +24355,7 @@ fn zirCmpxchg(
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
     const air_tag: Air.Inst.Tag = switch (extended.small) {
         0 => .cmpxchg_weak,
@@ -24367,7 +24373,7 @@ fn zirCmpxchg(
     // zig fmt: on
     const expected_value = try sema.resolveInst(extra.expected_value);
     const elem_ty = sema.typeOf(expected_value);
-    if (elem_ty.zigTypeTag(mod) == .Float) {
+    if (elem_ty.zigTypeTag(zcu) == .Float) {
         return sema.fail(
             block,
             elem_ty_src,
@@ -24411,7 +24417,7 @@ fn zirCmpxchg(
     const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
         if (try sema.resolveValue(expected_value)) |expected_val| {
             if (try sema.resolveValue(new_value)) |new_val| {
-                if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
+                if (expected_val.isUndef(zcu) or new_val.isUndef(zcu)) {
                     // TODO: this should probably cause the memory stored at the pointer
                     // to become undef as well
                     return pt.undefRef(result_ty);
@@ -24420,7 +24426,7 @@ fn zirCmpxchg(
                 const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
                 const result_val = try pt.intern(.{ .opt = .{
                     .ty = result_ty.toIntern(),
-                    .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: {
+                    .val = if (stored_val.eql(expected_val, elem_ty, zcu)) blk: {
                         try sema.storePtr(block, src, ptr, new_value);
                         break :blk .none;
                     } else stored_val.toIntern(),
@@ -24450,16 +24456,16 @@ fn zirCmpxchg(
 
 fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = block.nodeOffset(inst_data.src_node);
     const scalar_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@splat");
 
-    if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(pt)});
+    if (!dest_ty.isVector(zcu)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(pt)});
 
-    if (!dest_ty.hasRuntimeBits(pt)) {
+    if (!dest_ty.hasRuntimeBits(zcu)) {
         const empty_aggregate = try pt.intern(.{ .aggregate = .{
             .ty = dest_ty.toIntern(),
             .storage = .{ .elems = &[_]InternPool.Index{} },
@@ -24468,10 +24474,10 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     }
 
     const operand = try sema.resolveInst(extra.rhs);
-    const scalar_ty = dest_ty.childType(mod);
+    const scalar_ty = dest_ty.childType(zcu);
     const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src);
     if (try sema.resolveValue(scalar)) |scalar_val| {
-        if (scalar_val.isUndef(mod)) return pt.undefRef(dest_ty);
+        if (scalar_val.isUndef(zcu)) return pt.undefRef(dest_ty);
         return Air.internedToRef((try sema.splat(dest_ty, scalar_val)).toIntern());
     }
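
Two comptime shortcuts are visible above: a zero-bit destination interns an empty aggregate, and a comptime-known scalar is splatted directly without emitting AIR. Behaviorally:

    const v: @Vector(4, u8) = @splat(7); // comptime result .{ 7, 7, 7, 7 }
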
 
@@ -24490,23 +24496,23 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (operand_ty.zigTypeTag(mod) != .Vector) {
+    if (operand_ty.zigTypeTag(zcu) != .Vector) {
         return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(pt)});
     }
 
-    const scalar_ty = operand_ty.childType(mod);
+    const scalar_ty = operand_ty.childType(zcu);
 
     // Type-check depending on operation.
     switch (operation) {
-        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
+        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) {
             .Int, .Bool => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
                 @tagName(operation), operand_ty.fmt(pt),
             }),
         },
-        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
+        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
             .Int, .Float => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
                 @tagName(operation), operand_ty.fmt(pt),
@@ -24514,7 +24520,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         },
     }
 
-    const vec_len = operand_ty.vectorLen(mod);
+    const vec_len = operand_ty.vectorLen(zcu);
     if (vec_len == 0) {
         // TODO re-evaluate if we should introduce a "neutral value" for some operations,
         // e.g. zero for add and one for mul.
@@ -24522,7 +24528,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     }
 
     if (try sema.resolveValue(operand)) |operand_val| {
-        if (operand_val.isUndef(mod)) return pt.undefRef(scalar_ty);
+        if (operand_val.isUndef(zcu)) return pt.undefRef(scalar_ty);
 
         var accum: Value = try operand_val.elemValue(pt, 0);
         var i: u32 = 1;
@@ -24532,8 +24538,8 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, pt),
                 .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, pt),
                 .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, pt),
-                .Min => accum = accum.numberMin(elem_val, pt),
-                .Max => accum = accum.numberMax(elem_val, pt),
+                .Min => accum = accum.numberMin(elem_val, zcu),
+                .Max => accum = accum.numberMax(elem_val, zcu),
                 .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
                 .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, pt),
             }
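
The loop folds the operation left-to-right over the elements; note that after this commit only .Min and .Max take the bare `zcu` (pure value comparison), while the arithmetic cases still need `pt` to intern new values. At the language level:

    const v: @Vector(4, i32) = .{ 3, -1, 7, 2 };
    const m = @reduce(.Max, v); // 7, folded via numberMax
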
@@ -24553,7 +24559,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 
 fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
     const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
@@ -24566,8 +24572,8 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var mask = try sema.resolveInst(extra.mask);
     var mask_ty = sema.typeOf(mask);
 
-    const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
+    const mask_len = switch (sema.typeOf(mask).zigTypeTag(zcu)) {
+        .Array, .Vector => sema.typeOf(mask).arrayLen(zcu),
         else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(pt)}),
     };
     mask_ty = try pt.vectorType(.{
@@ -24592,6 +24598,7 @@ fn analyzeShuffle(
     mask_len: u32,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
+    const zcu = pt.zcu;
     const a_src = block.builtinCallArgSrc(src_node, 1);
     const b_src = block.builtinCallArgSrc(src_node, 2);
     const mask_src = block.builtinCallArgSrc(src_node, 3);
@@ -24603,16 +24610,16 @@ fn analyzeShuffle(
         .child = elem_ty.toIntern(),
     });
 
-    const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(pt.zcu)) {
-        .Array, .Vector => sema.typeOf(a).arrayLen(pt.zcu),
+    const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(zcu)) {
+        .Array, .Vector => sema.typeOf(a).arrayLen(zcu),
         .Undefined => null,
         else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(pt),
             sema.typeOf(a).fmt(pt),
         }),
     };
-    const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(pt.zcu)) {
-        .Array, .Vector => sema.typeOf(b).arrayLen(pt.zcu),
+    const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(zcu)) {
+        .Array, .Vector => sema.typeOf(b).arrayLen(zcu),
         .Undefined => null,
         else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(pt),
@@ -24644,9 +24651,9 @@ fn analyzeShuffle(
 
     for (0..@intCast(mask_len)) |i| {
         const elem = try mask.elemValue(pt, i);
-        if (elem.isUndef(pt.zcu)) continue;
+        if (elem.isUndef(zcu)) continue;
         const elem_resolved = try sema.resolveLazyValue(elem);
-        const int = elem_resolved.toSignedInt(pt);
+        const int = elem_resolved.toSignedInt(zcu);
         var unsigned: u32 = undefined;
         var chosen: u32 = undefined;
         if (int >= 0) {
@@ -24681,11 +24688,11 @@ fn analyzeShuffle(
             const values = try sema.arena.alloc(InternPool.Index, mask_len);
             for (values, 0..) |*value, i| {
                 const mask_elem_val = try mask.elemValue(pt, i);
-                if (mask_elem_val.isUndef(pt.zcu)) {
+                if (mask_elem_val.isUndef(zcu)) {
                     value.* = try pt.intern(.{ .undef = elem_ty.toIntern() });
                     continue;
                 }
-                const int = mask_elem_val.toSignedInt(pt);
+                const int = mask_elem_val.toSignedInt(zcu);
                 const unsigned: u32 = @intCast(if (int >= 0) int else ~int);
                 values[i] = (try (if (int >= 0) a_val else b_val).elemValue(pt, unsigned)).toIntern();
             }
@@ -24743,7 +24750,7 @@ fn analyzeShuffle(
 
 fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;
 
     const src = block.nodeOffset(extra.node);
@@ -24757,8 +24764,8 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const pred_uncoerced = try sema.resolveInst(extra.pred);
     const pred_ty = sema.typeOf(pred_uncoerced);
 
-    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
-        .Vector, .Array => pred_ty.arrayLen(mod),
+    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(zcu)) {
+        .Vector, .Array => pred_ty.arrayLen(zcu),
         else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(pt)}),
     };
     const vec_len: u32 = @intCast(try sema.usizeCast(block, pred_src, vec_len_u64));
@@ -24781,13 +24788,13 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const maybe_b = try sema.resolveValue(b);
 
     const runtime_src = if (maybe_pred) |pred_val| rs: {
-        if (pred_val.isUndef(mod)) return pt.undefRef(vec_ty);
+        if (pred_val.isUndef(zcu)) return pt.undefRef(vec_ty);
 
         if (maybe_a) |a_val| {
-            if (a_val.isUndef(mod)) return pt.undefRef(vec_ty);
+            if (a_val.isUndef(zcu)) return pt.undefRef(vec_ty);
 
             if (maybe_b) |b_val| {
-                if (b_val.isUndef(mod)) return pt.undefRef(vec_ty);
+                if (b_val.isUndef(zcu)) return pt.undefRef(vec_ty);
 
                 const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
                 defer sema.gpa.free(elems);
@@ -24806,16 +24813,16 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
             }
         } else {
             if (maybe_b) |b_val| {
-                if (b_val.isUndef(mod)) return pt.undefRef(vec_ty);
+                if (b_val.isUndef(zcu)) return pt.undefRef(vec_ty);
             }
             break :rs a_src;
         }
     } else rs: {
         if (maybe_a) |a_val| {
-            if (a_val.isUndef(mod)) return pt.undefRef(vec_ty);
+            if (a_val.isUndef(zcu)) return pt.undefRef(vec_ty);
         }
         if (maybe_b) |b_val| {
-            if (b_val.isUndef(mod)) return pt.undefRef(vec_ty);
+            if (b_val.isUndef(zcu)) return pt.undefRef(vec_ty);
         }
         break :rs pred_src;
     };
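
If every operand is comptime-known and defined, the earlier branch builds the result element-wise; otherwise the first unresolvable operand becomes the runtime source. A usage sketch of the builtin being implemented:

    const pred: @Vector(4, bool) = .{ true, false, true, false };
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 5, 6, 7, 8 };
    const r = @select(u8, pred, a, b); // .{ 1, 6, 3, 8 }
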
@@ -24882,7 +24889,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 
 fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
     const src = block.nodeOffset(inst_data.src_node);
@@ -24899,7 +24906,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
     const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
 
-    switch (elem_ty.zigTypeTag(mod)) {
+    switch (elem_ty.zigTypeTag(zcu)) {
         .Enum => if (op != .Xchg) {
             return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
         },
@@ -24939,12 +24946,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
                 .Xchg => operand_val,
                 .Add  => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
                 .Sub  => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
-                .And  => try                   stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, pt),
-                .Nand => try                   stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, pt),
-                .Or   => try                   stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, pt),
-                .Xor  => try                   stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, pt),
-                .Max  =>                       stored_val.numberMax    (operand_val,                      pt),
-                .Min  =>                       stored_val.numberMin    (operand_val,                      pt),
+                .And  => try                   stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, pt ),
+                .Nand => try                   stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, pt ),
+                .Or   => try                   stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, pt ),
+                .Xor  => try                   stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, pt ),
+                .Max  =>                       stored_val.numberMax    (operand_val,                      zcu),
+                .Min  =>                       stored_val.numberMin    (operand_val,                      zcu),
                 // zig fmt: on
             };
             try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
@@ -25021,19 +25028,19 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const maybe_mulend2 = try sema.resolveValue(mulend2);
     const maybe_addend = try sema.resolveValue(addend);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    switch (ty.scalarType(mod).zigTypeTag(mod)) {
+    switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
         .ComptimeFloat, .Float => {},
         else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(pt)}),
     }
 
     const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
         if (maybe_mulend2) |mulend2_val| {
-            if (mulend2_val.isUndef(mod)) return pt.undefRef(ty);
+            if (mulend2_val.isUndef(zcu)) return pt.undefRef(ty);
 
             if (maybe_addend) |addend_val| {
-                if (addend_val.isUndef(mod)) return pt.undefRef(ty);
+                if (addend_val.isUndef(zcu)) return pt.undefRef(ty);
                 const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, pt);
                 return Air.internedToRef(result_val.toIntern());
             } else {
@@ -25041,16 +25048,16 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             }
         } else {
             if (maybe_addend) |addend_val| {
-                if (addend_val.isUndef(mod)) return pt.undefRef(ty);
+                if (addend_val.isUndef(zcu)) return pt.undefRef(ty);
             }
             break :rs mulend2_src;
         }
     } else rs: {
         if (maybe_mulend2) |mulend2_val| {
-            if (mulend2_val.isUndef(mod)) return pt.undefRef(ty);
+            if (mulend2_val.isUndef(zcu)) return pt.undefRef(ty);
         }
         if (maybe_addend) |addend_val| {
-            if (addend_val.isUndef(mod)) return pt.undefRef(ty);
+            if (addend_val.isUndef(zcu)) return pt.undefRef(ty);
         }
         break :rs mulend1_src;
     };
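
As in zirSelect, an undef in any operand makes the whole result undef; otherwise the comptime path calls `Value.mulAdd`. The builtin computes a fused multiply-add:

    const r = @mulAdd(f64, 2.0, 3.0, 1.0); // 7.0, with a single rounding step
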
@@ -25073,7 +25080,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const modifier_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const func_src = block.builtinCallArgSrc(inst_data.src_node, 1);
@@ -25089,7 +25096,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{
         .needed_comptime_reason = "call modifier must be comptime-known",
     });
-    var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val);
+    var modifier = zcu.toEnum(std.builtin.CallModifier, modifier_val);
     switch (modifier) {
         // These can be upgraded to comptime or nosuspend calls.
         .auto, .never_tail, .no_async => {
@@ -25135,11 +25142,11 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const args = try sema.resolveInst(extra.args);
 
     const args_ty = sema.typeOf(args);
-    if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) {
+    if (!args_ty.isTuple(zcu) and args_ty.toIntern() != .empty_struct_type) {
         return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(pt)});
     }
 
-    const resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
+    const resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(zcu));
     for (resolved_args, 0..) |*resolved, i| {
         resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(i), args_ty);
     }
@@ -25219,7 +25226,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     var actual_parent_ptr_info: InternPool.Key.PtrType = .{
         .child = parent_ty.toIntern(),
         .flags = .{
-            .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema),
+            .alignment = try parent_ptr_ty.ptrAlignmentSema(pt),
             .is_const = field_ptr_info.flags.is_const,
             .is_volatile = field_ptr_info.flags.is_volatile,
             .is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -25231,7 +25238,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     var actual_field_ptr_info: InternPool.Key.PtrType = .{
         .child = field_ty.toIntern(),
         .flags = .{
-            .alignment = try field_ptr_ty.ptrAlignmentAdvanced(pt, .sema),
+            .alignment = try field_ptr_ty.ptrAlignmentSema(pt),
             .is_const = field_ptr_info.flags.is_const,
             .is_volatile = field_ptr_info.flags.is_volatile,
             .is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -25242,13 +25249,20 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     switch (parent_ty.containerLayout(zcu)) {
         .auto => {
             actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
-                if (zcu.typeToStruct(parent_ty)) |struct_obj| try pt.structFieldAlignmentAdvanced(
+                if (zcu.typeToStruct(parent_ty)) |struct_obj| try field_ty.structFieldAlignmentAdvanced(
                     struct_obj.fieldAlign(ip, field_index),
-                    field_ty,
                     struct_obj.layout,
                     .sema,
+                    pt.zcu,
+                    pt.tid,
                 ) else if (zcu.typeToUnion(parent_ty)) |union_obj|
-                    try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema)
+                    try Type.unionFieldNormalAlignmentAdvanced(
+                        union_obj,
+                        field_index,
+                        .sema,
+                        pt.zcu,
+                        pt.tid,
+                    )
                 else
                     actual_field_ptr_info.flags.alignment,
             );
@@ -25257,7 +25271,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
             actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 };
         },
         .@"extern" => {
-            const field_offset = parent_ty.structFieldOffset(field_index, pt);
+            const field_offset = parent_ty.structFieldOffset(field_index, zcu);
             actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (field_offset > 0)
                 Alignment.fromLog2Units(@ctz(field_offset))
             else
@@ -25287,7 +25301,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
             .Struct => switch (parent_ty.containerLayout(zcu)) {
                 .auto => {},
                 .@"extern" => {
-                    const byte_offset = parent_ty.structFieldOffset(field_index, pt);
+                    const byte_offset = parent_ty.structFieldOffset(field_index, zcu);
                     const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
                     break :result Air.internedToRef(parent_ptr_val.toIntern());
                 },
@@ -25428,7 +25442,7 @@ fn analyzeMinMax(
     assert(operands.len == operand_srcs.len);
     assert(operands.len > 0);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     if (operands.len == 1) return operands[0];
 
@@ -25466,20 +25480,20 @@ fn analyzeMinMax(
         switch (bounds_status) {
             .unknown, .defined => refine_bounds: {
                 const ty = sema.typeOf(operand);
-                if (!ty.scalarType(mod).isInt(mod) and !ty.scalarType(mod).eql(Type.comptime_int, mod)) {
+                if (!ty.scalarType(zcu).isInt(zcu) and !ty.scalarType(zcu).eql(Type.comptime_int, zcu)) {
                     bounds_status = .non_integral;
                     break :refine_bounds;
                 }
                 const scalar_bounds: ?[2]Value = bounds: {
-                    if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(pt);
+                    if (!ty.isVector(zcu)) break :bounds try uncoerced_val.intValueBounds(pt);
                     var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(pt, 0), pt) orelse break :bounds null;
-                    const len = try sema.usizeCast(block, src, ty.vectorLen(mod));
+                    const len = try sema.usizeCast(block, src, ty.vectorLen(zcu));
                     for (1..len) |i| {
                         const elem = try uncoerced_val.elemValue(pt, i);
                         const elem_bounds = try elem.intValueBounds(pt) orelse break :bounds null;
                         cur_bounds = .{
-                            Value.numberMin(elem_bounds[0], cur_bounds[0], pt),
-                            Value.numberMax(elem_bounds[1], cur_bounds[1], pt),
+                            Value.numberMin(elem_bounds[0], cur_bounds[0], zcu),
+                            Value.numberMax(elem_bounds[1], cur_bounds[1], zcu),
                         };
                     }
                     break :bounds cur_bounds;
@@ -25490,8 +25504,8 @@ fn analyzeMinMax(
                         cur_max_scalar = bounds[1];
                         bounds_status = .defined;
                     } else {
-                        cur_min_scalar = opFunc(cur_min_scalar, bounds[0], pt);
-                        cur_max_scalar = opFunc(cur_max_scalar, bounds[1], pt);
+                        cur_min_scalar = opFunc(cur_min_scalar, bounds[0], zcu);
+                        cur_max_scalar = opFunc(cur_max_scalar, bounds[1], zcu);
                     }
                 }
             },
@@ -25509,7 +25523,7 @@ fn analyzeMinMax(
         const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above
 
         const vec_len = simd_op.len orelse {
-            const result_val = opFunc(cur_val, operand_val, pt);
+            const result_val = opFunc(cur_val, operand_val, zcu);
             cur_minmax = Air.internedToRef(result_val.toIntern());
             continue;
         };
@@ -25517,7 +25531,7 @@ fn analyzeMinMax(
         for (elems, 0..) |*elem, i| {
             const lhs_elem_val = try cur_val.elemValue(pt, i);
             const rhs_elem_val = try operand_val.elemValue(pt, i);
-            const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, pt);
+            const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, zcu);
             elem.* = (try pt.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern();
         }
         cur_minmax = Air.internedToRef((try pt.intern(.{ .aggregate = .{
@@ -25537,19 +25551,19 @@ fn analyzeMinMax(
         const val = (try sema.resolveValue(ct_minmax_ref)).?;
         const orig_ty = sema.typeOf(ct_minmax_ref);
 
-        if (opt_runtime_idx == null and orig_ty.scalarType(mod).eql(Type.comptime_int, mod)) {
+        if (opt_runtime_idx == null and orig_ty.scalarType(zcu).eql(Type.comptime_int, zcu)) {
             // If all arguments were `comptime_int`, and there are no runtime args, we'll preserve that type
             break :refine;
         }
 
         // We can't refine float types
-        if (orig_ty.scalarType(mod).isAnyFloat()) break :refine;
+        if (orig_ty.scalarType(zcu).isAnyFloat()) break :refine;
 
         assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg
 
         const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar);
-        const refined_ty = if (orig_ty.isVector(mod)) try pt.vectorType(.{
-            .len = orig_ty.vectorLen(mod),
+        const refined_ty = if (orig_ty.isVector(zcu)) try pt.vectorType(.{
+            .len = orig_ty.vectorLen(zcu),
             .child = refined_scalar_ty.toIntern(),
         }) else refined_scalar_ty;
 
@@ -25570,7 +25584,7 @@ fn analyzeMinMax(
     // If the comptime-known part is undef we can avoid emitting actual instructions later
     const known_undef = if (cur_minmax) |operand| blk: {
         const val = (try sema.resolveValue(operand)).?;
-        break :blk val.isUndef(mod);
+        break :blk val.isUndef(zcu);
     } else false;
 
     if (cur_minmax == null) {
@@ -25580,8 +25594,8 @@ fn analyzeMinMax(
         cur_minmax = operands[0];
         cur_minmax_src = runtime_src;
         runtime_known.unset(0); // don't look at this operand in the loop below
-        const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod);
-        if (scalar_ty.isInt(mod)) {
+        const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(zcu);
+        if (scalar_ty.isInt(zcu)) {
             cur_min_scalar = try scalar_ty.minInt(pt, scalar_ty);
             cur_max_scalar = try scalar_ty.maxInt(pt, scalar_ty);
             bounds_status = .defined;
@@ -25605,7 +25619,7 @@ fn analyzeMinMax(
         // Compute the bounds of this type
         switch (bounds_status) {
             .unknown, .defined => refine_bounds: {
-                const scalar_ty = sema.typeOf(rhs).scalarType(mod);
+                const scalar_ty = sema.typeOf(rhs).scalarType(zcu);
                 if (scalar_ty.isAnyFloat()) {
                     bounds_status = .non_integral;
                     break :refine_bounds;
@@ -25617,8 +25631,8 @@ fn analyzeMinMax(
                     cur_max_scalar = scalar_max;
                     bounds_status = .defined;
                 } else {
-                    cur_min_scalar = opFunc(cur_min_scalar, scalar_min, pt);
-                    cur_max_scalar = opFunc(cur_max_scalar, scalar_max, pt);
+                    cur_min_scalar = opFunc(cur_min_scalar, scalar_min, zcu);
+                    cur_max_scalar = opFunc(cur_max_scalar, scalar_max, zcu);
                 }
             },
             .non_integral => {},
@@ -25627,18 +25641,18 @@ fn analyzeMinMax(
 
     // Finally, refine the type based on the known bounds.
     const unrefined_ty = sema.typeOf(cur_minmax.?);
-    if (unrefined_ty.scalarType(mod).isAnyFloat()) {
+    if (unrefined_ty.scalarType(zcu).isAnyFloat()) {
         // We can't refine floats, so we're done.
         return cur_minmax.?;
     }
     assert(bounds_status == .defined); // there were integral runtime operands
     const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar);
-    const refined_ty = if (unrefined_ty.isVector(mod)) try pt.vectorType(.{
-        .len = unrefined_ty.vectorLen(mod),
+    const refined_ty = if (unrefined_ty.isVector(zcu)) try pt.vectorType(.{
+        .len = unrefined_ty.vectorLen(zcu),
         .child = refined_scalar_ty.toIntern(),
     }) else refined_scalar_ty;
 
-    if (!refined_ty.eql(unrefined_ty, mod)) {
+    if (!refined_ty.eql(unrefined_ty, zcu)) {
         // We've reduced the type - cast the result down
         return block.addTyOp(.intcast, refined_ty, cur_minmax.?);
     }
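// The observable effect of this refinement, sketched from user code: a
// comptime-known operand bounds the result, so @min may return a narrower
// integer type than its runtime operand.
const std = @import("std");

test "@min refines its result type from known bounds" {
    var a: u32 = 100;
    _ = &a;
    const m = @min(a, 42); // result bounds are [0, 42]
    try std.testing.expect(@typeInfo(@TypeOf(m)).Int.bits <= 32);
    try std.testing.expectEqual(@as(u32, 42), m);
}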
@@ -25648,9 +25662,9 @@ fn analyzeMinMax(
 
 fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_ty = sema.typeOf(ptr);
-    const info = ptr_ty.ptrInfo(mod);
+    const info = ptr_ty.ptrInfo(zcu);
     if (info.flags.size == .One) {
         // Already an array pointer.
         return ptr;
@@ -25670,7 +25684,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A
         },
     });
     const non_slice_ptr = if (info.flags.size == .Slice)
-        try block.addTyOp(.slice_ptr, ptr_ty.slicePtrFieldType(mod), ptr)
+        try block.addTyOp(.slice_ptr, ptr_ty.slicePtrFieldType(zcu), ptr)
     else
         ptr;
     return block.addBitCast(new_ty, non_slice_ptr);
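// The language-level analogue of this upgrade: slicing with comptime-known
// bounds turns a slice into a pointer to an array.
const std = @import("std");

test "comptime-known length upgrades a slice to an array pointer" {
    var buf = [_]u8{ 9, 8, 7, 6 };
    const s: []u8 = buf[0..];
    const arr_ptr: *[4]u8 = s[0..4];
    try std.testing.expectEqual(@as(u8, 6), arr_ptr[3]);
}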
@@ -25689,10 +25703,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
     const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
 
-    if (dest_ty.isConstPtr(mod)) {
+    if (dest_ty.isConstPtr(zcu)) {
         return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
     }
 
@@ -25755,7 +25769,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
         if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src;
         if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
-            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(pt, .sema)).?;
+            const len_u64 = try len_val.?.toUnsignedIntSema(pt);
             const len = try sema.usizeCast(block, dest_src, len_u64);
             for (0..len) |i| {
                 const elem_index = try pt.intRef(Type.usize, i);
@@ -25798,12 +25812,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     // lowering. The AIR instruction requires pointers with element types of
     // equal ABI size.
 
-    if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) {
+    if (dest_ty.zigTypeTag(zcu) != .Pointer or src_ty.zigTypeTag(zcu) != .Pointer) {
         return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
     }
 
-    const dest_elem_ty = dest_ty.elemType2(mod);
-    const src_elem_ty = src_ty.elemType2(mod);
+    const dest_elem_ty = dest_ty.elemType2(zcu);
+    const src_elem_ty = src_ty.elemType2(zcu);
     if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src, null)) {
         return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
     }
@@ -25827,7 +25841,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         // Change the src from slice to a many pointer, to avoid extracting
         // the slice ptr multiple times in AIR instructions.
         const new_src_ptr_ty = sema.typeOf(new_src_ptr);
-        if (new_src_ptr_ty.isSlice(mod)) {
+        if (new_src_ptr_ty.isSlice(zcu)) {
             new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
         }
     } else if (dest_len == .none and len_val == null) {
@@ -25835,7 +25849,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr);
         new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, LazySrcLoc.unneeded, dest_src, dest_src, dest_src, false);
         const new_src_ptr_ty = sema.typeOf(new_src_ptr);
-        if (new_src_ptr_ty.isSlice(mod)) {
+        if (new_src_ptr_ty.isSlice(zcu)) {
             new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
         }
     }
@@ -25854,10 +25868,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         // Extract raw pointer from dest slice. The AIR instructions could support them, but
         // it would cause redundant machine code instructions.
         const new_dest_ptr_ty = sema.typeOf(new_dest_ptr);
-        const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod))
+        const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(zcu))
             try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty)
-        else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: {
-            var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type;
+        else if (new_dest_ptr_ty.ptrSize(zcu) == .One) ptr: {
+            var dest_manyptr_ty_key = zcu.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type;
             assert(dest_manyptr_ty_key.flags.size == .One);
             dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
             dest_manyptr_ty_key.flags.size = .Many;
@@ -25865,10 +25879,10 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         } else new_dest_ptr;
 
         const new_src_ptr_ty = sema.typeOf(new_src_ptr);
-        const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod))
+        const raw_src_ptr = if (new_src_ptr_ty.isSlice(zcu))
             try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty)
-        else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: {
-            var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type;
+        else if (new_src_ptr_ty.ptrSize(zcu) == .One) ptr: {
+            var src_manyptr_ty_key = zcu.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type;
             assert(src_manyptr_ty_key.flags.size == .One);
             src_manyptr_ty_key.child = src_elem_ty.toIntern();
             src_manyptr_ty_key.flags.size = .Many;
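// What the pointer massaging above buys, seen from user code: @memcpy accepts
// any mix of slices and array pointers with a known length, as long as the
// element types have the same ABI size.
const std = @import("std");

test "@memcpy between an array pointer and a slice" {
    var dst: [4]u8 = undefined;
    const src = [_]u8{ 1, 2, 3, 4 };
    @memcpy(&dst, src[0..]);
    try std.testing.expectEqualSlices(u8, &src, &dst);
}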
@@ -25896,9 +25910,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
 
 fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = block.nodeOffset(inst_data.src_node);
@@ -25909,17 +25923,17 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const dest_ptr_ty = sema.typeOf(dest_ptr);
     try checkMemOperand(sema, block, dest_src, dest_ptr_ty);
 
-    if (dest_ptr_ty.isConstPtr(mod)) {
+    if (dest_ptr_ty.isConstPtr(zcu)) {
         return sema.fail(block, dest_src, "cannot memset constant pointer", .{});
     }
 
     const dest_elem_ty: Type = dest_elem_ty: {
-        const ptr_info = dest_ptr_ty.ptrInfo(mod);
+        const ptr_info = dest_ptr_ty.ptrInfo(zcu);
         switch (ptr_info.flags.size) {
             .Slice => break :dest_elem_ty Type.fromInterned(ptr_info.child),
             .One => {
-                if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
-                    break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(mod);
+                if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Array) {
+                    break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(zcu);
                 }
             },
             .Many, .C => {},
@@ -25940,7 +25954,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
         const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), dest_src);
         const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
-        const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?;
+        const len_u64 = try len_val.toUnsignedIntSema(pt);
         const len = try sema.usizeCast(block, dest_src, len_u64);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
@@ -25958,12 +25972,12 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
             .storage = .{ .repeated_elem = elem_val.toIntern() },
         } }));
         const array_ptr_ty = ty: {
-            var info = dest_ptr_ty.ptrInfo(mod);
+            var info = dest_ptr_ty.ptrInfo(zcu);
             info.flags.size = .One;
             info.child = array_ty.toIntern();
             break :ty try pt.ptrType(info);
         };
-        const raw_ptr_val = if (dest_ptr_ty.isSlice(mod)) ptr_val.slicePtr(mod) else ptr_val;
+        const raw_ptr_val = if (dest_ptr_ty.isSlice(zcu)) ptr_val.slicePtr(zcu) else ptr_val;
         const array_ptr_val = try pt.getCoerced(raw_ptr_val, array_ptr_ty);
         return sema.storePtrVal(block, src, array_ptr_val, array_val, array_ty);
     };
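// A sketch of the two paths above: a comptime-known destination is stored as a
// single repeated-element array value, otherwise a runtime memset instruction
// is emitted; the user-facing builtin is the same either way.
const std = @import("std");

test "@memset fills every element" {
    var buf: [8]u8 = undefined;
    @memset(&buf, 0xaa);
    try std.testing.expectEqual(@as(u8, 0xaa), buf[3]);
}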
@@ -26129,10 +26143,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     defer tracy.end();
 
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
     const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
 
     const align_src = block.src(.{ .node_offset_fn_type_align = inst_data.src_node });
     const addrspace_src = block.src(.{ .node_offset_fn_type_addrspace = inst_data.src_node });
@@ -26207,7 +26221,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.isGenericPoison()) {
             break :blk null;
         }
-        break :blk mod.toEnum(std.builtin.AddressSpace, val);
+        break :blk zcu.toEnum(std.builtin.AddressSpace, val);
     } else if (extra.data.bits.has_addrspace_ref) blk: {
         const addrspace_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
         extra_index += 1;
@@ -26226,7 +26240,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             error.GenericPoison => break :blk null,
             else => |e| return e,
         };
-        break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_val);
+        break :blk zcu.toEnum(std.builtin.AddressSpace, addrspace_val);
     } else target_util.defaultAddressSpace(target, .function);
 
     const section: Section = if (extra.data.bits.has_section_body) blk: {
@@ -26272,7 +26286,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.isGenericPoison()) {
             break :blk null;
         }
-        break :blk mod.toEnum(std.builtin.CallingConvention, val);
+        break :blk zcu.toEnum(std.builtin.CallingConvention, val);
     } else if (extra.data.bits.has_cc_ref) blk: {
         const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
         extra_index += 1;
@@ -26291,18 +26305,18 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             error.GenericPoison => break :blk null,
             else => |e| return e,
         };
-        break :blk mod.toEnum(std.builtin.CallingConvention, cc_val);
+        break :blk zcu.toEnum(std.builtin.CallingConvention, cc_val);
     } else cc: {
         if (has_body) {
             const decl_inst = if (sema.generic_owner != .none) decl_inst: {
                 // Generic instance -- use the original function declaration to
                 // look for the `export` syntax.
-                const nav = mod.intern_pool.getNav(mod.funcInfo(sema.generic_owner).owner_nav);
-                const cau = mod.intern_pool.getCau(nav.analysis_owner.unwrap().?);
+                const nav = zcu.intern_pool.getNav(zcu.funcInfo(sema.generic_owner).owner_nav);
+                const cau = zcu.intern_pool.getCau(nav.analysis_owner.unwrap().?);
                 break :decl_inst cau.zir_index;
             } else sema.getOwnerCauDeclInst(); // not an instantiation so we're analyzing a function declaration Cau
 
-            const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool) orelse return error.AnalysisFail)[0];
+            const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&zcu.intern_pool) orelse return error.AnalysisFail)[0];
             if (zir_decl.flags.is_export) {
                 break :cc .C;
             }
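// The branch above in user terms: an exported function body with no explicit
// calling convention gets callconv(.C) implicitly.
export fn add(a: c_int, b: c_int) c_int {
    return a + b;
}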
@@ -26408,7 +26422,7 @@ fn zirCDefine(
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const name_src = block.builtinCallArgSrc(extra.node, 0);
     const val_src = block.builtinCallArgSrc(extra.node, 1);
@@ -26417,7 +26431,7 @@ fn zirCDefine(
         .needed_comptime_reason = "name of macro being undefined must be comptime-known",
     });
     const rhs = try sema.resolveInst(extra.rhs);
-    if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) {
+    if (sema.typeOf(rhs).zigTypeTag(zcu) != .Void) {
         const value = try sema.resolveConstString(block, val_src, extra.rhs, .{
             .needed_comptime_reason = "value of macro being undefined must be comptime-known",
         });
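// The two arms distinguished above, sketched from user code (FEATURE_LEVEL and
// ENABLE_FOO are made-up macro names; this compiles only where C headers are
// available):
const c = @cImport({
    @cDefine("FEATURE_LEVEL", "2"); // non-void: value must be a comptime string
    @cDefine("ENABLE_FOO", {}); // void: define the macro with no value
    @cInclude("stddef.h");
});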
@@ -26490,9 +26504,9 @@ fn resolvePrefetchOptions(
     zir_ref: Zir.Inst.Ref,
 ) CompileError!std.builtin.PrefetchOptions {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const options_ty = try pt.getBuiltinType("PrefetchOptions");
     const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
 
@@ -26516,9 +26530,9 @@ fn resolvePrefetchOptions(
     });
 
     return std.builtin.PrefetchOptions{
-        .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
+        .rw = zcu.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
         .locality = @intCast(try locality_val.toUnsignedIntSema(pt)),
-        .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
+        .cache = zcu.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
     };
 }
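// Where the resolved options end up, sketched from user code (`warm` is a
// made-up helper name):
fn warm(ptr: *const u8) void {
    @prefetch(ptr, .{ .rw = .read, .locality = 3, .cache = .data });
}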
 
@@ -26562,9 +26576,9 @@ fn resolveExternOptions(
     is_thread_local: bool = false,
 } {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const options_inst = try sema.resolveInst(zir_ref);
     const extern_options_ty = try pt.getBuiltinType("ExternOptions");
     const options = try sema.coerce(block, extern_options_ty, options_inst, src);
@@ -26588,14 +26602,14 @@ fn resolveExternOptions(
     const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{
         .needed_comptime_reason = "linkage of the extern symbol must be comptime-known",
     });
-    const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
+    const linkage = zcu.toEnum(std.builtin.GlobalLinkage, linkage_val);
 
     const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src);
     const is_thread_local_val = try sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{
         .needed_comptime_reason = "threadlocality of the extern symbol must be comptime-known",
     });
 
-    const library_name = if (library_name_val.optionalValue(mod)) |library_name_payload| library_name: {
+    const library_name = if (library_name_val.optionalValue(zcu)) |library_name_payload| library_name: {
         const library_name = try sema.toConstString(block, library_src, Air.internedToRef(library_name_payload.toIntern()), .{
             .needed_comptime_reason = "library in which extern symbol is must be comptime-known",
         });
@@ -26628,14 +26642,14 @@ fn zirBuiltinExtern(
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const ty_src = block.builtinCallArgSrc(extra.node, 0);
     const options_src = block.builtinCallArgSrc(extra.node, 1);
 
     var ty = try sema.resolveType(block, ty_src, extra.lhs);
-    if (!ty.isPtrAtRuntime(mod)) {
+    if (!ty.isPtrAtRuntime(zcu)) {
         return sema.fail(block, ty_src, "expected (optional) pointer", .{});
     }
     if (!try sema.validateExternType(ty, .other)) {
@@ -26652,10 +26666,10 @@ fn zirBuiltinExtern(
 
     // TODO: error for threadlocal functions, non-const functions, etc
 
-    if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) {
+    if (options.linkage == .weak and !ty.ptrAllowsZero(zcu)) {
         ty = try pt.optionalType(ty.toIntern());
     }
-    const ptr_info = ty.ptrInfo(mod);
+    const ptr_info = ty.ptrInfo(zcu);
 
     const extern_val = try pt.getExtern(.{
         .name = options.name,
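// User-level counterpart of the weak-linkage rule above: with .weak linkage
// the pointer type becomes optional, so an absent symbol is observed as null
// (a sketch; "optional_hook" is a made-up symbol name).
const maybe_hook = @extern(?*const fn () callconv(.C) void, .{
    .name = "optional_hook",
    .linkage = .weak,
});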
@@ -26801,7 +26815,7 @@ fn validateVarType(
     is_extern: bool,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (is_extern) {
         if (!try sema.validateExternType(var_ty, .other)) {
             const msg = msg: {
@@ -26813,7 +26827,7 @@ fn validateVarType(
             return sema.failWithOwnedErrorMsg(block, msg);
         }
     } else {
-        if (var_ty.zigTypeTag(mod) == .Opaque) {
+        if (var_ty.zigTypeTag(zcu) == .Opaque) {
             return sema.fail(
                 block,
                 src,
@@ -26823,14 +26837,14 @@ fn validateVarType(
         }
     }
 
-    if (!try sema.typeRequiresComptime(var_ty)) return;
+    if (!try var_ty.comptimeOnlySema(pt)) return;
 
     const msg = msg: {
         const msg = try sema.errMsg(src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(pt)});
         errdefer msg.destroy(sema.gpa);
 
         try sema.explainWhyTypeIsComptime(msg, src, var_ty);
-        if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) {
+        if (var_ty.zigTypeTag(zcu) == .ComptimeInt or var_ty.zigTypeTag(zcu) == .ComptimeFloat) {
             try sema.errNote(src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
         }
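// The rule being enforced, from the user's side: a comptime-only type such as
// `type` or `comptime_int` can only back a `const` or a comptime var.
const std = @import("std");

test "comptime-only types must be const or comptime" {
    const T: type = u8; // allowed: const
    // var U: type = u8; // error: variable of type 'type' must be const or comptime
    try std.testing.expect(T == u8);
}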
 
@@ -26843,7 +26857,7 @@ const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void);
 
 fn explainWhyTypeIsComptime(
     sema: *Sema,
-    msg: *Module.ErrorMsg,
+    msg: *Zcu.ErrorMsg,
     src_loc: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
@@ -26856,15 +26870,15 @@ fn explainWhyTypeIsComptime(
 
 fn explainWhyTypeIsComptimeInner(
     sema: *Sema,
-    msg: *Module.ErrorMsg,
+    msg: *Zcu.ErrorMsg,
     src_loc: LazySrcLoc,
     ty: Type,
     type_set: *TypeSet,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Bool,
         .Int,
         .Float,
@@ -26896,12 +26910,12 @@ fn explainWhyTypeIsComptimeInner(
         },
 
         .Array, .Vector => {
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(zcu), type_set);
         },
         .Pointer => {
-            const elem_ty = ty.elemType2(mod);
-            if (elem_ty.zigTypeTag(mod) == .Fn) {
-                const fn_info = mod.typeToFunc(elem_ty).?;
+            const elem_ty = ty.elemType2(zcu);
+            if (elem_ty.zigTypeTag(zcu) == .Fn) {
+                const fn_info = zcu.typeToFunc(elem_ty).?;
                 if (fn_info.is_generic) {
                     try sema.errNote(src_loc, msg, "function is generic", .{});
                 }
@@ -26909,25 +26923,25 @@ fn explainWhyTypeIsComptimeInner(
                     .Inline => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}),
                     else => {},
                 }
-                if (Type.fromInterned(fn_info.return_type).comptimeOnly(pt)) {
+                if (Type.fromInterned(fn_info.return_type).comptimeOnly(zcu)) {
                     try sema.errNote(src_loc, msg, "function has a comptime-only return type", .{});
                 }
                 return;
             }
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(zcu), type_set);
         },
 
         .Optional => {
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(zcu), type_set);
         },
         .ErrorUnion => {
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(zcu), type_set);
         },
 
         .Struct => {
             if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
 
-            if (mod.typeToStruct(ty)) |struct_type| {
+            if (zcu.typeToStruct(ty)) |struct_type| {
                 for (0..struct_type.field_types.len) |i| {
                     const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                     const field_src: LazySrcLoc = .{
@@ -26935,7 +26949,7 @@ fn explainWhyTypeIsComptimeInner(
                         .offset = .{ .container_field_type = @intCast(i) },
                     };
 
-                    if (try sema.typeRequiresComptime(field_ty)) {
+                    if (try field_ty.comptimeOnlySema(pt)) {
                         try sema.errNote(field_src, msg, "struct requires comptime because of this field", .{});
                         try sema.explainWhyTypeIsComptimeInner(msg, field_src, field_ty, type_set);
                     }
@@ -26947,7 +26961,7 @@ fn explainWhyTypeIsComptimeInner(
         .Union => {
             if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
 
-            if (mod.typeToUnion(ty)) |union_obj| {
+            if (zcu.typeToUnion(ty)) |union_obj| {
                 for (0..union_obj.field_types.len) |i| {
                     const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]);
                     const field_src: LazySrcLoc = .{
@@ -26955,7 +26969,7 @@ fn explainWhyTypeIsComptimeInner(
                         .offset = .{ .container_field_type = @intCast(i) },
                     };
 
-                    if (try sema.typeRequiresComptime(field_ty)) {
+                    if (try field_ty.comptimeOnlySema(pt)) {
                         try sema.errNote(field_src, msg, "union requires comptime because of this field", .{});
                         try sema.explainWhyTypeIsComptimeInner(msg, field_src, field_ty, type_set);
                     }
@@ -26983,8 +26997,8 @@ fn validateExternType(
     position: ExternPosition,
 ) !bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Type,
         .ComptimeFloat,
         .ComptimeInt,
@@ -27003,58 +27017,58 @@ fn validateExternType(
         .AnyFrame,
         => return true,
         .Pointer => {
-            if (ty.childType(mod).zigTypeTag(mod) == .Fn) {
-                return ty.isConstPtr(mod) and try sema.validateExternType(ty.childType(mod), .other);
+            if (ty.childType(zcu).zigTypeTag(zcu) == .Fn) {
+                return ty.isConstPtr(zcu) and try sema.validateExternType(ty.childType(zcu), .other);
             }
-            return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty));
+            return !(ty.isSlice(zcu) or try ty.comptimeOnlySema(pt));
         },
-        .Int => switch (ty.intInfo(mod).bits) {
+        .Int => switch (ty.intInfo(zcu).bits) {
             0, 8, 16, 32, 64, 128 => return true,
             else => return false,
         },
         .Fn => {
             if (position != .other) return false;
-            const target = mod.getTarget();
+            const target = zcu.getTarget();
             // For now we want to allow PTX kernels to use Zig objects, even if we end up exposing the ABI.
             // The goal is to experiment with more integrated CPU/GPU code.
-            if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
+            if (ty.fnCallingConvention(zcu) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
                 return true;
             }
-            return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod));
+            return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(zcu));
         },
         .Enum => {
-            return sema.validateExternType(ty.intTagType(mod), position);
+            return sema.validateExternType(ty.intTagType(zcu), position);
         },
-        .Struct, .Union => switch (ty.containerLayout(mod)) {
+        .Struct, .Union => switch (ty.containerLayout(zcu)) {
             .@"extern" => return true,
             .@"packed" => {
-                const bit_size = try ty.bitSizeAdvanced(pt, .sema);
+                const bit_size = try ty.bitSizeSema(pt);
                 switch (bit_size) {
                     0, 8, 16, 32, 64, 128 => return true,
                     else => return false,
                 }
             },
-            .auto => return !(try sema.typeHasRuntimeBits(ty)),
+            .auto => return !(try ty.hasRuntimeBitsSema(pt)),
         },
         .Array => {
             if (position == .ret_ty or position == .param_ty) return false;
-            return sema.validateExternType(ty.elemType2(mod), .element);
+            return sema.validateExternType(ty.elemType2(zcu), .element);
         },
-        .Vector => return sema.validateExternType(ty.elemType2(mod), .element),
-        .Optional => return ty.isPtrLikeOptional(mod),
+        .Vector => return sema.validateExternType(ty.elemType2(zcu), .element),
+        .Optional => return ty.isPtrLikeOptional(zcu),
     }
 }
 
 fn explainWhyTypeIsNotExtern(
     sema: *Sema,
-    msg: *Module.ErrorMsg,
+    msg: *Zcu.ErrorMsg,
     src_loc: LazySrcLoc,
     ty: Type,
     position: ExternPosition,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Opaque,
         .Bool,
         .Float,
@@ -27073,13 +27087,13 @@ fn explainWhyTypeIsNotExtern(
         => return,
 
         .Pointer => {
-            if (ty.isSlice(mod)) {
+            if (ty.isSlice(zcu)) {
                 try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
             } else {
-                const pointee_ty = ty.childType(mod);
-                if (!ty.isConstPtr(mod) and pointee_ty.zigTypeTag(mod) == .Fn) {
+                const pointee_ty = ty.childType(zcu);
+                if (!ty.isConstPtr(zcu) and pointee_ty.zigTypeTag(zcu) == .Fn) {
                     try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{});
-                } else if (try sema.typeRequiresComptime(ty)) {
+                } else if (try ty.comptimeOnlySema(pt)) {
                     try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(pt)});
                     try sema.explainWhyTypeIsComptime(msg, src_loc, ty);
                 }
@@ -27088,7 +27102,7 @@ fn explainWhyTypeIsNotExtern(
         },
         .Void => try sema.errNote(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
         .NoReturn => try sema.errNote(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
-        .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) {
+        .Int => if (!std.math.isPowerOfTwo(ty.intInfo(zcu).bits)) {
             try sema.errNote(src_loc, msg, "only integers with 0 or power of two bits are extern compatible", .{});
         } else {
             try sema.errNote(src_loc, msg, "only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible", .{});
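// The integer rule as users hit it: only 0/8/16/32/64/128-bit integers are
// extern compatible.
const Ok = extern struct { x: u32, y: u128 };
// const Bad = extern struct { x: u24 }; // error: 24 bits is not extern compatible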
@@ -27099,7 +27113,7 @@ fn explainWhyTypeIsNotExtern(
                 try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{});
                 return;
             }
-            switch (ty.fnCallingConvention(mod)) {
+            switch (ty.fnCallingConvention(zcu)) {
                 .Unspecified => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}),
                 .Async => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}),
                 .Inline => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}),
@@ -27107,7 +27121,7 @@ fn explainWhyTypeIsNotExtern(
             }
         },
         .Enum => {
-            const tag_ty = ty.intTagType(mod);
+            const tag_ty = ty.intTagType(zcu);
             try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(pt)});
             try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
         },
@@ -27119,9 +27133,9 @@ fn explainWhyTypeIsNotExtern(
             } else if (position == .param_ty) {
                 return sema.errNote(src_loc, msg, "arrays are not allowed as a parameter type", .{});
             }
-            try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element);
+            try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(zcu), .element);
         },
-        .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element),
+        .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(zcu), .element),
         .Optional => try sema.errNote(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
     }
 }
@@ -27158,20 +27172,20 @@ fn validatePackedType(sema: *Sema, ty: Type) !bool {
             .auto => false,
             .explicit, .nonexhaustive => true,
         },
-        .Pointer => !ty.isSlice(zcu) and !try sema.typeRequiresComptime(ty),
+        .Pointer => !ty.isSlice(zcu) and !try ty.comptimeOnlySema(pt),
         .Struct, .Union => ty.containerLayout(zcu) == .@"packed",
     };
 }
 
 fn explainWhyTypeIsNotPacked(
     sema: *Sema,
-    msg: *Module.ErrorMsg,
+    msg: *Zcu.ErrorMsg,
     src_loc: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Void,
         .Bool,
         .Float,
@@ -27194,7 +27208,7 @@ fn explainWhyTypeIsNotPacked(
         .Optional,
         .Array,
         => try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}),
-        .Pointer => if (ty.isSlice(mod)) {
+        .Pointer => if (ty.isSlice(zcu)) {
             try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
         } else {
             try sema.errNote(src_loc, msg, "comptime-only pointer has no guaranteed in-memory representation", .{});
@@ -27211,23 +27225,23 @@ fn explainWhyTypeIsNotPacked(
 
 fn prepareSimplePanic(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (mod.panic_func_index == .none) {
+    if (zcu.panic_func_index == .none) {
         const fn_ref = try sema.analyzeNavVal(block, src, try pt.getBuiltinNav("panic"));
         const fn_val = try sema.resolveConstValue(block, src, fn_ref, .{
             .needed_comptime_reason = "panic handler must be comptime-known",
         });
-        assert(fn_val.typeOf(mod).zigTypeTag(mod) == .Fn);
-        assert(try sema.fnHasRuntimeBits(fn_val.typeOf(mod)));
-        try mod.ensureFuncBodyAnalysisQueued(fn_val.toIntern());
-        mod.panic_func_index = fn_val.toIntern();
+        assert(fn_val.typeOf(zcu).zigTypeTag(zcu) == .Fn);
+        assert(try fn_val.typeOf(zcu).fnHasRuntimeBitsSema(pt));
+        try zcu.ensureFuncBodyAnalysisQueued(fn_val.toIntern());
+        zcu.panic_func_index = fn_val.toIntern();
     }
 
-    if (mod.null_stack_trace == .none) {
+    if (zcu.null_stack_trace == .none) {
         const stack_trace_ty = try pt.getBuiltinType("StackTrace");
         try stack_trace_ty.resolveFields(pt);
-        const target = mod.getTarget();
+        const target = zcu.getTarget();
         const ptr_stack_trace_ty = try pt.ptrTypeSema(.{
             .child = stack_trace_ty.toIntern(),
             .flags = .{
@@ -27235,7 +27249,7 @@ fn prepareSimplePanic(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
             },
         });
         const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern());
-        mod.null_stack_trace = try pt.intern(.{ .opt = .{
+        zcu.null_stack_trace = try pt.intern(.{ .opt = .{
             .ty = opt_ptr_stack_trace_ty.toIntern(),
             .val = .none,
         } });
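// The `panic` resolved above is an ordinary declaration; a root source file
// can provide its own with this signature (a sketch; @trap satisfies noreturn):
const std = @import("std");

pub fn panic(msg: []const u8, trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    _ = msg;
    _ = trace;
    _ = ret_addr;
    @trap();
}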
@@ -27245,11 +27259,11 @@ fn prepareSimplePanic(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
 /// Backends depend on panic decls being available when lowering safety-checked
 /// instructions. This function ensures the panic function will be available to
 /// be called during that time.
-fn preparePanicId(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) !InternPool.Nav.Index {
+fn preparePanicId(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Zcu.PanicId) !InternPool.Nav.Index {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;
+    if (zcu.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;
 
     try sema.prepareSimplePanic(block, src);
 
@@ -27257,15 +27271,15 @@ fn preparePanicId(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.
     const msg_nav_index = (sema.namespaceLookup(
         block,
         LazySrcLoc.unneeded,
-        panic_messages_ty.getNamespaceIndex(mod),
-        try mod.intern_pool.getOrPutString(gpa, pt.tid, @tagName(panic_id), .no_embedded_nulls),
+        panic_messages_ty.getNamespaceIndex(zcu),
+        try zcu.intern_pool.getOrPutString(gpa, pt.tid, @tagName(panic_id), .no_embedded_nulls),
     ) catch |err| switch (err) {
         error.AnalysisFail => @panic("std.builtin.panic_messages is corrupt"),
         error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
         error.OutOfMemory => |e| return e,
     }).?;
     try sema.ensureNavResolved(src, msg_nav_index);
-    mod.panic_messages[@intFromEnum(panic_id)] = msg_nav_index.toOptional();
+    zcu.panic_messages[@intFromEnum(panic_id)] = msg_nav_index.toOptional();
     return msg_nav_index;
 }
 
@@ -27274,7 +27288,7 @@ fn addSafetyCheck(
     parent_block: *Block,
     src: LazySrcLoc,
     ok: Air.Inst.Ref,
-    panic_id: Module.PanicId,
+    panic_id: Zcu.PanicId,
 ) !void {
     const gpa = sema.gpa;
     assert(!parent_block.is_comptime);
@@ -27353,18 +27367,18 @@ fn addSafetyCheckExtra(
 
 fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, operation: CallOperation) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (!mod.backendSupportsFeature(.panic_fn)) {
+    if (!zcu.backendSupportsFeature(.panic_fn)) {
         _ = try block.addNoOp(.trap);
         return;
     }
 
     try sema.prepareSimplePanic(block, src);
 
-    const panic_func = mod.funcInfo(mod.panic_func_index);
+    const panic_func = zcu.funcInfo(zcu.panic_func_index);
     const panic_fn = try sema.analyzeNavVal(block, src, panic_func.owner_nav);
-    const null_stack_trace = Air.internedToRef(mod.null_stack_trace);
+    const null_stack_trace = Air.internedToRef(zcu.null_stack_trace);
 
     const opt_usize_ty = try pt.optionalType(.usize_type);
     const null_ret_addr = Air.internedToRef((try pt.intern(.{ .opt = .{
@@ -27459,12 +27473,12 @@ fn panicSentinelMismatch(
 ) !void {
     assert(!parent_block.is_comptime);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const expected_sentinel_val = maybe_sentinel orelse return;
     const expected_sentinel = Air.internedToRef(expected_sentinel_val.toIntern());
 
     const ptr_ty = sema.typeOf(ptr);
-    const actual_sentinel = if (ptr_ty.isSlice(mod))
+    const actual_sentinel = if (ptr_ty.isSlice(zcu))
         try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
     else blk: {
         const elem_ptr_ty = try ptr_ty.elemPtrType(null, pt);
@@ -27472,7 +27486,7 @@ fn panicSentinelMismatch(
         break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
     };
 
-    const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: {
+    const ok = if (sentinel_ty.zigTypeTag(zcu) == .Vector) ok: {
         const eql =
             try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
         break :ok try parent_block.addInst(.{
@@ -27482,7 +27496,7 @@ fn panicSentinelMismatch(
                 .operation = .And,
             } },
         });
-    } else if (sentinel_ty.isSelfComparable(mod, true))
+    } else if (sentinel_ty.isSelfComparable(zcu, true))
         try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
     else {
         const panic_fn = try pt.getBuiltin("checkNonScalarSentinel");
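// The vector branch above in miniature: element-wise equality reduced to a
// single bool with @reduce(.And, ...).
const std = @import("std");

test "vector sentinel comparison reduces to one bool" {
    const expected: @Vector(4, u8) = .{ 0, 0, 0, 0 };
    const actual: @Vector(4, u8) = .{ 0, 0, 0, 0 };
    try std.testing.expect(@reduce(.And, expected == actual));
}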
@@ -27532,7 +27546,7 @@ fn safetyCheckFormatted(
     try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
 }
 
-fn safetyPanic(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) CompileError!void {
+fn safetyPanic(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Zcu.PanicId) CompileError!void {
     const msg_nav_index = try sema.preparePanicId(block, src, panic_id);
     const msg_inst = try sema.analyzeNavVal(block, src, msg_nav_index);
     try sema.panicWithMsg(block, src, msg_inst, .@"safety check");
@@ -27568,30 +27582,30 @@ fn fieldVal(
     // in `fieldPtr`. This function takes a value and returns a value.
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const object_src = src; // TODO better source location
     const object_ty = sema.typeOf(object);
 
     // Zig allows dereferencing a single pointer during field lookup. Note that
     // we don't actually need to generate the dereference for some field lookups, like the
     // length of arrays and other comptime operations.
-    const is_pointer_to = object_ty.isSinglePointer(mod);
+    const is_pointer_to = object_ty.isSinglePointer(zcu);
 
     const inner_ty = if (is_pointer_to)
-        object_ty.childType(mod)
+        object_ty.childType(zcu)
     else
         object_ty;
 
-    switch (inner_ty.zigTypeTag(mod)) {
+    switch (inner_ty.zigTypeTag(zcu)) {
         .Array => {
             if (field_name.eqlSlice("len", ip)) {
-                return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
+                return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(zcu))).toIntern());
             } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
-                const ptr_info = object_ty.ptrInfo(mod);
+                const ptr_info = object_ty.ptrInfo(zcu);
                 const result_ty = try pt.ptrTypeSema(.{
-                    .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
-                    .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none,
+                    .child = Type.fromInterned(ptr_info.child).childType(zcu).toIntern(),
+                    .sentinel = if (inner_ty.sentinel(zcu)) |s| s.toIntern() else .none,
                     .flags = .{
                         .size = .Many,
                         .alignment = ptr_info.flags.alignment,
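// The "len" lookup handled above, from user code: it resolves at comptime
// without loading through the pointer.
const std = @import("std");

test "array length through a single pointer" {
    const arr = [_]u8{ 1, 2, 3 };
    const p = &arr;
    try std.testing.expectEqual(@as(usize, 3), p.len);
}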
@@ -27614,7 +27628,7 @@ fn fieldVal(
             }
         },
         .Pointer => {
-            const ptr_info = inner_ty.ptrInfo(mod);
+            const ptr_info = inner_ty.ptrInfo(zcu);
             if (ptr_info.flags.size == .Slice) {
                 if (field_name.eqlSlice("ptr", ip)) {
                     const slice = if (is_pointer_to)
@@ -27647,7 +27661,7 @@ fn fieldVal(
             const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
             const child_type = val.toType();
 
-            switch (try child_type.zigTypeTagOrPoison(mod)) {
+            switch (try child_type.zigTypeTagOrPoison(zcu)) {
                 .ErrorSet => {
                     switch (ip.indexToKey(child_type.toIntern())) {
                         .error_set_type => |error_set_type| blk: {
@@ -27666,7 +27680,7 @@ fn fieldVal(
                         else => unreachable,
                     }
 
-                    const error_set_type = if (!child_type.isAnyError(mod))
+                    const error_set_type = if (!child_type.isAnyError(zcu))
                         child_type
                     else
                         try pt.singleErrorSetType(field_name);
@@ -27676,12 +27690,12 @@ fn fieldVal(
                     } })));
                 },
                 .Union => {
-                    if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
+                    if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
                         return inst;
                     }
                     try child_type.resolveFields(pt);
-                    if (child_type.unionTagType(mod)) |enum_ty| {
-                        if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
+                    if (child_type.unionTagType(zcu)) |enum_ty| {
+                        if (enum_ty.enumFieldIndex(field_name, zcu)) |field_index_usize| {
                             const field_index: u32 = @intCast(field_index_usize);
                             return Air.internedToRef((try pt.enumValueFieldIndex(enum_ty, field_index)).toIntern());
                         }
@@ -27689,10 +27703,10 @@ fn fieldVal(
                     return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                 },
                 .Enum => {
-                    if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
+                    if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
                         return inst;
                     }
-                    const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
+                    const field_index_usize = child_type.enumFieldIndex(field_name, zcu) orelse
                         return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                     const field_index: u32 = @intCast(field_index_usize);
                     const enum_val = try pt.enumValueFieldIndex(child_type, field_index);
@@ -27701,7 +27715,7 @@ fn fieldVal(
                 .Struct, .Opaque => {
                     switch (child_type.toIntern()) {
                         .empty_struct_type, .anyopaque_type => {}, // no namespace
-                        else => if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
+                        else => if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
                             return inst;
                         },
                     }
@@ -27710,8 +27724,8 @@ fn fieldVal(
                 else => return sema.failWithOwnedErrorMsg(block, msg: {
                     const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)});
                     errdefer msg.destroy(sema.gpa);
-                    if (child_type.isSlice(mod)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{});
-                    if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{});
+                    if (child_type.isSlice(zcu)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{});
+                    if (child_type.zigTypeTag(zcu) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{});
                     break :msg msg;
                 }),
             }
@@ -27748,35 +27762,35 @@ fn fieldPtr(
     // in `fieldVal`. This function takes a pointer and returns a pointer.
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const object_ptr_src = src; // TODO better source location
     const object_ptr_ty = sema.typeOf(object_ptr);
-    const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => object_ptr_ty.childType(mod),
+    const object_ty = switch (object_ptr_ty.zigTypeTag(zcu)) {
+        .Pointer => object_ptr_ty.childType(zcu),
         else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(pt)}),
     };
 
     // Zig allows dereferencing a single pointer during field lookup. Note that
     // we don't actually need to generate the dereference for some field lookups, like the
     // length of arrays and other comptime operations.
-    const is_pointer_to = object_ty.isSinglePointer(mod);
+    const is_pointer_to = object_ty.isSinglePointer(zcu);
 
     const inner_ty = if (is_pointer_to)
-        object_ty.childType(mod)
+        object_ty.childType(zcu)
     else
         object_ty;
 
-    switch (inner_ty.zigTypeTag(mod)) {
+    switch (inner_ty.zigTypeTag(zcu)) {
         .Array => {
             if (field_name.eqlSlice("len", ip)) {
-                const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(mod));
+                const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(zcu));
                 return uavRef(sema, int_val.toIntern());
             } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
-                const ptr_info = object_ty.ptrInfo(mod);
+                const ptr_info = object_ty.ptrInfo(zcu);
                 const new_ptr_ty = try pt.ptrTypeSema(.{
-                    .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
-                    .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none,
+                    .child = Type.fromInterned(ptr_info.child).childType(zcu).toIntern(),
+                    .sentinel = if (object_ty.sentinel(zcu)) |s| s.toIntern() else .none,
                     .flags = .{
                         .size = .Many,
                         .alignment = ptr_info.flags.alignment,
@@ -27788,10 +27802,10 @@ fn fieldPtr(
                     },
                     .packed_offset = ptr_info.packed_offset,
                 });
-                const ptr_ptr_info = object_ptr_ty.ptrInfo(mod);
+                const ptr_ptr_info = object_ptr_ty.ptrInfo(zcu);
                 const result_ty = try pt.ptrTypeSema(.{
                     .child = new_ptr_ty.toIntern(),
-                    .sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
+                    .sentinel = if (object_ptr_ty.sentinel(zcu)) |s| s.toIntern() else .none,
                     .flags = .{
                         .alignment = ptr_ptr_info.flags.alignment,
                         .is_const = ptr_ptr_info.flags.is_const,
@@ -27812,7 +27826,7 @@ fn fieldPtr(
                 );
             }
         },
-        .Pointer => if (inner_ty.isSlice(mod)) {
+        .Pointer => if (inner_ty.isSlice(zcu)) {
             const inner_ptr = if (is_pointer_to)
                 try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
             else
@@ -27821,14 +27835,14 @@ fn fieldPtr(
             const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty;
 
             if (field_name.eqlSlice("ptr", ip)) {
-                const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);
+                const slice_ptr_ty = inner_ty.slicePtrFieldType(zcu);
 
                 const result_ty = try pt.ptrTypeSema(.{
                     .child = slice_ptr_ty.toIntern(),
                     .flags = .{
-                        .is_const = !attr_ptr_ty.ptrIsMutable(mod),
-                        .is_volatile = attr_ptr_ty.isVolatilePtr(mod),
-                        .address_space = attr_ptr_ty.ptrAddressSpace(mod),
+                        .is_const = !attr_ptr_ty.ptrIsMutable(zcu),
+                        .is_volatile = attr_ptr_ty.isVolatilePtr(zcu),
+                        .address_space = attr_ptr_ty.ptrAddressSpace(zcu),
                     },
                 });
 
@@ -27844,9 +27858,9 @@ fn fieldPtr(
                 const result_ty = try pt.ptrTypeSema(.{
                     .child = .usize_type,
                     .flags = .{
-                        .is_const = !attr_ptr_ty.ptrIsMutable(mod),
-                        .is_volatile = attr_ptr_ty.isVolatilePtr(mod),
-                        .address_space = attr_ptr_ty.ptrAddressSpace(mod),
+                        .is_const = !attr_ptr_ty.ptrIsMutable(zcu),
+                        .is_volatile = attr_ptr_ty.isVolatilePtr(zcu),
+                        .address_space = attr_ptr_ty.ptrAddressSpace(zcu),
                     },
                 });
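
An illustrative sketch of the slice field pointers built here, assuming a mutable slice so `is_const` comes out false:

    const std = @import("std");

    test "slice 'len' is addressable" {
        var buf: [4]u8 = .{ 0, 1, 2, 3 };
        var s: []u8 = &buf;
        // The field pointer inherits constness/volatility/address space
        // from the slice pointer, per the flags above.
        const len_ptr: *usize = &s.len;
        len_ptr.* = 2;
        try std.testing.expectEqual(@as(usize, 2), s.len);
    }
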
 
@@ -27878,7 +27892,7 @@ fn fieldPtr(
             const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
             const child_type = val.toType();
 
-            switch (child_type.zigTypeTag(mod)) {
+            switch (child_type.zigTypeTag(zcu)) {
                 .ErrorSet => {
                     switch (ip.indexToKey(child_type.toIntern())) {
                         .error_set_type => |error_set_type| blk: {
@@ -27899,7 +27913,7 @@ fn fieldPtr(
                         else => unreachable,
                     }
 
-                    const error_set_type = if (!child_type.isAnyError(mod))
+                    const error_set_type = if (!child_type.isAnyError(zcu))
                         child_type
                     else
                         try pt.singleErrorSetType(field_name);
@@ -27909,12 +27923,12 @@ fn fieldPtr(
                     } }));
                 },
                 .Union => {
-                    if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
+                    if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
                         return inst;
                     }
                     try child_type.resolveFields(pt);
-                    if (child_type.unionTagType(mod)) |enum_ty| {
-                        if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
+                    if (child_type.unionTagType(zcu)) |enum_ty| {
+                        if (enum_ty.enumFieldIndex(field_name, zcu)) |field_index| {
                             const field_index_u32: u32 = @intCast(field_index);
                             const idx_val = try pt.enumValueFieldIndex(enum_ty, field_index_u32);
                             return uavRef(sema, idx_val.toIntern());
@@ -27923,10 +27937,10 @@ fn fieldPtr(
                     return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                 },
                 .Enum => {
-                    if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
+                    if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
                         return inst;
                     }
-                    const field_index = child_type.enumFieldIndex(field_name, mod) orelse {
+                    const field_index = child_type.enumFieldIndex(field_name, zcu) orelse {
                         return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                     };
                     const field_index_u32: u32 = @intCast(field_index);
@@ -27934,7 +27948,7 @@ fn fieldPtr(
                     return uavRef(sema, idx_val.toIntern());
                 },
                 .Struct, .Opaque => {
-                    if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
+                    if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
                         return inst;
                     }
                     return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
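
The type-field cases above cover error sets, tagged unions, and enums; a hedged sketch of what they resolve to:

    const std = @import("std");

    const Err = error{Foo};
    const E = enum { a, b };
    const U = union(E) { a: u8, b: void };

    test "field access on container types" {
        // On an error set type, a field name resolves to the error value.
        try std.testing.expect(Err.Foo == error.Foo);
        // On a tagged union type, it resolves to the corresponding tag value.
        try std.testing.expectEqual(E.b, U.b);
    }
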
@@ -28149,18 +28163,18 @@ fn finishFieldCallBind(
     object_ptr: Air.Inst.Ref,
 ) CompileError!ResolvedFieldCallee {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_field_ty = try pt.ptrTypeSema(.{
         .child = field_ty.toIntern(),
         .flags = .{
-            .is_const = !ptr_ty.ptrIsMutable(mod),
-            .address_space = ptr_ty.ptrAddressSpace(mod),
+            .is_const = !ptr_ty.ptrIsMutable(zcu),
+            .address_space = ptr_ty.ptrAddressSpace(zcu),
         },
     });
 
-    const container_ty = ptr_ty.childType(mod);
-    if (container_ty.zigTypeTag(mod) == .Struct) {
-        if (container_ty.structFieldIsComptime(field_index, mod)) {
+    const container_ty = ptr_ty.childType(zcu);
+    if (container_ty.zigTypeTag(zcu) == .Struct) {
+        if (container_ty.structFieldIsComptime(field_index, zcu)) {
             try container_ty.resolveStructFieldInits(pt);
             const default_val = (try container_ty.structFieldValueComptime(pt, field_index)).?;
             return .{ .direct = Air.internedToRef(default_val.toIntern()) };
@@ -28237,26 +28251,26 @@ fn structFieldPtr(
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    assert(struct_ty.zigTypeTag(mod) == .Struct);
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    assert(struct_ty.zigTypeTag(zcu) == .Struct);
 
     try struct_ty.resolveFields(pt);
     try struct_ty.resolveLayout(pt);
 
-    if (struct_ty.isTuple(mod)) {
+    if (struct_ty.isTuple(zcu)) {
         if (field_name.eqlSlice("len", ip)) {
-            const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(mod));
+            const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(zcu));
             return sema.analyzeRef(block, src, len_inst);
         }
         const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
         return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
-    } else if (struct_ty.isAnonStruct(mod)) {
+    } else if (struct_ty.isAnonStruct(zcu)) {
         const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
         return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
     }
 
-    const struct_type = mod.typeToStruct(struct_ty).?;
+    const struct_type = zcu.typeToStruct(struct_ty).?;
 
     const field_index = struct_type.nameIndex(ip, field_name) orelse
         return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name);
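
For the tuple `len` special case above, a minimal illustration:

    const std = @import("std");

    test "tuple 'len'" {
        const t = .{ @as(u32, 1), true };
        // `len` on a tuple is the field count as a comptime-known usize.
        try std.testing.expectEqual(@as(usize, 2), t.len);
    }
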
@@ -28275,9 +28289,9 @@ fn structFieldPtrByIndex(
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    if (struct_ty.isAnonStruct(mod)) {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    if (struct_ty.isAnonStruct(zcu)) {
         return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
     }
 
@@ -28286,10 +28300,10 @@ fn structFieldPtrByIndex(
         return Air.internedToRef(val.toIntern());
     }
 
-    const struct_type = mod.typeToStruct(struct_ty).?;
+    const struct_type = zcu.typeToStruct(struct_ty).?;
     const field_ty = struct_type.field_types.get(ip)[field_index];
     const struct_ptr_ty = sema.typeOf(struct_ptr);
-    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
+    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
 
     var ptr_ty_data: InternPool.Key.PtrType = .{
         .child = field_ty,
@@ -28303,7 +28317,7 @@ fn structFieldPtrByIndex(
     const parent_align = if (struct_ptr_ty_info.flags.alignment != .none)
         struct_ptr_ty_info.flags.alignment
     else
-        try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));
+        try Type.fromInterned(struct_ptr_ty_info.child).abiAlignmentSema(pt);
 
     if (struct_type.layout == .@"packed") {
         switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) {
@@ -28319,18 +28333,19 @@ fn structFieldPtrByIndex(
         // For extern structs, field alignment might be bigger than the type's
         // natural alignment. E.g., in `extern struct { x: u32, y: u16 }` the
         // second field is aligned as u32.
-        const field_offset = struct_ty.structFieldOffset(field_index, pt);
+        const field_offset = struct_ty.structFieldOffset(field_index, zcu);
         ptr_ty_data.flags.alignment = if (parent_align == .none)
             .none
         else
             @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
     } else {
         // Our alignment is capped at the field alignment.
-        const field_align = try pt.structFieldAlignmentAdvanced(
+        const field_align = try Type.fromInterned(field_ty).structFieldAlignmentAdvanced(
             struct_type.fieldAlign(ip, field_index),
-            Type.fromInterned(field_ty),
             struct_type.layout,
             .sema,
+            pt.zcu,
+            pt.tid,
         );
         ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none)
             field_align
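
A sketch of the extern-struct case documented above, assuming a target where u32 has 4-byte ABI alignment:

    const std = @import("std");

    const S = extern struct { x: u32, y: u16 };

    test "extern struct field offset drives field pointer alignment" {
        var s: S = .{ .x = 1, .y = 2 };
        // y sits at offset 4, so a pointer to it may carry align(4)
        // even though u16 itself only requires align(2).
        try std.testing.expectEqual(@as(usize, 4), @offsetOf(S, "y"));
        s.y += 1;
        try std.testing.expectEqual(@as(u16, 3), s.y);
    }
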
@@ -28364,9 +28379,9 @@ fn structFieldVal(
     struct_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    assert(struct_ty.zigTypeTag(mod) == .Struct);
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    assert(struct_ty.zigTypeTag(zcu) == .Struct);
 
     try struct_ty.resolveFields(pt);
 
@@ -28388,7 +28403,7 @@ fn structFieldVal(
                 return Air.internedToRef(field_val.toIntern());
 
             if (try sema.resolveValue(struct_byval)) |struct_val| {
-                if (struct_val.isUndef(mod)) return pt.undefRef(field_ty);
+                if (struct_val.isUndef(zcu)) return pt.undefRef(field_ty);
                 if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
                     return Air.internedToRef(opv.toIntern());
                 }
@@ -28421,9 +28436,9 @@ fn tupleFieldVal(
     tuple_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (field_name.eqlSlice("len", &mod.intern_pool)) {
-        return pt.intRef(Type.usize, tuple_ty.structFieldCount(mod));
+    const zcu = pt.zcu;
+    if (field_name.eqlSlice("len", &zcu.intern_pool)) {
+        return pt.intRef(Type.usize, tuple_ty.structFieldCount(zcu));
     }
     const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
     return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty);
@@ -28461,10 +28476,10 @@ fn tupleFieldValByIndex(
     tuple_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const field_ty = tuple_ty.structFieldType(field_index, mod);
+    const zcu = pt.zcu;
+    const field_ty = tuple_ty.structFieldType(field_index, zcu);
 
-    if (tuple_ty.structFieldIsComptime(field_index, mod))
+    if (tuple_ty.structFieldIsComptime(field_index, zcu))
         try tuple_ty.resolveStructFieldInits(pt);
     if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| {
         return Air.internedToRef(default_value.toIntern());
@@ -28474,10 +28489,10 @@ fn tupleFieldValByIndex(
         if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
             return Air.internedToRef(opv.toIntern());
         }
-        return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) {
+        return switch (zcu.intern_pool.indexToKey(tuple_val.toIntern())) {
             .undef => pt.undefRef(field_ty),
             .aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
-                .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &mod.intern_pool)),
+                .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &zcu.intern_pool)),
                 .elems => |elems| Value.fromInterned(elems[field_index]),
                 .repeated_elem => |elem| Value.fromInterned(elem),
             }.toIntern()),
@@ -28501,15 +28516,15 @@ fn unionFieldPtr(
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
-    assert(union_ty.zigTypeTag(mod) == .Union);
+    assert(union_ty.zigTypeTag(zcu) == .Union);
 
     const union_ptr_ty = sema.typeOf(union_ptr);
-    const union_ptr_info = union_ptr_ty.ptrInfo(mod);
+    const union_ptr_info = union_ptr_ty.ptrInfo(zcu);
     try union_ty.resolveFields(pt);
-    const union_obj = mod.typeToUnion(union_ty).?;
+    const union_obj = zcu.typeToUnion(union_ty).?;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
     const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
     const ptr_field_ty = try pt.ptrTypeSema(.{
@@ -28522,16 +28537,22 @@ fn unionFieldPtr(
                 const union_align = if (union_ptr_info.flags.alignment != .none)
                     union_ptr_info.flags.alignment
                 else
-                    try sema.typeAbiAlignment(union_ty);
-                const field_align = try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema);
+                    try union_ty.abiAlignmentSema(pt);
+                const field_align = try Type.unionFieldNormalAlignmentAdvanced(
+                    union_obj,
+                    field_index,
+                    .sema,
+                    pt.zcu,
+                    pt.tid,
+                );
                 break :blk union_align.min(field_align);
             } else union_ptr_info.flags.alignment,
         },
         .packed_offset = union_ptr_info.packed_offset,
     });
-    const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?);
+    const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?);
 
-    if (initializing and field_ty.zigTypeTag(mod) == .NoReturn) {
+    if (initializing and field_ty.zigTypeTag(zcu) == .NoReturn) {
         const msg = msg: {
             const msg = try sema.errMsg(src, "cannot initialize 'noreturn' field of union", .{});
             errdefer msg.destroy(sema.gpa);
@@ -28556,7 +28577,7 @@ fn unionFieldPtr(
             } else {
                 const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
                     break :ct;
-                if (union_val.isUndef(mod)) {
+                if (union_val.isUndef(zcu)) {
                     return sema.failWithUseOfUndef(block, src);
                 }
                 const un = ip.indexToKey(union_val.toIntern()).un;
@@ -28564,8 +28585,8 @@ fn unionFieldPtr(
                 const tag_matches = un.tag == field_tag.toIntern();
                 if (!tag_matches) {
                     const msg = msg: {
-                        const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?;
-                        const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod);
+                        const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
+                        const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, zcu);
                         const msg = try sema.errMsg(src, "access of union field '{}' while field '{}' is active", .{
                             field_name.fmt(ip),
                             active_field_name.fmt(ip),
@@ -28585,7 +28606,7 @@ fn unionFieldPtr(
 
     try sema.requireRuntimeBlock(block, src, null);
     if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
-        union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
+        union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
     {
         const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
         const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
@@ -28594,7 +28615,7 @@ fn unionFieldPtr(
         const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val);
         try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
     }
-    if (field_ty.zigTypeTag(mod) == .NoReturn) {
+    if (field_ty.zigTypeTag(zcu) == .NoReturn) {
         _ = try block.addNoOp(.unreach);
         return .unreachable_value;
     }
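
Illustrative usage of the active-tag check above (values are arbitrary):

    const std = @import("std");

    const U = union(enum) { a: u32, b: f32 };

    test "union field access tracks the active tag" {
        var u: U = .{ .a = 1 };
        u = .{ .b = 2.5 };
        // Reading u.a here would hit the check above: a compile error at
        // comptime, or the safety panic emitted via panicInactiveUnionField.
        try std.testing.expectEqual(@as(f32, 2.5), u.b);
    }
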
@@ -28654,7 +28675,7 @@ fn unionFieldVal(
             .@"packed" => if (tag_matches) {
                 // Fast path - no need to use bitcast logic.
                 return Air.internedToRef(un.val);
-            } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(pt, .sema), 0)) |field_val| {
+            } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeSema(pt), 0)) |field_val| {
                 return Air.internedToRef(field_val.toIntern());
             },
         }
@@ -28688,17 +28709,17 @@ fn elemPtr(
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const indexable_ptr_src = src; // TODO better source location
     const indexable_ptr_ty = sema.typeOf(indexable_ptr);
 
-    const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => indexable_ptr_ty.childType(mod),
+    const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(zcu)) {
+        .Pointer => indexable_ptr_ty.childType(zcu),
         else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}),
     };
     try checkIndexable(sema, block, src, indexable_ty);
 
-    const elem_ptr = switch (indexable_ty.zigTypeTag(mod)) {
+    const elem_ptr = switch (indexable_ty.zigTypeTag(zcu)) {
         .Array, .Vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
         .Struct => blk: {
             // Tuple field access.
@@ -28732,11 +28753,11 @@ fn elemPtrOneLayerOnly(
     const indexable_src = src; // TODO better source location
     const indexable_ty = sema.typeOf(indexable);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     try checkIndexable(sema, block, src, indexable_ty);
 
-    switch (indexable_ty.ptrSize(mod)) {
+    switch (indexable_ty.ptrSize(zcu)) {
         .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
         .Many, .C => {
             const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -28754,11 +28775,11 @@ fn elemPtrOneLayerOnly(
             return block.addPtrElemPtr(indexable, elem_index, result_ty);
         },
         .One => {
-            const child_ty = indexable_ty.childType(mod);
-            const elem_ptr = switch (child_ty.zigTypeTag(mod)) {
+            const child_ty = indexable_ty.childType(zcu);
+            const elem_ptr = switch (child_ty.zigTypeTag(zcu)) {
                 .Array, .Vector => try sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety),
                 .Struct => blk: {
-                    assert(child_ty.isTuple(mod));
+                    assert(child_ty.isTuple(zcu));
                     const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                         .needed_comptime_reason = "tuple field access index must be comptime-known",
                     });
@@ -28785,7 +28806,7 @@ fn elemVal(
     const indexable_src = src; // TODO better source location
     const indexable_ty = sema.typeOf(indexable);
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     try checkIndexable(sema, block, src, indexable_ty);
 
@@ -28793,8 +28814,8 @@ fn elemVal(
     // index is a scalar or vector instead of unconditionally casting to usize.
     const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
 
-    switch (indexable_ty.zigTypeTag(mod)) {
-        .Pointer => switch (indexable_ty.ptrSize(mod)) {
+    switch (indexable_ty.zigTypeTag(zcu)) {
+        .Pointer => switch (indexable_ty.ptrSize(zcu)) {
             .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
             .Many, .C => {
                 const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -28804,7 +28825,7 @@ fn elemVal(
                     const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
                     const index_val = maybe_index_val orelse break :rs elem_index_src;
                     const index: usize = @intCast(try index_val.toUnsignedIntSema(pt));
-                    const elem_ty = indexable_ty.elemType2(mod);
+                    const elem_ty = indexable_ty.elemType2(zcu);
                     const many_ptr_ty = try pt.manyConstPtrType(elem_ty);
                     const many_ptr_val = try pt.getCoerced(indexable_val, many_ptr_ty);
                     const elem_ptr_ty = try pt.singleConstPtrType(elem_ty);
@@ -28820,12 +28841,12 @@ fn elemVal(
             },
             .One => {
                 arr_sent: {
-                    const inner_ty = indexable_ty.childType(mod);
-                    if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent;
-                    const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent;
+                    const inner_ty = indexable_ty.childType(zcu);
+                    if (inner_ty.zigTypeTag(zcu) != .Array) break :arr_sent;
+                    const sentinel = inner_ty.sentinel(zcu) orelse break :arr_sent;
                     const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent;
                     const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(pt));
-                    if (index != inner_ty.arrayLen(mod)) break :arr_sent;
+                    if (index != inner_ty.arrayLen(zcu)) break :arr_sent;
                     return Air.internedToRef(sentinel.toIntern());
                 }
                 const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
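
The arr_sent block above makes the sentinel readable at index len, e.g.:

    const std = @import("std");

    test "indexing at len yields the sentinel" {
        const arr: [3:0]u8 = .{ 1, 2, 3 };
        const p = &arr;
        try std.testing.expectEqual(@as(u8, 0), p[3]);
    }
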
@@ -28857,7 +28878,7 @@ fn validateRuntimeElemAccess(
     parent_ty: Type,
     parent_src: LazySrcLoc,
 ) CompileError!void {
-    if (try sema.typeRequiresComptime(elem_ty)) {
+    if (try elem_ty.comptimeOnlySema(sema.pt)) {
         const msg = msg: {
             const msg = try sema.errMsg(
                 elem_index_src,
@@ -28884,11 +28905,11 @@ fn tupleFieldPtr(
     init: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const tuple_ptr_ty = sema.typeOf(tuple_ptr);
-    const tuple_ty = tuple_ptr_ty.childType(mod);
+    const tuple_ty = tuple_ptr_ty.childType(zcu);
     try tuple_ty.resolveFields(pt);
-    const field_count = tuple_ty.structFieldCount(mod);
+    const field_count = tuple_ty.structFieldCount(zcu);
 
     if (field_count == 0) {
         return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{});
@@ -28900,17 +28921,17 @@ fn tupleFieldPtr(
         });
     }
 
-    const field_ty = tuple_ty.structFieldType(field_index, mod);
+    const field_ty = tuple_ty.structFieldType(field_index, zcu);
     const ptr_field_ty = try pt.ptrTypeSema(.{
         .child = field_ty.toIntern(),
         .flags = .{
-            .is_const = !tuple_ptr_ty.ptrIsMutable(mod),
-            .is_volatile = tuple_ptr_ty.isVolatilePtr(mod),
-            .address_space = tuple_ptr_ty.ptrAddressSpace(mod),
+            .is_const = !tuple_ptr_ty.ptrIsMutable(zcu),
+            .is_volatile = tuple_ptr_ty.isVolatilePtr(zcu),
+            .address_space = tuple_ptr_ty.ptrAddressSpace(zcu),
         },
     });
 
-    if (tuple_ty.structFieldIsComptime(field_index, mod))
+    if (tuple_ty.structFieldIsComptime(field_index, zcu))
         try tuple_ty.resolveStructFieldInits(pt);
 
     if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_val| {
@@ -28943,10 +28964,10 @@ fn tupleField(
     field_index: u32,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const tuple_ty = sema.typeOf(tuple);
     try tuple_ty.resolveFields(pt);
-    const field_count = tuple_ty.structFieldCount(mod);
+    const field_count = tuple_ty.structFieldCount(zcu);
 
     if (field_count == 0) {
         return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{});
@@ -28958,16 +28979,16 @@ fn tupleField(
         });
     }
 
-    const field_ty = tuple_ty.structFieldType(field_index, mod);
+    const field_ty = tuple_ty.structFieldType(field_index, zcu);
 
-    if (tuple_ty.structFieldIsComptime(field_index, mod))
+    if (tuple_ty.structFieldIsComptime(field_index, zcu))
         try tuple_ty.resolveStructFieldInits(pt);
     if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| {
         return Air.internedToRef(default_value.toIntern()); // comptime field
     }
 
     if (try sema.resolveValue(tuple)) |tuple_val| {
-        if (tuple_val.isUndef(mod)) return pt.undefRef(field_ty);
+        if (tuple_val.isUndef(zcu)) return pt.undefRef(field_ty);
         return Air.internedToRef((try tuple_val.fieldValue(pt, field_index)).toIntern());
     }
 
@@ -28989,12 +29010,12 @@ fn elemValArray(
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const array_ty = sema.typeOf(array);
-    const array_sent = array_ty.sentinel(mod);
-    const array_len = array_ty.arrayLen(mod);
+    const array_sent = array_ty.sentinel(zcu);
+    const array_len = array_ty.arrayLen(zcu);
     const array_len_s = array_len + @intFromBool(array_sent != null);
-    const elem_ty = array_ty.childType(mod);
+    const elem_ty = array_ty.childType(zcu);
 
     if (array_len_s == 0) {
         return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
@@ -29017,7 +29038,7 @@ fn elemValArray(
         }
     }
     if (maybe_undef_array_val) |array_val| {
-        if (array_val.isUndef(mod)) {
+        if (array_val.isUndef(zcu)) {
             return pt.undefRef(elem_ty);
         }
         if (maybe_index_val) |index_val| {
@@ -29058,11 +29079,11 @@ fn elemPtrArray(
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const array_ptr_ty = sema.typeOf(array_ptr);
-    const array_ty = array_ptr_ty.childType(mod);
-    const array_sent = array_ty.sentinel(mod) != null;
-    const array_len = array_ty.arrayLen(mod);
+    const array_ty = array_ptr_ty.childType(zcu);
+    const array_sent = array_ty.sentinel(zcu) != null;
+    const array_len = array_ty.arrayLen(zcu);
     const array_len_s = array_len + @intFromBool(array_sent);
 
     if (array_len_s == 0) {
@@ -29083,7 +29104,7 @@ fn elemPtrArray(
     const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, pt);
 
     if (maybe_undef_array_ptr_val) |array_ptr_val| {
-        if (array_ptr_val.isUndef(mod)) {
+        if (array_ptr_val.isUndef(zcu)) {
             return pt.undefRef(elem_ptr_ty);
         }
         if (offset) |index| {
@@ -29093,7 +29114,7 @@ fn elemPtrArray(
     }
 
     if (!init) {
-        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
+        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(zcu), array_ty, array_ptr_src);
     }
 
     const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
@@ -29120,10 +29141,10 @@ fn elemValSlice(
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel(mod) != null;
-    const elem_ty = slice_ty.elemType2(mod);
+    const slice_sent = slice_ty.sentinel(zcu) != null;
+    const elem_ty = slice_ty.elemType2(zcu);
     var runtime_src = slice_src;
 
     // slice must be defined since it could be dereferenced as null
@@ -29178,9 +29199,9 @@ fn elemPtrSlice(
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel(mod) != null;
+    const slice_sent = slice_ty.sentinel(zcu) != null;
 
     const maybe_undef_slice_val = try sema.resolveValue(slice);
     // The index must not be undefined since it can be out of bounds.
@@ -29192,7 +29213,7 @@ fn elemPtrSlice(
     const elem_ptr_ty = try slice_ty.elemPtrType(offset, pt);
 
     if (maybe_undef_slice_val) |slice_val| {
-        if (slice_val.isUndef(mod)) {
+        if (slice_val.isUndef(zcu)) {
             return pt.undefRef(elem_ptr_ty);
         }
         const slice_len = try slice_val.sliceLen(pt);
@@ -29217,7 +29238,7 @@ fn elemPtrSlice(
     if (oob_safety and block.wantSafety()) {
         const len_inst = len: {
             if (maybe_undef_slice_val) |slice_val|
-                if (!slice_val.isUndef(mod))
+                if (!slice_val.isUndef(zcu))
                     break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
             break :len try block.addTyOp(.slice_len, Type.usize, slice);
         };
@@ -29600,7 +29621,7 @@ fn coerceExtra(
                     // empty tuple to zero-length slice
                     // note that this allows coercing to a mutable slice.
                     if (inst_child_ty.structFieldCount(zcu) == 0) {
-                        const align_val = try dest_ty.ptrAlignmentAdvanced(pt, .sema);
+                        const align_val = try dest_ty.ptrAlignmentSema(pt);
                         return Air.internedToRef(try pt.intern(.{ .slice = .{
                             .ty = dest_ty.toIntern(),
                             .ptr = try pt.intern(.{ .ptr = .{
@@ -30098,7 +30119,7 @@ const InMemoryCoercionResult = union(enum) {
         return res;
     }
 
-    fn report(res: *const InMemoryCoercionResult, sema: *Sema, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
+    fn report(res: *const InMemoryCoercionResult, sema: *Sema, src: LazySrcLoc, msg: *Zcu.ErrorMsg) !void {
         const pt = sema.pt;
         var cur = res;
         while (true) switch (cur.*) {
@@ -30364,18 +30385,18 @@ pub fn coerceInMemoryAllowed(
     src_val: ?Value,
 ) CompileError!InMemoryCoercionResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (dest_ty.eql(src_ty, mod))
+    if (dest_ty.eql(src_ty, zcu))
         return .ok;
 
-    const dest_tag = dest_ty.zigTypeTag(mod);
-    const src_tag = src_ty.zigTypeTag(mod);
+    const dest_tag = dest_ty.zigTypeTag(zcu);
+    const src_tag = src_ty.zigTypeTag(zcu);
 
     // Differently-named integers with the same number of bits.
     if (dest_tag == .Int and src_tag == .Int) {
-        const dest_info = dest_ty.intInfo(mod);
-        const src_info = src_ty.intInfo(mod);
+        const dest_info = dest_ty.intInfo(zcu);
+        const src_info = src_ty.intInfo(zcu);
 
         if (dest_info.signedness == src_info.signedness and
             dest_info.bits == src_info.bits)
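
A sketch of the same-bits integer rule, assuming c_int is a 32-bit signed integer on the target:

    const std = @import("std");

    test "same-bit integers coerce in memory" {
        var x: i32 = 42;
        const p: *c_int = &x; // allowed: pointee types match bit-for-bit
        try std.testing.expectEqual(@as(c_int, 42), p.*);
    }
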
@@ -30425,7 +30446,7 @@ pub fn coerceInMemoryAllowed(
     }
 
     // Slices
-    if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) {
+    if (dest_ty.isSlice(zcu) and src_ty.isSlice(zcu)) {
         return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
     }
 
@@ -30436,8 +30457,8 @@ pub fn coerceInMemoryAllowed(
 
     // Error Unions
     if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
-        const dest_payload = dest_ty.errorUnionPayload(mod);
-        const src_payload = src_ty.errorUnionPayload(mod);
+        const dest_payload = dest_ty.errorUnionPayload(zcu);
+        const src_payload = src_ty.errorUnionPayload(zcu);
         const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src, null);
         if (child != .ok) {
             return InMemoryCoercionResult{ .error_union_payload = .{
@@ -30446,7 +30467,7 @@ pub fn coerceInMemoryAllowed(
                 .wanted = dest_payload,
             } };
         }
-        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src, null);
+        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(zcu), src_ty.errorUnionSet(zcu), dest_is_mut, target, dest_src, src_src, null);
     }
 
     // Error Sets
@@ -30456,8 +30477,8 @@ pub fn coerceInMemoryAllowed(
 
     // Arrays
     if (dest_tag == .Array and src_tag == .Array) {
-        const dest_info = dest_ty.arrayInfo(mod);
-        const src_info = src_ty.arrayInfo(mod);
+        const dest_info = dest_ty.arrayInfo(zcu);
+        const src_info = src_ty.arrayInfo(zcu);
         if (dest_info.len != src_info.len) {
             return InMemoryCoercionResult{ .array_len = .{
                 .actual = src_info.len,
@@ -30483,7 +30504,7 @@ pub fn coerceInMemoryAllowed(
             dest_info.sentinel.?.eql(
             try pt.getCoerced(src_info.sentinel.?, dest_info.elem_type),
             dest_info.elem_type,
-            mod,
+            zcu,
         ));
         if (!ok_sent) {
             return InMemoryCoercionResult{ .array_sentinel = .{
@@ -30497,8 +30518,8 @@ pub fn coerceInMemoryAllowed(
 
     // Vectors
     if (dest_tag == .Vector and src_tag == .Vector) {
-        const dest_len = dest_ty.vectorLen(mod);
-        const src_len = src_ty.vectorLen(mod);
+        const dest_len = dest_ty.vectorLen(zcu);
+        const src_len = src_ty.vectorLen(zcu);
         if (dest_len != src_len) {
             return InMemoryCoercionResult{ .vector_len = .{
                 .actual = src_len,
@@ -30506,8 +30527,8 @@ pub fn coerceInMemoryAllowed(
             } };
         }
 
-        const dest_elem_ty = dest_ty.scalarType(mod);
-        const src_elem_ty = src_ty.scalarType(mod);
+        const dest_elem_ty = dest_ty.scalarType(zcu);
+        const src_elem_ty = src_ty.scalarType(zcu);
         const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src, null);
         if (child != .ok) {
             return InMemoryCoercionResult{ .vector_elem = .{
@@ -30524,8 +30545,8 @@ pub fn coerceInMemoryAllowed(
     if ((dest_tag == .Vector and src_tag == .Array) or
         (dest_tag == .Array and src_tag == .Vector))
     {
-        const dest_len = dest_ty.arrayLen(mod);
-        const src_len = src_ty.arrayLen(mod);
+        const dest_len = dest_ty.arrayLen(zcu);
+        const src_len = src_ty.arrayLen(zcu);
         if (dest_len != src_len) {
             return InMemoryCoercionResult{ .array_len = .{
                 .actual = src_len,
@@ -30533,8 +30554,8 @@ pub fn coerceInMemoryAllowed(
             } };
         }
 
-        const dest_elem_ty = dest_ty.childType(mod);
-        const src_elem_ty = src_ty.childType(mod);
+        const dest_elem_ty = dest_ty.childType(zcu);
+        const src_elem_ty = src_ty.childType(zcu);
         const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src, null);
         if (child != .ok) {
             return InMemoryCoercionResult{ .array_elem = .{
@@ -30545,7 +30566,7 @@ pub fn coerceInMemoryAllowed(
         }
 
         if (dest_tag == .Array) {
-            const dest_info = dest_ty.arrayInfo(mod);
+            const dest_info = dest_ty.arrayInfo(zcu);
             if (dest_info.sentinel != null) {
                 return InMemoryCoercionResult{ .array_sentinel = .{
                     .actual = Value.@"unreachable",
@@ -30558,8 +30579,8 @@ pub fn coerceInMemoryAllowed(
         // The memory layout of @Vector(N, iM) is the same as the integer type i(N*M),
         // that is to say, the padding bits are not in the same place as the array [N]iM.
         // If there's no padding, the bitcast is possible.
-        const elem_bit_size = dest_elem_ty.bitSize(pt);
-        const elem_abi_byte_size = dest_elem_ty.abiSize(pt);
+        const elem_bit_size = dest_elem_ty.bitSize(zcu);
+        const elem_abi_byte_size = dest_elem_ty.abiSize(zcu);
         if (elem_abi_byte_size * 8 == elem_bit_size)
             return .ok;
     }
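
The padding-bit condition above in practice: u8 has no padding bits (bit size equals 8x the ABI byte size), so the coercion can be a plain bitcast. An illustrative example:

    const std = @import("std");

    test "array to vector coercion without padding bits" {
        const a: [4]u8 = .{ 1, 2, 3, 4 };
        const v: @Vector(4, u8) = a;
        try std.testing.expectEqual(@as(u8, 3), v[2]);
    }
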
@@ -30572,8 +30593,8 @@ pub fn coerceInMemoryAllowed(
                 .wanted = dest_ty,
             } };
         }
-        const dest_child_type = dest_ty.optionalChild(mod);
-        const src_child_type = src_ty.optionalChild(mod);
+        const dest_child_type = dest_ty.optionalChild(zcu);
+        const src_child_type = src_ty.optionalChild(zcu);
 
         const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src, null);
         if (child != .ok) {
@@ -30588,15 +30609,15 @@ pub fn coerceInMemoryAllowed(
     }
 
     // Tuples (with in-memory-coercible fields)
-    if (dest_ty.isTuple(mod) and src_ty.isTuple(mod)) tuple: {
-        if (dest_ty.containerLayout(mod) != src_ty.containerLayout(mod)) break :tuple;
-        if (dest_ty.structFieldCount(mod) != src_ty.structFieldCount(mod)) break :tuple;
-        const field_count = dest_ty.structFieldCount(mod);
+    if (dest_ty.isTuple(zcu) and src_ty.isTuple(zcu)) tuple: {
+        if (dest_ty.containerLayout(zcu) != src_ty.containerLayout(zcu)) break :tuple;
+        if (dest_ty.structFieldCount(zcu) != src_ty.structFieldCount(zcu)) break :tuple;
+        const field_count = dest_ty.structFieldCount(zcu);
         for (0..field_count) |field_idx| {
-            if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple;
-            if (dest_ty.structFieldAlign(field_idx, pt) != src_ty.structFieldAlign(field_idx, pt)) break :tuple;
-            const dest_field_ty = dest_ty.structFieldType(field_idx, mod);
-            const src_field_ty = src_ty.structFieldType(field_idx, mod);
+            if (dest_ty.structFieldIsComptime(field_idx, zcu) != src_ty.structFieldIsComptime(field_idx, zcu)) break :tuple;
+            if (dest_ty.structFieldAlign(field_idx, zcu) != src_ty.structFieldAlign(field_idx, zcu)) break :tuple;
+            const dest_field_ty = dest_ty.structFieldType(field_idx, zcu);
+            const src_field_ty = src_ty.structFieldType(field_idx, zcu);
             const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src, null);
             if (field != .ok) break :tuple;
         }
@@ -30618,13 +30639,13 @@ fn coerceInMemoryAllowedErrorSets(
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     // Coercion to `anyerror`. Note that this check can return false negatives
     // in case the error sets did not get resolved.
-    if (dest_ty.isAnyError(mod)) {
+    if (dest_ty.isAnyError(zcu)) {
         return .ok;
     }
 
@@ -30669,7 +30690,7 @@ fn coerceInMemoryAllowedErrorSets(
                 const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern());
                 // src anyerror status might have changed after the resolution.
                 if (resolved_src_ty == .anyerror_type) {
-                    // dest_ty.isAnyError(mod) == true is already checked for at this point.
+                    // dest_ty.isAnyError(zcu) == true is already checked for at this point.
                     return .from_anyerror;
                 }
 
@@ -30717,11 +30738,11 @@ fn coerceInMemoryAllowedFns(
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
-    const dest_info = mod.typeToFunc(dest_ty).?;
-    const src_info = mod.typeToFunc(src_ty).?;
+    const dest_info = zcu.typeToFunc(dest_ty).?;
+    const src_info = zcu.typeToFunc(src_ty).?;
 
     {
         if (dest_info.is_var_args != src_info.is_var_args) {
@@ -30922,12 +30943,12 @@ fn coerceInMemoryAllowedPtrs(
         const src_align = if (src_info.flags.alignment != .none)
             src_info.flags.alignment
         else
-            try sema.typeAbiAlignment(Type.fromInterned(src_info.child));
+            try Type.fromInterned(src_info.child).abiAlignmentSema(pt);
 
         const dest_align = if (dest_info.flags.alignment != .none)
             dest_info.flags.alignment
         else
-            try sema.typeAbiAlignment(Type.fromInterned(dest_info.child));
+            try Type.fromInterned(dest_info.child).abiAlignmentSema(pt);
 
         if (dest_align.compare(.gt, src_align)) {
             return InMemoryCoercionResult{ .ptr_alignment = .{
@@ -31044,12 +31065,12 @@ fn storePtr2(
     air_tag: Air.Inst.Tag,
 ) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_ty = sema.typeOf(ptr);
-    if (ptr_ty.isConstPtr(mod))
+    if (ptr_ty.isConstPtr(zcu))
         return sema.fail(block, ptr_src, "cannot assign to constant", .{});
 
-    const elem_ty = ptr_ty.childType(mod);
+    const elem_ty = ptr_ty.childType(zcu);
 
     // To generate better code for tuples, we detect a tuple operand here, and
     // analyze field loads and stores directly. This avoids an extra allocation + memcpy
@@ -31060,8 +31081,8 @@ fn storePtr2(
     // This code does not handle tuple-to-struct coercion, which requires dealing
     // with missing fields.
     const operand_ty = sema.typeOf(uncasted_operand);
-    if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) {
-        const field_count = operand_ty.structFieldCount(mod);
+    if (operand_ty.isTuple(zcu) and elem_ty.zigTypeTag(zcu) == .Array) {
+        const field_count = operand_ty.structFieldCount(zcu);
         var i: u32 = 0;
         while (i < field_count) : (i += 1) {
             const elem_src = operand_src; // TODO better source location
@@ -31085,7 +31106,7 @@ fn storePtr2(
     // as well as working around an LLVM bug:
     // https://github.com/ziglang/zig/issues/11154
     if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
-        const vector_ty = sema.typeOf(vector_ptr).childType(mod);
+        const vector_ty = sema.typeOf(vector_ptr).childType(zcu);
         const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
             error.NotCoercible => unreachable,
             else => |e| return e,
@@ -31119,7 +31140,7 @@ fn storePtr2(
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
 
-    if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
+    if (ptr_ty.ptrInfo(zcu).flags.vector_index == .runtime) {
         const ptr_inst = ptr.toIndex().?;
         const air_tags = sema.air_instructions.items(.tag);
         if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
@@ -31253,9 +31274,9 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins
 /// lengths match.
 fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const array_ty = sema.typeOf(ptr).childType(mod);
-    if (array_ty.zigTypeTag(mod) != .Array) return null;
+    const zcu = pt.zcu;
+    const array_ty = sema.typeOf(ptr).childType(zcu);
+    if (array_ty.zigTypeTag(zcu) != .Array) return null;
     var ptr_ref = ptr;
     var ptr_inst = ptr_ref.toIndex() orelse return null;
     const air_datas = sema.air_instructions.items(.data);
@@ -31263,15 +31284,15 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
     const vector_ty = while (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
         ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
         if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null;
-        const child_ty = sema.typeOf(ptr_ref).childType(mod);
-        if (child_ty.zigTypeTag(mod) == .Vector) break child_ty;
+        const child_ty = sema.typeOf(ptr_ref).childType(zcu);
+        if (child_ty.zigTypeTag(zcu) == .Vector) break child_ty;
         ptr_inst = ptr_ref.toIndex() orelse return null;
     } else return null;
 
     // We have a pointer-to-array and a pointer-to-vector. If the elements and
     // lengths match, return the result.
-    if (array_ty.childType(mod).eql(vector_ty.childType(mod), mod) and
-        array_ty.arrayLen(mod) == vector_ty.vectorLen(mod))
+    if (array_ty.childType(zcu).eql(vector_ty.childType(zcu), zcu) and
+        array_ty.arrayLen(zcu) == vector_ty.vectorLen(zcu))
     {
         return ptr_ref;
     } else {
@@ -31347,8 +31368,8 @@ fn bitCast(
     const old_ty = sema.typeOf(inst);
     try old_ty.resolveLayout(pt);
 
-    const dest_bits = dest_ty.bitSize(pt);
-    const old_bits = old_ty.bitSize(pt);
+    const dest_bits = dest_ty.bitSize(zcu);
+    const old_bits = old_ty.bitSize(zcu);
 
     if (old_bits != dest_bits) {
         return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
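
For reference, a sketch of the size rule enforced here:

    const std = @import("std");

    test "@bitCast requires equal bit sizes" {
        const x: u32 = 0x01020304;
        const bytes: [4]u8 = @bitCast(x); // 32 bits on both sides
        const back: u32 = @bitCast(bytes);
        try std.testing.expectEqual(x, back);
        // A u32 -> u16 @bitCast would be rejected with the error above.
    }
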
@@ -31384,16 +31405,16 @@ fn coerceArrayPtrToSlice(
     inst_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (try sema.resolveValue(inst)) |val| {
         const ptr_array_ty = sema.typeOf(inst);
-        const array_ty = ptr_array_ty.childType(mod);
-        const slice_ptr_ty = dest_ty.slicePtrFieldType(mod);
+        const array_ty = ptr_array_ty.childType(zcu);
+        const slice_ptr_ty = dest_ty.slicePtrFieldType(zcu);
         const slice_ptr = try pt.getCoerced(val, slice_ptr_ty);
         const slice_val = try pt.intern(.{ .slice = .{
             .ty = dest_ty.toIntern(),
             .ptr = slice_ptr.toIntern(),
-            .len = (try pt.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
+            .len = (try pt.intValue(Type.usize, array_ty.arrayLen(zcu))).toIntern(),
         } });
         return Air.internedToRef(slice_val);
     }
@@ -31403,12 +31424,12 @@ fn coerceArrayPtrToSlice(
 
 fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const dest_info = dest_ty.ptrInfo(mod);
-    const inst_info = inst_ty.ptrInfo(mod);
-    const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(mod) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(mod) == 0 or
-        (Type.fromInterned(inst_info.child).arrayLen(mod) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
-        (Type.fromInterned(inst_info.child).isTuple(mod) and Type.fromInterned(inst_info.child).structFieldCount(mod) == 0);
+    const zcu = pt.zcu;
+    const dest_info = dest_ty.ptrInfo(zcu);
+    const inst_info = inst_ty.ptrInfo(zcu);
+    const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(zcu) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(zcu) == 0 or
+        (Type.fromInterned(inst_info.child).arrayLen(zcu) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
+        (Type.fromInterned(inst_info.child).isTuple(zcu) and Type.fromInterned(inst_info.child).structFieldCount(zcu) == 0);
 
     const ok_cv_qualifiers =
         ((!inst_info.flags.is_const or dest_info.flags.is_const) or len0) and
@@ -31436,12 +31457,12 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
     const inst_align = if (inst_info.flags.alignment != .none)
         inst_info.flags.alignment
     else
-        Type.fromInterned(inst_info.child).abiAlignment(pt);
+        Type.fromInterned(inst_info.child).abiAlignment(zcu);
 
     const dest_align = if (dest_info.flags.alignment != .none)
         dest_info.flags.alignment
     else
-        Type.fromInterned(dest_info.child).abiAlignment(pt);
+        Type.fromInterned(dest_info.child).abiAlignment(zcu);
 
     if (dest_align.compare(.gt, inst_align)) {
         in_memory_result.* = .{ .ptr_alignment = .{
@@ -31461,10 +31482,10 @@ fn coerceCompatiblePtrs(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_ty = sema.typeOf(inst);
     if (try sema.resolveValue(inst)) |val| {
-        if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
+        if (!val.isUndef(zcu) and val.isNull(zcu) and !dest_ty.isAllowzeroPtr(zcu)) {
             return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)});
         }
         // The comptime Value representation is compatible with both types.
@@ -31473,17 +31494,17 @@ fn coerceCompatiblePtrs(
         );
     }
     try sema.requireRuntimeBlock(block, inst_src, null);
-    const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod);
-    if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and
-        (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
+    const inst_allows_zero = inst_ty.zigTypeTag(zcu) != .Pointer or inst_ty.ptrAllowsZero(zcu);
+    if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(zcu) and
+        (try dest_ty.elemType2(zcu).hasRuntimeBitsSema(pt) or dest_ty.elemType2(zcu).zigTypeTag(zcu) == .Fn))
     {
-        const actual_ptr = if (inst_ty.isSlice(mod))
+        const actual_ptr = if (inst_ty.isSlice(zcu))
             try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
         else
             inst;
         const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr);
         const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
-        const ok = if (inst_ty.isSlice(mod)) ok: {
+        const ok = if (inst_ty.isSlice(zcu)) ok: {
             const len = try sema.analyzeSliceLen(block, inst_src, inst);
             const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
             break :ok try block.addBinOp(.bool_or, len_zero, is_non_zero);
@@ -31504,11 +31525,11 @@ fn coerceEnumToUnion(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const inst_ty = sema.typeOf(inst);
 
-    const tag_ty = union_ty.unionTagType(mod) orelse {
+    const tag_ty = union_ty.unionTagType(zcu) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{
                 union_ty.fmt(pt), inst_ty.fmt(pt),
@@ -31529,10 +31550,10 @@ fn coerceEnumToUnion(
             });
         };
 
-        const union_obj = mod.typeToUnion(union_ty).?;
+        const union_obj = zcu.typeToUnion(union_ty).?;
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
         try field_ty.resolveFields(pt);
-        if (field_ty.zigTypeTag(mod) == .NoReturn) {
+        if (field_ty.zigTypeTag(zcu) == .NoReturn) {
             const msg = msg: {
                 const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{});
                 errdefer msg.destroy(sema.gpa);
@@ -31569,7 +31590,7 @@ fn coerceEnumToUnion(
 
     try sema.requireRuntimeBlock(block, inst_src, null);
 
-    if (tag_ty.isNonexhaustiveEnum(mod)) {
+    if (tag_ty.isNonexhaustiveEnum(zcu)) {
         const msg = msg: {
             const msg = try sema.errMsg(inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{
                 union_ty.fmt(pt),
@@ -31581,13 +31602,13 @@ fn coerceEnumToUnion(
         return sema.failWithOwnedErrorMsg(block, msg);
     }
 
-    const union_obj = mod.typeToUnion(union_ty).?;
+    const union_obj = zcu.typeToUnion(union_ty).?;
     {
-        var msg: ?*Module.ErrorMsg = null;
+        var msg: ?*Zcu.ErrorMsg = null;
         errdefer if (msg) |some| some.destroy(sema.gpa);
 
         for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
-            if (Type.fromInterned(field_ty).zigTypeTag(mod) == .NoReturn) {
+            if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .NoReturn) {
                 const err_msg = msg orelse try sema.errMsg(
                     inst_src,
                     "runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
@@ -31606,7 +31627,7 @@ fn coerceEnumToUnion(
     }
 
     // If the union has all fields 0 bits, the union value is just the enum value.
-    if (union_ty.unionHasAllZeroBitFieldTypes(pt)) {
+    if (union_ty.unionHasAllZeroBitFieldTypes(zcu)) {
         return block.addBitCast(union_ty, enum_tag);
     }
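
A sketch of the all-zero-bit fast path above; the field types here are all void, so the union value is just the tag:

    const std = @import("std");

    const E = enum { a, b };
    const U = union(E) { a: void, b: void };

    test "enum coerces to an all-zero-bit union" {
        var e: E = .b;
        _ = &e; // keep the value runtime-known
        const u: U = e; // lowered as a bitcast of the tag
        try std.testing.expectEqual(E.b, @as(E, u));
    }
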
 
@@ -31621,7 +31642,7 @@ fn coerceEnumToUnion(
         for (0..union_obj.field_types.len) |field_index| {
             const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-            if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
+            if (!(try field_ty.hasRuntimeBitsSema(pt))) continue;
             try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
                 field_name.fmt(ip),
                 field_ty.fmt(pt),
@@ -31642,8 +31663,8 @@ fn coerceAnonStructToUnion(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const inst_ty = sema.typeOf(inst);
     const field_info: union(enum) {
         name: InternPool.NullTerminatedString,
@@ -31701,8 +31722,8 @@ fn coerceAnonStructToUnionPtrs(
     anon_struct_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const union_ty = ptr_union_ty.childType(mod);
+    const zcu = pt.zcu;
+    const union_ty = ptr_union_ty.childType(zcu);
     const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
     const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src);
     return sema.analyzeRef(block, union_ty_src, union_inst);
@@ -31717,8 +31738,8 @@ fn coerceAnonStructToStructPtrs(
     anon_struct_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const struct_ty = ptr_struct_ty.childType(mod);
+    const zcu = pt.zcu;
+    const struct_ty = ptr_struct_ty.childType(zcu);
     const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
     const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src);
     return sema.analyzeRef(block, struct_ty_src, struct_inst);
@@ -31734,9 +31755,9 @@ fn coerceArrayLike(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_ty = sema.typeOf(inst);
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
 
     // try coercion of the whole array
     const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src, null);
@@ -31750,8 +31771,8 @@ fn coerceArrayLike(
     }
 
     // otherwise, try element by element
-    const inst_len = inst_ty.arrayLen(mod);
-    const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod));
+    const inst_len = inst_ty.arrayLen(zcu);
+    const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(zcu));
     if (dest_len != inst_len) {
         const msg = msg: {
             const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{
@@ -31765,14 +31786,14 @@ fn coerceArrayLike(
         return sema.failWithOwnedErrorMsg(block, msg);
     }
 
-    const dest_elem_ty = dest_ty.childType(mod);
-    if (dest_ty.isVector(mod) and inst_ty.isVector(mod) and (try sema.resolveValue(inst)) == null) {
-        const inst_elem_ty = inst_ty.childType(mod);
-        switch (dest_elem_ty.zigTypeTag(mod)) {
-            .Int => if (inst_elem_ty.isInt(mod)) {
+    const dest_elem_ty = dest_ty.childType(zcu);
+    if (dest_ty.isVector(zcu) and inst_ty.isVector(zcu) and (try sema.resolveValue(inst)) == null) {
+        const inst_elem_ty = inst_ty.childType(zcu);
+        switch (dest_elem_ty.zigTypeTag(zcu)) {
+            .Int => if (inst_elem_ty.isInt(zcu)) {
                 // integer widening
-                const dst_info = dest_elem_ty.intInfo(mod);
-                const src_info = inst_elem_ty.intInfo(mod);
+                const dst_info = dest_elem_ty.intInfo(zcu);
+                const src_info = inst_elem_ty.intInfo(zcu);
                 if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                     // small enough unsigned ints can be cast to large enough signed ints
                     (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
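
The widening rule in this condition reads: equal signedness may widen to an equal or larger bit count, and an unsigned source may additionally widen into a strictly larger signed destination (the extra bit absorbs the sign). A minimal sketch of the predicate, with invented names:

const std = @import("std");

fn widens(src_signed: bool, src_bits: u16, dst_signed: bool, dst_bits: u16) bool {
    // Mirrors the condition above: same signedness needs dst >= src bits;
    // a signed destination with strictly more bits also accepts unsigned sources.
    return (src_signed == dst_signed and dst_bits >= src_bits) or
        (dst_signed and dst_bits > src_bits);
}

test "element widening rule" {
    try std.testing.expect(widens(false, 8, false, 16)); // u8 -> u16
    try std.testing.expect(widens(false, 8, true, 9)); // u8 -> i9
    try std.testing.expect(!widens(false, 8, true, 8)); // u8 -> i8: no room for the sign bit
}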
@@ -31835,10 +31856,10 @@ fn coerceTupleToArray(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const inst_ty = sema.typeOf(inst);
-    const inst_len = inst_ty.arrayLen(mod);
-    const dest_len = dest_ty.arrayLen(mod);
+    const inst_len = inst_ty.arrayLen(zcu);
+    const dest_len = dest_ty.arrayLen(zcu);
 
     if (dest_len != inst_len) {
         const msg = msg: {
@@ -31856,13 +31877,13 @@ fn coerceTupleToArray(
     const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len);
     const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems);
     const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
-    const dest_elem_ty = dest_ty.childType(mod);
+    const dest_elem_ty = dest_ty.childType(zcu);
 
     var runtime_src: ?LazySrcLoc = null;
     for (element_vals, element_refs, 0..) |*val, *ref, i_usize| {
         const i: u32 = @intCast(i_usize);
         if (i_usize == inst_len) {
-            const sentinel_val = dest_ty.sentinel(mod).?;
+            const sentinel_val = dest_ty.sentinel(zcu).?;
             val.* = sentinel_val.toIntern();
             ref.* = Air.internedToRef(sentinel_val.toIntern());
             break;
@@ -31901,12 +31922,12 @@ fn coerceTupleToSlicePtrs(
     tuple_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
+    const zcu = pt.zcu;
+    const tuple_ty = sema.typeOf(ptr_tuple).childType(zcu);
     const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
-    const slice_info = slice_ty.ptrInfo(mod);
+    const slice_info = slice_ty.ptrInfo(zcu);
     const array_ty = try pt.arrayType(.{
-        .len = tuple_ty.structFieldCount(mod),
+        .len = tuple_ty.structFieldCount(zcu),
         .sentinel = slice_info.sentinel,
         .child = slice_info.child,
     });
@@ -31928,9 +31949,9 @@ fn coerceTupleToArrayPtrs(
     tuple_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
-    const ptr_info = ptr_array_ty.ptrInfo(mod);
+    const ptr_info = ptr_array_ty.ptrInfo(zcu);
     const array_ty = Type.fromInterned(ptr_info.child);
     const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
     if (ptr_info.flags.alignment != .none) {
@@ -31950,16 +31971,16 @@ fn coerceTupleToStruct(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     try struct_ty.resolveFields(pt);
     try struct_ty.resolveStructFieldInits(pt);
 
-    if (struct_ty.isTupleOrAnonStruct(mod)) {
+    if (struct_ty.isTupleOrAnonStruct(zcu)) {
         return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
     }
 
-    const struct_type = mod.typeToStruct(struct_ty).?;
+    const struct_type = zcu.typeToStruct(struct_ty).?;
     const field_vals = try sema.arena.alloc(InternPool.Index, struct_type.field_types.len);
     const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
     @memset(field_refs, .none);
@@ -31973,7 +31994,7 @@ fn coerceTupleToStruct(
     };
     for (0..field_count) |tuple_field_index| {
         const field_src = inst_src; // TODO better source location
-        const field_name = inst_ty.structFieldName(tuple_field_index, mod).unwrap() orelse
+        const field_name = inst_ty.structFieldName(tuple_field_index, zcu).unwrap() orelse
             try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{tuple_field_index}, .no_embedded_nulls);
 
         const struct_field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
@@ -32003,7 +32024,7 @@ fn coerceTupleToStruct(
     }
 
     // Populate default field values and report errors for missing fields.
-    var root_msg: ?*Module.ErrorMsg = null;
+    var root_msg: ?*Zcu.ErrorMsg = null;
     errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
     for (field_refs, 0..) |*field_ref, i| {
@@ -32058,8 +32079,8 @@ fn coerceTupleToTuple(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
         .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.len,
@@ -32081,7 +32102,7 @@ fn coerceTupleToTuple(
     for (0..dest_field_count) |field_index_usize| {
         const field_i: u32 = @intCast(field_index_usize);
         const field_src = inst_src; // TODO better source location
-        const field_name = inst_ty.structFieldName(field_index_usize, mod).unwrap() orelse
+        const field_name = inst_ty.structFieldName(field_index_usize, zcu).unwrap() orelse
             try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index_usize}, .no_embedded_nulls);
 
         if (field_name.eqlSlice("len", ip))
@@ -32124,7 +32145,7 @@ fn coerceTupleToTuple(
     }
 
     // Populate default field values and report errors for missing fields.
-    var root_msg: ?*Module.ErrorMsg = null;
+    var root_msg: ?*Zcu.ErrorMsg = null;
     errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
     for (field_refs, 0..) |*field_ref, i_usize| {
@@ -32139,7 +32160,7 @@ fn coerceTupleToTuple(
 
         const field_src = inst_src; // TODO better source location
         if (default_val == .none) {
-            const field_name = tuple_ty.structFieldName(i, mod).unwrap() orelse {
+            const field_name = tuple_ty.structFieldName(i, zcu).unwrap() orelse {
                 const template = "missing tuple field: {d}";
                 if (root_msg) |msg| {
                     try sema.errNote(field_src, msg, template, .{i});
@@ -32308,7 +32329,7 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, nav_index: InternPoo
     const ip = &zcu.intern_pool;
     const nav_val = zcu.navValue(nav_index);
     if (!ip.isFuncBody(nav_val.toIntern())) return;
-    if (!try sema.fnHasRuntimeBits(nav_val.typeOf(zcu))) return;
+    if (!try nav_val.typeOf(zcu).fnHasRuntimeBitsSema(sema.pt)) return;
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = nav_val.toIntern() }));
     try zcu.ensureFuncBodyAnalysisQueued(nav_val.toIntern());
 }
@@ -32320,11 +32341,11 @@ fn analyzeRef(
     operand: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const operand_ty = sema.typeOf(operand);
 
     if (try sema.resolveValue(operand)) |val| {
-        switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .@"extern" => |e| return sema.analyzeNavRef(src, e.owner_nav),
             .func => |f| return sema.analyzeNavRef(src, f.owner_nav),
             else => return uavRef(sema, val.toIntern()),
@@ -32332,7 +32353,7 @@ fn analyzeRef(
     }
 
     try sema.requireRuntimeBlock(block, src, null);
-    const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local);
+    const address_space = target_util.defaultAddressSpace(zcu.getTarget(), .local);
     const ptr_type = try pt.ptrTypeSema(.{
         .child = operand_ty.toIntern(),
         .flags = .{
@@ -32359,13 +32380,13 @@ fn analyzeLoad(
     ptr_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_ty = sema.typeOf(ptr);
-    const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
-        .Pointer => ptr_ty.childType(mod),
+    const elem_ty = switch (ptr_ty.zigTypeTag(zcu)) {
+        .Pointer => ptr_ty.childType(zcu),
         else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}),
     };
-    if (elem_ty.zigTypeTag(mod) == .Opaque) {
+    if (elem_ty.zigTypeTag(zcu) == .Opaque) {
         return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(pt)});
     }
 
@@ -32379,7 +32400,7 @@ fn analyzeLoad(
         }
     }
 
-    if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
+    if (ptr_ty.ptrInfo(zcu).flags.vector_index == .runtime) {
         const ptr_inst = ptr.toIndex().?;
         const air_tags = sema.air_instructions.items(.tag);
         if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
@@ -32403,11 +32424,11 @@ fn analyzeSlicePtr(
     slice_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const result_ty = slice_ty.slicePtrFieldType(mod);
+    const zcu = pt.zcu;
+    const result_ty = slice_ty.slicePtrFieldType(zcu);
     if (try sema.resolveValue(slice)) |val| {
-        if (val.isUndef(mod)) return pt.undefRef(result_ty);
-        return Air.internedToRef(val.slicePtr(mod).toIntern());
+        if (val.isUndef(zcu)) return pt.undefRef(result_ty);
+        return Air.internedToRef(val.slicePtr(zcu).toIntern());
     }
     try sema.requireRuntimeBlock(block, slice_src, null);
     return block.addTyOp(.slice_ptr, result_ty, slice);
@@ -32421,13 +32442,13 @@ fn analyzeOptionalSlicePtr(
     opt_slice_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const result_ty = opt_slice_ty.optionalChild(mod).slicePtrFieldType(mod);
+    const zcu = pt.zcu;
+    const result_ty = opt_slice_ty.optionalChild(zcu).slicePtrFieldType(zcu);
 
     if (try sema.resolveValue(opt_slice)) |opt_val| {
-        if (opt_val.isUndef(mod)) return pt.undefRef(result_ty);
-        const slice_ptr: InternPool.Index = if (opt_val.optionalValue(mod)) |val|
-            val.slicePtr(mod).toIntern()
+        if (opt_val.isUndef(zcu)) return pt.undefRef(result_ty);
+        const slice_ptr: InternPool.Index = if (opt_val.optionalValue(zcu)) |val|
+            val.slicePtr(zcu).toIntern()
         else
             .null_value;
 
@@ -32447,9 +32468,9 @@ fn analyzeSliceLen(
     slice_inst: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (try sema.resolveValue(slice_inst)) |slice_val| {
-        if (slice_val.isUndef(mod)) {
+        if (slice_val.isUndef(zcu)) {
             return pt.undefRef(Type.usize);
         }
         return pt.intRef(Type.usize, try slice_val.sliceLen(pt));
@@ -32466,23 +32487,23 @@ fn analyzeIsNull(
     invert_logic: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const result_ty = Type.bool;
     if (try sema.resolveValue(operand)) |opt_val| {
-        if (opt_val.isUndef(mod)) {
+        if (opt_val.isUndef(zcu)) {
             return pt.undefRef(result_ty);
         }
-        const is_null = opt_val.isNull(mod);
+        const is_null = opt_val.isNull(zcu);
         const bool_value = if (invert_logic) !is_null else is_null;
         return if (bool_value) .bool_true else .bool_false;
     }
 
     const inverted_non_null_res: Air.Inst.Ref = if (invert_logic) .bool_true else .bool_false;
     const operand_ty = sema.typeOf(operand);
-    if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) {
+    if (operand_ty.zigTypeTag(zcu) == .Optional and operand_ty.optionalChild(zcu).zigTypeTag(zcu) == .NoReturn) {
         return inverted_non_null_res;
     }
-    if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
+    if (operand_ty.zigTypeTag(zcu) != .Optional and !operand_ty.isPtrLikeOptional(zcu)) {
         return inverted_non_null_res;
     }
     try sema.requireRuntimeBlock(block, src, null);
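
A hypothetical usage sketch of the pointer-like-optional path handled above, where == null works without unwrapping:

const std = @import("std");

test "null check on a pointer-like optional" {
    const x: u8 = 1;
    var p: ?*const u8 = &x;
    try std.testing.expect(p != null);
    p = null;
    try std.testing.expect(p == null);
}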
@@ -32497,12 +32518,12 @@ fn analyzePtrIsNonErrComptimeOnly(
     operand: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const ptr_ty = sema.typeOf(operand);
-    assert(ptr_ty.zigTypeTag(mod) == .Pointer);
-    const child_ty = ptr_ty.childType(mod);
+    assert(ptr_ty.zigTypeTag(zcu) == .Pointer);
+    const child_ty = ptr_ty.childType(zcu);
 
-    const child_tag = child_ty.zigTypeTag(mod);
+    const child_tag = child_ty.zigTypeTag(zcu);
     if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return .bool_true;
     if (child_tag == .ErrorSet) return .bool_false;
     assert(child_tag == .ErrorUnion);
@@ -32520,16 +32541,16 @@ fn analyzeIsNonErrComptimeOnly(
     operand: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const operand_ty = sema.typeOf(operand);
-    const ot = operand_ty.zigTypeTag(mod);
+    const ot = operand_ty.zigTypeTag(zcu);
     if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
     if (ot == .ErrorSet) return .bool_false;
     assert(ot == .ErrorUnion);
 
-    const payload_ty = operand_ty.errorUnionPayload(mod);
-    if (payload_ty.zigTypeTag(mod) == .NoReturn) {
+    const payload_ty = operand_ty.errorUnionPayload(zcu);
+    if (payload_ty.zigTypeTag(zcu) == .NoReturn) {
         return .bool_false;
     }
 
@@ -32588,7 +32609,7 @@ fn analyzeIsNonErrComptimeOnly(
                 // If the error set is empty, we must return a comptime true or false.
                 // However, we want to avoid unnecessarily resolving an inferred error set
                 // in case it is already non-empty.
-                try mod.maybeUnresolveIes(func_index);
+                try zcu.maybeUnresolveIes(func_index);
                 switch (ip.funcIesResolvedUnordered(func_index)) {
                     .anyerror_type => break :blk,
                     .none => {},
@@ -32624,10 +32645,10 @@ fn analyzeIsNonErrComptimeOnly(
     }
 
     if (maybe_operand_val) |err_union| {
-        if (err_union.isUndef(mod)) {
+        if (err_union.isUndef(zcu)) {
             return pt.undefRef(Type.bool);
         }
-        if (err_union.getErrorName(mod) == .none) {
+        if (err_union.getErrorName(zcu) == .none) {
             return .bool_true;
         } else {
             return .bool_false;
@@ -32681,12 +32702,12 @@ fn analyzeSlice(
     by_length: bool,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     // Slice expressions can operate on a variable whose type is an array. This requires
     // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
     const ptr_ptr_ty = sema.typeOf(ptr_ptr);
-    const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => ptr_ptr_ty.childType(mod),
+    const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(zcu)) {
+        .Pointer => ptr_ptr_ty.childType(zcu),
         else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(pt)}),
     };
 
@@ -32695,20 +32716,20 @@ fn analyzeSlice(
     var ptr_or_slice = ptr_ptr;
     var elem_ty: Type = undefined;
     var ptr_sentinel: ?Value = null;
-    switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
+    switch (ptr_ptr_child_ty.zigTypeTag(zcu)) {
         .Array => {
-            ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
-            elem_ty = ptr_ptr_child_ty.childType(mod);
+            ptr_sentinel = ptr_ptr_child_ty.sentinel(zcu);
+            elem_ty = ptr_ptr_child_ty.childType(zcu);
         },
-        .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) {
+        .Pointer => switch (ptr_ptr_child_ty.ptrSize(zcu)) {
             .One => {
-                const double_child_ty = ptr_ptr_child_ty.childType(mod);
+                const double_child_ty = ptr_ptr_child_ty.childType(zcu);
                 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
-                if (double_child_ty.zigTypeTag(mod) == .Array) {
-                    ptr_sentinel = double_child_ty.sentinel(mod);
+                if (double_child_ty.zigTypeTag(zcu) == .Array) {
+                    ptr_sentinel = double_child_ty.sentinel(zcu);
                     slice_ty = ptr_ptr_child_ty;
                     array_ty = double_child_ty;
-                    elem_ty = double_child_ty.childType(mod);
+                    elem_ty = double_child_ty.childType(zcu);
                 } else {
                     const bounds_error_message = "slice of single-item pointer must have comptime-known bounds [0..0], [0..1], or [1..1]";
                     if (uncasted_end_opt == .none) {
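
The restriction named in that error message, illustrated as a hypothetical test (not from this commit):

const std = @import("std");

test "slicing a single-item pointer" {
    var x: u8 = 42;
    const p: *u8 = &x;
    const s = p[0..1]; // ok: comptime-known bounds; result type is *[1]u8
    try std.testing.expect(s[0] == 42);
    // p[0..2] would not compile: bounds must be [0..0], [0..1], or [1..1].
}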
@@ -32777,7 +32798,7 @@ fn analyzeSlice(
                         .len = 1,
                         .child = double_child_ty.toIntern(),
                     });
-                    const ptr_info = ptr_ptr_child_ty.ptrInfo(mod);
+                    const ptr_info = ptr_ptr_child_ty.ptrInfo(zcu);
                     slice_ty = try pt.ptrType(.{
                         .child = array_ty.toIntern(),
                         .flags = .{
@@ -32792,35 +32813,35 @@ fn analyzeSlice(
                 }
             },
             .Many, .C => {
-                ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
+                ptr_sentinel = ptr_ptr_child_ty.sentinel(zcu);
                 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                 slice_ty = ptr_ptr_child_ty;
                 array_ty = ptr_ptr_child_ty;
-                elem_ty = ptr_ptr_child_ty.childType(mod);
+                elem_ty = ptr_ptr_child_ty.childType(zcu);
 
-                if (ptr_ptr_child_ty.ptrSize(mod) == .C) {
+                if (ptr_ptr_child_ty.ptrSize(zcu) == .C) {
                     if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
-                        if (ptr_val.isNull(mod)) {
+                        if (ptr_val.isNull(zcu)) {
                             return sema.fail(block, src, "slice of null pointer", .{});
                         }
                     }
                 }
             },
             .Slice => {
-                ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
+                ptr_sentinel = ptr_ptr_child_ty.sentinel(zcu);
                 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                 slice_ty = ptr_ptr_child_ty;
                 array_ty = ptr_ptr_child_ty;
-                elem_ty = ptr_ptr_child_ty.childType(mod);
+                elem_ty = ptr_ptr_child_ty.childType(zcu);
             },
         },
         else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(pt)}),
     }
 
-    const ptr = if (slice_ty.isSlice(mod))
+    const ptr = if (slice_ty.isSlice(zcu))
         try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
-    else if (array_ty.zigTypeTag(mod) == .Array) ptr: {
-        var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type;
+    else if (array_ty.zigTypeTag(zcu) == .Array) ptr: {
+        var manyptr_ty_key = zcu.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type;
         assert(manyptr_ty_key.child == array_ty.toIntern());
         assert(manyptr_ty_key.flags.size == .One);
         manyptr_ty_key.child = elem_ty.toIntern();
@@ -32838,8 +32859,8 @@ fn analyzeSlice(
     // we might learn of the length because it is a comptime-known slice value.
     var end_is_len = uncasted_end_opt == .none;
     const end = e: {
-        if (array_ty.zigTypeTag(mod) == .Array) {
-            const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(mod));
+        if (array_ty.zigTypeTag(zcu) == .Array) {
+            const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(zcu));
 
             if (!end_is_len) {
                 const end = if (by_length) end: {
@@ -32850,10 +32871,10 @@ fn analyzeSlice(
                 if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
                     const len_s_val = try pt.intValue(
                         Type.usize,
-                        array_ty.arrayLenIncludingSentinel(mod),
+                        array_ty.arrayLenIncludingSentinel(zcu),
                     );
                     if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
-                        const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null)
+                        const sentinel_label: []const u8 = if (array_ty.sentinel(zcu) != null)
                             " +1 (sentinel)"
                         else
                             "";
@@ -32873,7 +32894,7 @@ fn analyzeSlice(
                     // end_is_len is only true if we are NOT using the sentinel
                     // length. For sentinel-length, we don't want the type to
                     // contain the sentinel.
-                    if (end_val.eql(len_val, Type.usize, mod)) {
+                    if (end_val.eql(len_val, Type.usize, zcu)) {
                         end_is_len = true;
                     }
                 }
@@ -32881,7 +32902,7 @@ fn analyzeSlice(
             }
 
             break :e Air.internedToRef(len_val.toIntern());
-        } else if (slice_ty.isSlice(mod)) {
+        } else if (slice_ty.isSlice(zcu)) {
             if (!end_is_len) {
                 const end = if (by_length) end: {
                     const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
@@ -32890,10 +32911,10 @@ fn analyzeSlice(
                 } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                 if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
                     if (try sema.resolveValue(ptr_or_slice)) |slice_val| {
-                        if (slice_val.isUndef(mod)) {
+                        if (slice_val.isUndef(zcu)) {
                             return sema.fail(block, src, "slice of undefined", .{});
                         }
-                        const has_sentinel = slice_ty.sentinel(mod) != null;
+                        const has_sentinel = slice_ty.sentinel(zcu) != null;
                         const slice_len = try slice_val.sliceLen(pt);
                         const len_plus_sent = slice_len + @intFromBool(has_sentinel);
                         const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent);
@@ -32919,7 +32940,7 @@ fn analyzeSlice(
                         // is only true if it equals the length WITHOUT the
                         // sentinel, so we don't add a sentinel type.
                         const slice_len_val = try pt.intValue(Type.usize, slice_len);
-                        if (end_val.eql(slice_len_val, Type.usize, mod)) {
+                        if (end_val.eql(slice_len_val, Type.usize, zcu)) {
                             end_is_len = true;
                         }
                     }
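
The sentinel accounting above allows the end bound to reach one past the length of a sentinel-terminated operand, so the sentinel element itself is addressable; a hypothetical illustration:

const std = @import("std");

test "end bound may include the sentinel" {
    const s: [:0]const u8 = "abc"; // len 3, sentinel 0 stored at index 3
    const t = s[0..4]; // len + 1 is permitted because a sentinel is present
    try std.testing.expect(t[3] == 0);
}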
@@ -32976,8 +32997,8 @@ fn analyzeSlice(
             checked_start_lte_end = true;
             if (try sema.resolveValue(new_ptr)) |ptr_val| sentinel_check: {
                 const expected_sentinel = sentinel orelse break :sentinel_check;
-                const start_int = start_val.getUnsignedInt(pt).?;
-                const end_int = end_val.getUnsignedInt(pt).?;
+                const start_int = start_val.toUnsignedInt(zcu);
+                const end_int = end_val.toUnsignedInt(zcu);
                 const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
 
                 const many_ptr_ty = try pt.manyConstPtrType(elem_ty);
@@ -33001,7 +33022,7 @@ fn analyzeSlice(
                     ),
                 };
 
-                if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) {
+                if (!actual_sentinel.eql(expected_sentinel, elem_ty, zcu)) {
                     const msg = msg: {
                         const msg = try sema.errMsg(src, "value in memory does not match slice sentinel", .{});
                         errdefer msg.destroy(sema.gpa);
@@ -33041,8 +33062,8 @@ fn analyzeSlice(
         try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
     const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
 
-    const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod);
-    const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C;
+    const new_ptr_ty_info = new_ptr_ty.ptrInfo(zcu);
+    const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(zcu) != .C;
 
     if (opt_new_len_val) |new_len_val| {
         const new_len_int = try new_len_val.toUnsignedIntSema(pt);
@@ -33067,17 +33088,17 @@ fn analyzeSlice(
             const result = try block.addBitCast(return_ty, new_ptr);
             if (block.wantSafety()) {
                 // requirement: slicing C ptr is non-null
-                if (ptr_ptr_child_ty.isCPtr(mod)) {
+                if (ptr_ptr_child_ty.isCPtr(zcu)) {
                     const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
                     try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
                 }
 
                 bounds_check: {
-                    const actual_len = if (array_ty.zigTypeTag(mod) == .Array)
-                        try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
-                    else if (slice_ty.isSlice(mod)) l: {
+                    const actual_len = if (array_ty.zigTypeTag(zcu) == .Array)
+                        try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
+                    else if (slice_ty.isSlice(zcu)) l: {
                         const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
-                        break :l if (slice_ty.sentinel(mod) == null)
+                        break :l if (slice_ty.sentinel(zcu) == null)
                             slice_len_inst
                         else
                             try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
@@ -33097,7 +33118,7 @@ fn analyzeSlice(
             return result;
         };
 
-        if (!new_ptr_val.isUndef(mod)) {
+        if (!new_ptr_val.isUndef(zcu)) {
             return Air.internedToRef((try pt.getCoerced(new_ptr_val, return_ty)).toIntern());
         }
 
@@ -33125,15 +33146,15 @@ fn analyzeSlice(
     try sema.requireRuntimeBlock(block, src, runtime_src.?);
     if (block.wantSafety()) {
         // requirement: slicing C ptr is non-null
-        if (ptr_ptr_child_ty.isCPtr(mod)) {
+        if (ptr_ptr_child_ty.isCPtr(zcu)) {
             const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
             try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
         }
 
         // requirement: end <= len
-        const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
-            try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
-        else if (slice_ty.isSlice(mod)) blk: {
+        const opt_len_inst = if (array_ty.zigTypeTag(zcu) == .Array)
+            try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
+        else if (slice_ty.isSlice(zcu)) blk: {
             if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
                 // we don't need to add one for sentinels because the
                 // underlying value data includes the sentinel
@@ -33141,7 +33162,7 @@ fn analyzeSlice(
             }
 
             const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
-            if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst;
+            if (slice_ty.sentinel(zcu) == null) break :blk slice_len_inst;
 
             // we have to add one because slice lengths don't include the sentinel
             break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
@@ -33186,16 +33207,16 @@ fn cmpNumeric(
     rhs_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = sema.typeOf(uncasted_lhs);
     const rhs_ty = sema.typeOf(uncasted_rhs);
 
-    assert(lhs_ty.isNumeric(mod));
-    assert(rhs_ty.isNumeric(mod));
+    assert(lhs_ty.isNumeric(zcu));
+    assert(rhs_ty.isNumeric(zcu));
 
-    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
-    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
-    const target = mod.getTarget();
+    const lhs_ty_tag = lhs_ty.zigTypeTag(zcu);
+    const rhs_ty_tag = rhs_ty.zigTypeTag(zcu);
+    const target = zcu.getTarget();
 
     // One exception to heterogeneous comparison: comptime_float needs to
     // coerce to fixed-width float.
@@ -33214,28 +33235,28 @@ fn cmpNumeric(
         if (try sema.resolveValue(lhs)) |lhs_val| {
             if (try sema.resolveValue(rhs)) |rhs_val| {
                 // Compare ints: const vs. undefined (or vice versa)
-                if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) {
+                if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(zcu) and rhs_val.isUndef(zcu)) {
                     if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
                         return if (res) .bool_true else .bool_false;
                     }
-                } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) {
+                } else if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(zcu) and lhs_val.isUndef(zcu)) {
                     if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
                         return if (res) .bool_true else .bool_false;
                     }
                 }
 
-                if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
+                if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                     return pt.undefRef(Type.bool);
                 }
-                if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
+                if (lhs_val.isNan(zcu) or rhs_val.isNan(zcu)) {
                     return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false;
                 }
-                return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, pt, .sema))
+                return if (try Value.compareHeteroSema(lhs_val, op, rhs_val, pt))
                     .bool_true
                 else
                     .bool_false;
             } else {
-                if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
+                if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(zcu)) {
                     // Compare ints: const vs. var
                     if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
                         return if (res) .bool_true else .bool_false;
@@ -33245,7 +33266,7 @@ fn cmpNumeric(
             }
         } else {
             if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| {
-                if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
+                if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(zcu)) {
                     // Compare ints: var vs. const
                     if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
                         return if (res) .bool_true else .bool_false;
@@ -33301,31 +33322,31 @@ fn cmpNumeric(
     const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
         !(try lhs_val.compareAllWithZeroSema(.gte, pt))
     else
-        (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
+        (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(zcu));
     const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
         !(try rhs_val.compareAllWithZeroSema(.gte, pt))
     else
-        (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
+        (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(zcu));
     const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
 
     var dest_float_type: ?Type = null;
 
     var lhs_bits: usize = undefined;
     if (try sema.resolveValueResolveLazy(lhs)) |lhs_val| {
-        if (lhs_val.isUndef(mod))
+        if (lhs_val.isUndef(zcu))
             return pt.undefRef(Type.bool);
-        if (lhs_val.isNan(mod)) switch (op) {
+        if (lhs_val.isNan(zcu)) switch (op) {
             .neq => return .bool_true,
             else => return .bool_false,
         };
-        if (lhs_val.isInf(mod)) switch (op) {
+        if (lhs_val.isInf(zcu)) switch (op) {
             .neq => return .bool_true,
             .eq => return .bool_false,
-            .gt, .gte => return if (lhs_val.isNegativeInf(mod)) .bool_false else .bool_true,
-            .lt, .lte => return if (lhs_val.isNegativeInf(mod)) .bool_true else .bool_false,
+            .gt, .gte => return if (lhs_val.isNegativeInf(zcu)) .bool_false else .bool_true,
+            .lt, .lte => return if (lhs_val.isNegativeInf(zcu)) .bool_true else .bool_false,
         };
         if (!rhs_is_signed) {
-            switch (lhs_val.orderAgainstZero(pt)) {
+            switch (lhs_val.orderAgainstZero(zcu)) {
                 .gt => {},
                 .eq => switch (op) { // LHS = 0, RHS is unsigned
                     .lte => return .bool_true,
@@ -33339,7 +33360,7 @@ fn cmpNumeric(
             }
         }
         if (lhs_is_float) {
-            if (lhs_val.floatHasFraction(mod)) {
+            if (lhs_val.floatHasFraction(zcu)) {
                 switch (op) {
                     .eq => return .bool_false,
                     .neq => return .bool_true,
@@ -33347,9 +33368,9 @@ fn cmpNumeric(
                 }
             }
 
-            var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, pt));
+            var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, zcu));
             defer bigint.deinit();
-            if (lhs_val.floatHasFraction(mod)) {
+            if (lhs_val.floatHasFraction(zcu)) {
                 if (lhs_is_signed) {
                     try bigint.addScalar(&bigint, -1);
                 } else {
@@ -33358,32 +33379,32 @@ fn cmpNumeric(
             }
             lhs_bits = bigint.toConst().bitCountTwosComp();
         } else {
-            lhs_bits = lhs_val.intBitCountTwosComp(pt);
+            lhs_bits = lhs_val.intBitCountTwosComp(zcu);
         }
         lhs_bits += @intFromBool(!lhs_is_signed and dest_int_is_signed);
     } else if (lhs_is_float) {
         dest_float_type = lhs_ty;
     } else {
-        const int_info = lhs_ty.intInfo(mod);
+        const int_info = lhs_ty.intInfo(zcu);
         lhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed);
     }
 
     var rhs_bits: usize = undefined;
     if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| {
-        if (rhs_val.isUndef(mod))
+        if (rhs_val.isUndef(zcu))
             return pt.undefRef(Type.bool);
-        if (rhs_val.isNan(mod)) switch (op) {
+        if (rhs_val.isNan(zcu)) switch (op) {
             .neq => return .bool_true,
             else => return .bool_false,
         };
-        if (rhs_val.isInf(mod)) switch (op) {
+        if (rhs_val.isInf(zcu)) switch (op) {
             .neq => return .bool_true,
             .eq => return .bool_false,
-            .gt, .gte => return if (rhs_val.isNegativeInf(mod)) .bool_true else .bool_false,
-            .lt, .lte => return if (rhs_val.isNegativeInf(mod)) .bool_false else .bool_true,
+            .gt, .gte => return if (rhs_val.isNegativeInf(zcu)) .bool_true else .bool_false,
+            .lt, .lte => return if (rhs_val.isNegativeInf(zcu)) .bool_false else .bool_true,
         };
         if (!lhs_is_signed) {
-            switch (rhs_val.orderAgainstZero(pt)) {
+            switch (rhs_val.orderAgainstZero(zcu)) {
                 .gt => {},
                 .eq => switch (op) { // RHS = 0, LHS is unsigned
                     .gte => return .bool_true,
@@ -33397,7 +33418,7 @@ fn cmpNumeric(
             }
         }
         if (rhs_is_float) {
-            if (rhs_val.floatHasFraction(mod)) {
+            if (rhs_val.floatHasFraction(zcu)) {
                 switch (op) {
                     .eq => return .bool_false,
                     .neq => return .bool_true,
@@ -33405,9 +33426,9 @@ fn cmpNumeric(
                 }
             }
 
-            var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, pt));
+            var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, zcu));
             defer bigint.deinit();
-            if (rhs_val.floatHasFraction(mod)) {
+            if (rhs_val.floatHasFraction(zcu)) {
                 if (rhs_is_signed) {
                     try bigint.addScalar(&bigint, -1);
                 } else {
@@ -33416,13 +33437,13 @@ fn cmpNumeric(
             }
             rhs_bits = bigint.toConst().bitCountTwosComp();
         } else {
-            rhs_bits = rhs_val.intBitCountTwosComp(pt);
+            rhs_bits = rhs_val.intBitCountTwosComp(zcu);
         }
         rhs_bits += @intFromBool(!rhs_is_signed and dest_int_is_signed);
     } else if (rhs_is_float) {
         dest_float_type = rhs_ty;
     } else {
-        const int_info = rhs_ty.intInfo(mod);
+        const int_info = rhs_ty.intInfo(zcu);
         rhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed);
     }
 
@@ -33450,9 +33471,9 @@ fn compareIntsOnlyPossibleResult(
     rhs_ty: Type,
 ) Allocator.Error!?bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const rhs_info = rhs_ty.intInfo(mod);
-    const vs_zero = lhs_val.orderAgainstZeroAdvanced(pt, .sema) catch unreachable;
+    const zcu = pt.zcu;
+    const rhs_info = rhs_ty.intInfo(zcu);
+    const vs_zero = lhs_val.orderAgainstZeroSema(pt) catch unreachable;
     const is_zero = vs_zero == .eq;
     const is_negative = vs_zero == .lt;
     const is_positive = vs_zero == .gt;
@@ -33484,7 +33505,7 @@ fn compareIntsOnlyPossibleResult(
     };
 
     const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed);
-    const req_bits = lhs_val.intBitCountTwosComp(pt) + sign_adj;
+    const req_bits = lhs_val.intBitCountTwosComp(zcu) + sign_adj;
 
     // No sized type can have more than 65535 bits.
     // The RHS type operand is either a runtime value or a sized (but undefined) constant.
@@ -33515,7 +33536,7 @@ fn compareIntsOnlyPossibleResult(
             if (is_negative) .signed else .unsigned,
             @intCast(req_bits),
         );
-        const pop_count = lhs_val.popCount(ty, pt);
+        const pop_count = lhs_val.popCount(ty, zcu);
 
         if (is_negative) {
             break :edge .{ pop_count == 1, false };
@@ -33546,11 +33567,11 @@ fn cmpVector(
     rhs_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    assert(lhs_ty.zigTypeTag(mod) == .Vector);
-    assert(rhs_ty.zigTypeTag(mod) == .Vector);
+    assert(lhs_ty.zigTypeTag(zcu) == .Vector);
+    assert(rhs_ty.zigTypeTag(zcu) == .Vector);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
     const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } });
@@ -33558,14 +33579,14 @@ fn cmpVector(
     const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src);
 
     const result_ty = try pt.vectorType(.{
-        .len = lhs_ty.vectorLen(mod),
+        .len = lhs_ty.vectorLen(zcu),
         .child = .bool_type,
     });
 
     const runtime_src: LazySrcLoc = src: {
         if (try sema.resolveValue(casted_lhs)) |lhs_val| {
             if (try sema.resolveValue(casted_rhs)) |rhs_val| {
-                if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
+                if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                     return pt.undefRef(result_ty);
                 }
                 const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty);
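
As the result-type construction above shows, a vector comparison yields a same-length vector of bool; a hypothetical test:

const std = @import("std");

test "vector compare produces a bool vector" {
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 1, 2, 0, 4 };
    const eq = a == b; // @Vector(4, bool): { true, true, false, true }
    try std.testing.expect(!@reduce(.And, eq));
    try std.testing.expect(@reduce(.Or, eq));
}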
@@ -33608,8 +33629,8 @@ fn wrapErrorUnionPayload(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const dest_payload_ty = dest_ty.errorUnionPayload(mod);
+    const zcu = pt.zcu;
+    const dest_payload_ty = dest_ty.errorUnionPayload(zcu);
     const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
     if (try sema.resolveValue(coerced)) |val| {
         return Air.internedToRef((try pt.intern(.{ .error_union = .{
@@ -33629,12 +33650,12 @@ fn wrapErrorUnionSet(
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const inst_ty = sema.typeOf(inst);
-    const dest_err_set_ty = dest_ty.errorUnionSet(mod);
+    const dest_err_set_ty = dest_ty.errorUnionSet(zcu);
     if (try sema.resolveValue(inst)) |val| {
-        const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
+        const expected_name = zcu.intern_pool.indexToKey(val.toIntern()).err.name;
         switch (dest_err_set_ty.toIntern()) {
             .anyerror_type => {},
             .adhoc_inferred_error_set_type => ok: {
@@ -33658,7 +33679,7 @@ fn wrapErrorUnionSet(
                 .inferred_error_set_type => |func_index| ok: {
                     // We carefully do this in an order that avoids unnecessarily
                     // resolving the destination error set type.
-                    try mod.maybeUnresolveIes(func_index);
+                    try zcu.maybeUnresolveIes(func_index);
                     switch (ip.funcIesResolvedUnordered(func_index)) {
                         .anyerror_type => break :ok,
                         .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
@@ -33693,13 +33714,13 @@ fn unionToTag(
     un_src: LazySrcLoc,
 ) !Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| {
         return Air.internedToRef(opv.toIntern());
     }
     if (try sema.resolveValue(un)) |un_val| {
-        const tag_val = un_val.unionTag(mod).?;
-        if (tag_val.isUndef(mod))
+        const tag_val = un_val.unionTag(zcu).?;
+        if (tag_val.isUndef(zcu))
             return try pt.undefRef(enum_ty);
         return Air.internedToRef(tag_val.toIntern());
     }
@@ -33861,8 +33882,8 @@ const PeerResolveStrategy = enum {
         return strat;
     }
 
-    fn select(ty: Type, mod: *Module) PeerResolveStrategy {
-        return switch (ty.zigTypeTag(mod)) {
+    fn select(ty: Type, zcu: *Zcu) PeerResolveStrategy {
+        return switch (ty.zigTypeTag(zcu)) {
             .Type, .Void, .Bool, .Opaque, .Frame, .AnyFrame => .exact,
             .NoReturn, .Undefined => .unknown,
             .Null => .nullable,
@@ -33870,14 +33891,14 @@ const PeerResolveStrategy = enum {
             .Int => .fixed_int,
             .ComptimeFloat => .comptime_float,
             .Float => .fixed_float,
-            .Pointer => if (ty.ptrInfo(mod).flags.size == .C) .c_ptr else .ptr,
+            .Pointer => if (ty.ptrInfo(zcu).flags.size == .C) .c_ptr else .ptr,
             .Array => .array,
             .Vector => .vector,
             .Optional => .optional,
             .ErrorSet => .error_set,
             .ErrorUnion => .error_union,
             .EnumLiteral, .Enum, .Union => .enum_or_union,
-            .Struct => if (ty.isTupleOrAnonStruct(mod)) .coercible_struct else .exact,
+            .Struct => if (ty.isTupleOrAnonStruct(zcu)) .coercible_struct else .exact,
             .Fn => .func,
         };
     }
@@ -33933,10 +33954,10 @@ const PeerResolveResult = union(enum) {
         src: LazySrcLoc,
         instructions: []const Air.Inst.Ref,
         candidate_srcs: PeerTypeCandidateSrc,
-    ) !*Module.ErrorMsg {
+    ) !*Zcu.ErrorMsg {
         const pt = sema.pt;
 
-        var opt_msg: ?*Module.ErrorMsg = null;
+        var opt_msg: ?*Zcu.ErrorMsg = null;
         errdefer if (opt_msg) |msg| msg.destroy(sema.gpa);
 
         // If we mention fields we'll want to include field types, so put peer types in a buffer
@@ -34053,14 +34074,14 @@ fn resolvePeerTypesInner(
     peer_vals: []?Value,
 ) !PeerResolveResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     var strat_reason: usize = 0;
     var s: PeerResolveStrategy = .unknown;
     for (peer_tys, 0..) |opt_ty, i| {
         const ty = opt_ty orelse continue;
-        s = s.merge(PeerResolveStrategy.select(ty, mod), &strat_reason, i);
+        s = s.merge(PeerResolveStrategy.select(ty, zcu), &strat_reason, i);
     }
 
     if (s == .unknown) {
@@ -34070,14 +34091,14 @@ fn resolvePeerTypesInner(
         // There was something other than noreturn and undefined, so we can ignore those peers
         for (peer_tys) |*ty_ptr| {
             const ty = ty_ptr.* orelse continue;
-            switch (ty.zigTypeTag(mod)) {
+            switch (ty.zigTypeTag(zcu)) {
                 .NoReturn, .Undefined => ty_ptr.* = null,
                 else => {},
             }
         }
     }
 
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
 
     switch (s) {
         .unknown => unreachable,
@@ -34086,7 +34107,7 @@ fn resolvePeerTypesInner(
             var final_set: ?Type = null;
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
-                if (ty.zigTypeTag(mod) != .ErrorSet) return .{ .conflict = .{
+                if (ty.zigTypeTag(zcu) != .ErrorSet) return .{ .conflict = .{
                     .peer_idx_a = strat_reason,
                     .peer_idx_b = i,
                 } };
@@ -34103,15 +34124,15 @@ fn resolvePeerTypesInner(
             var final_set: ?Type = null;
             for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
                 const ty = ty_ptr.* orelse continue;
-                const set_ty = switch (ty.zigTypeTag(mod)) {
+                const set_ty = switch (ty.zigTypeTag(zcu)) {
                     .ErrorSet => blk: {
                         ty_ptr.* = null; // no payload to decide on
                         val_ptr.* = null;
                         break :blk ty;
                     },
                     .ErrorUnion => blk: {
-                        const set_ty = ty.errorUnionSet(mod);
-                        ty_ptr.* = ty.errorUnionPayload(mod);
+                        const set_ty = ty.errorUnionSet(zcu);
+                        ty_ptr.* = ty.errorUnionPayload(zcu);
                         if (val_ptr.*) |eu_val| switch (ip.indexToKey(eu_val.toIntern())) {
                             .error_union => |eu| switch (eu.val) {
                                 .payload => |payload_ip| val_ptr.* = Value.fromInterned(payload_ip),
@@ -34146,7 +34167,7 @@ fn resolvePeerTypesInner(
         .nullable => {
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
-                if (!ty.eql(Type.null, mod)) return .{ .conflict = .{
+                if (!ty.eql(Type.null, zcu)) return .{ .conflict = .{
                     .peer_idx_a = strat_reason,
                     .peer_idx_b = i,
                 } };
@@ -34157,14 +34178,14 @@ fn resolvePeerTypesInner(
         .optional => {
             for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
                 const ty = ty_ptr.* orelse continue;
-                switch (ty.zigTypeTag(mod)) {
+                switch (ty.zigTypeTag(zcu)) {
                     .Null => {
                         ty_ptr.* = null;
                         val_ptr.* = null;
                     },
                     .Optional => {
-                        ty_ptr.* = ty.optionalChild(mod);
-                        if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(mod)) opt_val.optionalValue(mod) else null;
+                        ty_ptr.* = ty.optionalChild(zcu);
+                        if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(zcu)) opt_val.optionalValue(zcu) else null;
                     },
                     else => {},
                 }
@@ -34195,7 +34216,7 @@ fn resolvePeerTypesInner(
             for (peer_tys, 0..) |*ty_ptr, i| {
                 const ty = ty_ptr.* orelse continue;
 
-                if (!ty.isArrayOrVector(mod)) {
+                if (!ty.isArrayOrVector(zcu)) {
                     // We allow tuples of the correct length. We won't validate their elem type, since the elements can be coerced.
                     const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{
                         .peer_idx_a = strat_reason,
@@ -34220,29 +34241,29 @@ fn resolvePeerTypesInner(
                 const first_arr_idx = opt_first_arr_idx orelse {
                     if (opt_first_idx == null) {
                         opt_first_idx = i;
-                        len = ty.arrayLen(mod);
-                        sentinel = ty.sentinel(mod);
+                        len = ty.arrayLen(zcu);
+                        sentinel = ty.sentinel(zcu);
                     }
                     opt_first_arr_idx = i;
-                    elem_ty = ty.childType(mod);
+                    elem_ty = ty.childType(zcu);
                     continue;
                 };
 
-                if (ty.arrayLen(mod) != len) return .{ .conflict = .{
+                if (ty.arrayLen(zcu) != len) return .{ .conflict = .{
                     .peer_idx_a = first_arr_idx,
                     .peer_idx_b = i,
                 } };
 
-                const peer_elem_ty = ty.childType(mod);
-                if (!peer_elem_ty.eql(elem_ty, mod)) coerce: {
+                const peer_elem_ty = ty.childType(zcu);
+                if (!peer_elem_ty.eql(elem_ty, zcu)) coerce: {
                     const peer_elem_coerces_to_elem =
-                        try sema.coerceInMemoryAllowed(block, elem_ty, peer_elem_ty, false, mod.getTarget(), src, src, null);
+                        try sema.coerceInMemoryAllowed(block, elem_ty, peer_elem_ty, false, zcu.getTarget(), src, src, null);
                     if (peer_elem_coerces_to_elem == .ok) {
                         break :coerce;
                     }
 
                     const elem_coerces_to_peer_elem =
-                        try sema.coerceInMemoryAllowed(block, peer_elem_ty, elem_ty, false, mod.getTarget(), src, src, null);
+                        try sema.coerceInMemoryAllowed(block, peer_elem_ty, elem_ty, false, zcu.getTarget(), src, src, null);
                     if (elem_coerces_to_peer_elem == .ok) {
                         elem_ty = peer_elem_ty;
                         break :coerce;
@@ -34255,8 +34276,8 @@ fn resolvePeerTypesInner(
                 }
 
                 if (sentinel) |cur_sent| {
-                    if (ty.sentinel(mod)) |peer_sent| {
-                        if (!peer_sent.eql(cur_sent, elem_ty, mod)) sentinel = null;
+                    if (ty.sentinel(zcu)) |peer_sent| {
+                        if (!peer_sent.eql(cur_sent, elem_ty, zcu)) sentinel = null;
                     } else {
                         sentinel = null;
                     }
@@ -34279,7 +34300,7 @@ fn resolvePeerTypesInner(
             for (peer_tys, peer_vals, 0..) |*ty_ptr, *val_ptr, i| {
                 const ty = ty_ptr.* orelse continue;
 
-                if (!ty.isArrayOrVector(mod)) {
+                if (!ty.isArrayOrVector(zcu)) {
                     // Allow tuples of the correct length
                     const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{
                         .peer_idx_a = strat_reason,
@@ -34305,16 +34326,16 @@ fn resolvePeerTypesInner(
                 }
 
                 if (len) |expect_len| {
-                    if (ty.arrayLen(mod) != expect_len) return .{ .conflict = .{
+                    if (ty.arrayLen(zcu) != expect_len) return .{ .conflict = .{
                         .peer_idx_a = first_idx,
                         .peer_idx_b = i,
                     } };
                 } else {
-                    len = ty.arrayLen(mod);
+                    len = ty.arrayLen(zcu);
                     first_idx = i;
                 }
 
-                ty_ptr.* = ty.childType(mod);
+                ty_ptr.* = ty.childType(zcu);
                 val_ptr.* = null; // multiple child vals, so we can't easily use them in peer type resolution
             }
 
@@ -34339,7 +34360,7 @@ fn resolvePeerTypesInner(
             var first_idx: usize = undefined;
             for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
                 const ty = opt_ty orelse continue;
-                switch (ty.zigTypeTag(mod)) {
+                switch (ty.zigTypeTag(zcu)) {
                     .ComptimeInt => continue, // comptime-known integers can always coerce to C pointers
                     .Int => {
                         if (opt_val != null) {
@@ -34348,7 +34369,7 @@ fn resolvePeerTypesInner(
                         } else {
                             // Runtime-known, so check if the type is no bigger than a usize
                             const ptr_bits = target.ptrBitWidth();
-                            const bits = ty.intInfo(mod).bits;
+                            const bits = ty.intInfo(zcu).bits;
                             if (bits <= ptr_bits) continue;
                         }
                     },
@@ -34356,13 +34377,13 @@ fn resolvePeerTypesInner(
                     else => {},
                 }
 
-                if (!ty.isPtrAtRuntime(mod)) return .{ .conflict = .{
+                if (!ty.isPtrAtRuntime(zcu)) return .{ .conflict = .{
                     .peer_idx_a = strat_reason,
                     .peer_idx_b = i,
                 } };
 
                 // Goes through optionals
-                const peer_info = ty.ptrInfo(mod);
+                const peer_info = ty.ptrInfo(zcu);
 
                 var ptr_info = opt_ptr_info orelse {
                     opt_ptr_info = peer_info;
@@ -34391,17 +34412,17 @@ fn resolvePeerTypesInner(
                     ptr_info.sentinel = .none;
                 }
 
-                // Note that the align can be always non-zero; Module.ptrType will canonicalize it
+                // Note that the alignment can always be non-zero; Zcu.ptrType will canonicalize it
                 ptr_info.flags.alignment = InternPool.Alignment.min(
                     if (ptr_info.flags.alignment != .none)
                         ptr_info.flags.alignment
                     else
-                        Type.fromInterned(ptr_info.child).abiAlignment(pt),
+                        Type.fromInterned(ptr_info.child).abiAlignment(zcu),
 
                     if (peer_info.flags.alignment != .none)
                         peer_info.flags.alignment
                     else
-                        Type.fromInterned(peer_info.child).abiAlignment(pt),
+                        Type.fromInterned(peer_info.child).abiAlignment(zcu),
                 );
                 if (ptr_info.flags.address_space != peer_info.flags.address_space) {
                     return .{ .conflict = .{
@@ -34438,8 +34459,8 @@ fn resolvePeerTypesInner(
 
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
-                const peer_info: InternPool.Key.PtrType = switch (ty.zigTypeTag(mod)) {
-                    .Pointer => ty.ptrInfo(mod),
+                const peer_info: InternPool.Key.PtrType = switch (ty.zigTypeTag(zcu)) {
+                    .Pointer => ty.ptrInfo(zcu),
                     .Fn => .{
                         .child = ty.toIntern(),
                         .flags = .{
@@ -34480,12 +34501,12 @@ fn resolvePeerTypesInner(
                     if (ptr_info.flags.alignment != .none)
                         ptr_info.flags.alignment
                     else
-                        try sema.typeAbiAlignment(Type.fromInterned(ptr_info.child)),
+                        try Type.fromInterned(ptr_info.child).abiAlignmentSema(pt),
 
                     if (peer_info.flags.alignment != .none)
                         peer_info.flags.alignment
                     else
-                        try sema.typeAbiAlignment(Type.fromInterned(peer_info.child)),
+                        try Type.fromInterned(peer_info.child).abiAlignmentSema(pt),
                 );
 
                 if (ptr_info.flags.address_space != peer_info.flags.address_space) {
@@ -34747,7 +34768,7 @@ fn resolvePeerTypesInner(
                     first_idx = i;
                     continue;
                 };
-                if (ty.zigTypeTag(mod) != .Fn) return .{ .conflict = .{
+                if (ty.zigTypeTag(zcu) != .Fn) return .{ .conflict = .{
                     .peer_idx_a = strat_reason,
                     .peer_idx_b = i,
                 } };
@@ -34775,7 +34796,7 @@ fn resolvePeerTypesInner(
 
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
-                switch (ty.zigTypeTag(mod)) {
+                switch (ty.zigTypeTag(zcu)) {
                     .EnumLiteral, .Enum, .Union => {},
                     else => return .{ .conflict = .{
                         .peer_idx_a = strat_reason,
@@ -34794,32 +34815,32 @@ fn resolvePeerTypesInner(
                     .peer_idx_b = i,
                 } };
 
-                switch (cur_ty.zigTypeTag(mod)) {
+                switch (cur_ty.zigTypeTag(zcu)) {
                     .EnumLiteral => {
                         opt_cur_ty = ty;
                         cur_ty_idx = i;
                     },
-                    .Enum => switch (ty.zigTypeTag(mod)) {
+                    .Enum => switch (ty.zigTypeTag(zcu)) {
                         .EnumLiteral => {},
                         .Enum => {
-                            if (!ty.eql(cur_ty, mod)) return generic_err;
+                            if (!ty.eql(cur_ty, zcu)) return generic_err;
                         },
                         .Union => {
-                            const tag_ty = ty.unionTagTypeHypothetical(mod);
-                            if (!tag_ty.eql(cur_ty, mod)) return generic_err;
+                            const tag_ty = ty.unionTagTypeHypothetical(zcu);
+                            if (!tag_ty.eql(cur_ty, zcu)) return generic_err;
                             opt_cur_ty = ty;
                             cur_ty_idx = i;
                         },
                         else => unreachable,
                     },
-                    .Union => switch (ty.zigTypeTag(mod)) {
+                    .Union => switch (ty.zigTypeTag(zcu)) {
                         .EnumLiteral => {},
                         .Enum => {
-                            const cur_tag_ty = cur_ty.unionTagTypeHypothetical(mod);
-                            if (!ty.eql(cur_tag_ty, mod)) return generic_err;
+                            const cur_tag_ty = cur_ty.unionTagTypeHypothetical(zcu);
+                            if (!ty.eql(cur_tag_ty, zcu)) return generic_err;
                         },
                         .Union => {
-                            if (!ty.eql(cur_ty, mod)) return generic_err;
+                            if (!ty.eql(cur_ty, zcu)) return generic_err;
                         },
                         else => unreachable,
                     },
@@ -34832,7 +34853,7 @@ fn resolvePeerTypesInner(
         .comptime_int => {
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
-                switch (ty.zigTypeTag(mod)) {
+                switch (ty.zigTypeTag(zcu)) {
                     .ComptimeInt => {},
                     else => return .{ .conflict = .{
                         .peer_idx_a = strat_reason,
@@ -34846,7 +34867,7 @@ fn resolvePeerTypesInner(
         .comptime_float => {
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
-                switch (ty.zigTypeTag(mod)) {
+                switch (ty.zigTypeTag(zcu)) {
                     .ComptimeInt, .ComptimeFloat => {},
                     else => return .{ .conflict = .{
                         .peer_idx_a = strat_reason,
@@ -34868,11 +34889,11 @@ fn resolvePeerTypesInner(
                 const ty = opt_ty orelse continue;
                 const opt_val = ptr_opt_val.*;
 
-                const peer_tag = ty.zigTypeTag(mod);
+                const peer_tag = ty.zigTypeTag(zcu);
                 switch (peer_tag) {
                     .ComptimeInt => {
                         // If the value is undefined, we can't refine to a fixed-width int
-                        if (opt_val == null or opt_val.?.isUndef(mod)) return .{ .conflict = .{
+                        if (opt_val == null or opt_val.?.isUndef(zcu)) return .{ .conflict = .{
                             .peer_idx_a = strat_reason,
                             .peer_idx_b = i,
                         } };
@@ -34889,7 +34910,7 @@ fn resolvePeerTypesInner(
 
                 if (opt_val != null) any_comptime_known = true;
 
-                const info = ty.intInfo(mod);
+                const info = ty.intInfo(zcu);
 
                 const idx_ptr = switch (info.signedness) {
                     .unsigned => &idx_unsigned,
@@ -34901,7 +34922,7 @@ fn resolvePeerTypesInner(
                     continue;
                 };
 
-                const cur_info = peer_tys[largest_idx].?.intInfo(mod);
+                const cur_info = peer_tys[largest_idx].?.intInfo(zcu);
                 if (info.bits > cur_info.bits) {
                     idx_ptr.* = i;
                 }
@@ -34915,8 +34936,8 @@ fn resolvePeerTypesInner(
                 return .{ .success = peer_tys[idx_signed.?].? };
             }
 
-            const unsigned_info = peer_tys[idx_unsigned.?].?.intInfo(mod);
-            const signed_info = peer_tys[idx_signed.?].?.intInfo(mod);
+            const unsigned_info = peer_tys[idx_unsigned.?].?.intInfo(zcu);
+            const signed_info = peer_tys[idx_signed.?].?.intInfo(zcu);
             if (signed_info.bits > unsigned_info.bits) {
                 return .{ .success = peer_tys[idx_signed.?].? };
             }
@@ -34948,7 +34969,7 @@ fn resolvePeerTypesInner(
 
             for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
                 const ty = opt_ty orelse continue;
-                switch (ty.zigTypeTag(mod)) {
+                switch (ty.zigTypeTag(zcu)) {
                     .ComptimeFloat, .ComptimeInt => {},
                     .Int => {
                         if (opt_val == null) return .{ .conflict = .{
@@ -34958,7 +34979,7 @@ fn resolvePeerTypesInner(
                     },
                     .Float => {
                         if (opt_cur_ty) |cur_ty| {
-                            if (cur_ty.eql(ty, mod)) continue;
+                            if (cur_ty.eql(ty, zcu)) continue;
                             // Recreate the type so we eliminate any c_longdouble
                             const bits = @max(cur_ty.floatBits(target), ty.floatBits(target));
                             opt_cur_ty = switch (bits) {
@@ -34997,7 +35018,7 @@ fn resolvePeerTypesInner(
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
 
-                if (!ty.isTupleOrAnonStruct(mod)) {
+                if (!ty.isTupleOrAnonStruct(zcu)) {
                     return .{ .conflict = .{
                         .peer_idx_a = strat_reason,
                         .peer_idx_b = i,
@@ -35006,8 +35027,8 @@ fn resolvePeerTypesInner(
 
                 const first_idx = opt_first_idx orelse {
                     opt_first_idx = i;
-                    is_tuple = ty.isTuple(mod);
-                    field_count = ty.structFieldCount(mod);
+                    is_tuple = ty.isTuple(zcu);
+                    field_count = ty.structFieldCount(zcu);
                     if (!is_tuple) {
                         const names = ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip);
                         field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names);
@@ -35015,7 +35036,7 @@ fn resolvePeerTypesInner(
                     continue;
                 };
 
-                if (ty.isTuple(mod) != is_tuple or ty.structFieldCount(mod) != field_count) {
+                if (ty.isTuple(zcu) != is_tuple or ty.structFieldCount(zcu) != field_count) {
                     return .{ .conflict = .{
                         .peer_idx_a = first_idx,
                         .peer_idx_b = i,
@@ -35025,7 +35046,7 @@ fn resolvePeerTypesInner(
                 if (!is_tuple) {
                     for (field_names, 0..) |expected, field_index_usize| {
                         const field_index: u32 = @intCast(field_index_usize);
-                        const actual = ty.structFieldName(field_index, mod).unwrap().?;
+                        const actual = ty.structFieldName(field_index, zcu).unwrap().?;
                         if (actual == expected) continue;
                         return .{ .conflict = .{
                             .peer_idx_a = first_idx,
@@ -35052,7 +35073,7 @@ fn resolvePeerTypesInner(
                         peer_field_val.* = null;
                         continue;
                     };
-                    peer_field_ty.* = ty.structFieldType(field_index, mod);
+                    peer_field_ty.* = ty.structFieldType(field_index, zcu);
                     peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null;
                 }
 
@@ -35074,7 +35095,7 @@ fn resolvePeerTypesInner(
                             // Already-resolved types won't be referenced by the error so it's fine
                             // to leave them undefined.
                             const ty = opt_ty orelse continue;
-                            peer_field_ty.* = ty.structFieldType(field_index, mod);
+                            peer_field_ty.* = ty.structFieldType(field_index, zcu);
                         }
 
                         return .{ .field_error = .{
@@ -35111,7 +35132,7 @@ fn resolvePeerTypesInner(
                         comptime_val = coerced_val;
                         continue;
                     };
-                    if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), mod)) {
+                    if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), zcu)) {
                         comptime_val = null;
                         break;
                     }
@@ -35120,7 +35141,7 @@ fn resolvePeerTypesInner(
                 field_val.* = if (comptime_val) |v| v.toIntern() else .none;
             }
 
-            const final_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{
+            const final_ty = try ip.getAnonStructType(zcu.gpa, pt.tid, .{
                 .types = field_types,
                 .names = if (is_tuple) &.{} else field_names,
                 .values = field_vals,
@@ -35135,7 +35156,7 @@ fn resolvePeerTypesInner(
             for (peer_tys, 0..) |opt_ty, i| {
                 const ty = opt_ty orelse continue;
                 if (expect_ty) |expect| {
-                    if (!ty.eql(expect, mod)) return .{ .conflict = .{
+                    if (!ty.eql(expect, zcu)) return .{ .conflict = .{
                         .peer_idx_a = first_idx,
                         .peer_idx_b = i,
                     } };
@@ -35186,22 +35207,22 @@ const ArrayLike = struct {
 };
 fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    return switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    return switch (ty.zigTypeTag(zcu)) {
         .Array => .{
-            .len = ty.arrayLen(mod),
-            .elem_ty = ty.childType(mod),
+            .len = ty.arrayLen(zcu),
+            .elem_ty = ty.childType(zcu),
         },
         .Struct => {
-            const field_count = ty.structFieldCount(mod);
+            const field_count = ty.structFieldCount(zcu);
             if (field_count == 0) return .{
                 .len = 0,
                 .elem_ty = Type.noreturn,
             };
-            if (!ty.isTuple(mod)) return null;
-            const elem_ty = ty.structFieldType(0, mod);
+            if (!ty.isTuple(zcu)) return null;
+            const elem_ty = ty.structFieldType(0, zcu);
             for (1..field_count) |i| {
-                if (!ty.structFieldType(i, mod).eql(elem_ty, mod)) {
+                if (!ty.structFieldType(i, zcu).eql(elem_ty, zcu)) {
                     return null;
                 }
             }
@@ -35216,8 +35237,8 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
 
 pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
 
     if (sema.fn_ret_ty_ies) |ies| {
         try sema.resolveInferredErrorSetPtr(block, src, ies);
@@ -35228,14 +35249,14 @@ pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void
 
 pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const fn_ty_info = mod.typeToFunc(fn_ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const fn_ty_info = zcu.typeToFunc(fn_ty).?;
 
     try Type.fromInterned(fn_ty_info.return_type).resolveFully(pt);
 
-    if (mod.comp.config.any_error_tracing and
-        Type.fromInterned(fn_ty_info.return_type).isError(mod))
+    if (zcu.comp.config.any_error_tracing and
+        Type.fromInterned(fn_ty_info.return_type).isError(zcu))
     {
         // Ensure the type exists so that backends can assume that.
         _ = try pt.getBuiltinType("StackTrace");
@@ -35258,9 +35279,9 @@ pub fn resolveStructAlignment(
     struct_type: InternPool.LoadedStructType,
 ) SemaError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const target = zcu.getTarget();
 
     assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?);
 
@@ -35286,13 +35307,14 @@ pub fn resolveStructAlignment(
 
     for (0..struct_type.field_types.len) |i| {
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-        if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty))
+        if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt))
             continue;
-        const field_align = try pt.structFieldAlignmentAdvanced(
+        const field_align = try field_ty.structFieldAlignmentAdvanced(
             struct_type.fieldAlign(ip, i),
-            field_ty,
             struct_type.layout,
             .sema,
+            pt.zcu,
+            pt.tid,
         );
         alignment = alignment.maxStrict(field_align);
     }
@@ -35338,14 +35360,14 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
 
     for (aligns, sizes, 0..) |*field_align, *field_size, i| {
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-        if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
+        if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) {
             struct_type.offsets.get(ip)[i] = 0;
             field_size.* = 0;
             field_align.* = .none;
             continue;
         }
 
-        field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) {
+        field_size.* = field_ty.abiSizeSema(pt) catch |err| switch (err) {
             error.AnalysisFail => {
                 const msg = sema.err orelse return err;
                 try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
@@ -35353,16 +35375,17 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
             },
             else => return err,
         };
-        field_align.* = try pt.structFieldAlignmentAdvanced(
+        field_align.* = try field_ty.structFieldAlignmentAdvanced(
             struct_type.fieldAlign(ip, i),
-            field_ty,
             struct_type.layout,
             .sema,
+            pt.zcu,
+            pt.tid,
         );
         big_align = big_align.maxStrict(field_align.*);
     }
 
-    if (struct_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+    if (struct_type.flagsUnordered(ip).assumed_runtime_bits and !(try ty.hasRuntimeBitsSema(pt))) {
         const msg = try sema.errMsg(
             ty.srcLoc(zcu),
             "struct layout depends on it having runtime bits",
@@ -35387,7 +35410,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
 
         for (runtime_order, 0..) |*ro, i| {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-            if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
+            if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) {
                 ro.* = .omitted;
             } else {
                 ro.* = @enumFromInt(i);
@@ -35440,7 +35463,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         offset = offsets[i] + sizes[i];
     }
     struct_type.setLayoutResolved(ip, @intCast(big_align.forward(offset)), big_align);
-    _ = try sema.typeRequiresComptime(ty);
+    _ = try ty.comptimeOnlySema(pt);
 }
 
 fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructType) CompileError!void {
@@ -35488,7 +35511,7 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp
         var accumulator: u64 = 0;
         for (0..struct_type.field_types.len) |i| {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-            accumulator += try field_ty.bitSizeAdvanced(pt, .sema);
+            accumulator += try field_ty.bitSizeSema(pt);
         }
         break :blk accumulator;
     };
@@ -35543,17 +35566,17 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp
 
 fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (!backing_int_ty.isInt(mod)) {
+    if (!backing_int_ty.isInt(zcu)) {
         return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(pt)});
     }
-    if (backing_int_ty.bitSize(pt) != fields_bit_sum) {
+    if (backing_int_ty.bitSize(zcu) != fields_bit_sum) {
         return sema.fail(
             block,
             src,
             "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
-            .{ backing_int_ty.fmt(pt), backing_int_ty.bitSize(pt), fields_bit_sum },
+            .{ backing_int_ty.fmt(pt), backing_int_ty.bitSize(zcu), fields_bit_sum },
         );
     }
 }
@@ -35573,13 +35596,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
 
 fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Pointer) {
-        switch (ty.ptrSize(mod)) {
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Pointer) {
+        switch (ty.ptrSize(zcu)) {
             .Slice, .Many, .C => return,
             .One => {
-                const elem_ty = ty.childType(mod);
-                if (elem_ty.zigTypeTag(mod) == .Array) return;
+                const elem_ty = ty.childType(zcu);
+                if (elem_ty.zigTypeTag(zcu) == .Array) return;
                 // TODO https://github.com/ziglang/zig/issues/15479
                 // if (elem_ty.isTuple()) return;
             },
@@ -35601,7 +35624,8 @@ pub fn resolveUnionAlignment(
     ty: Type,
     union_type: InternPool.LoadedUnionType,
 ) SemaError!void {
-    const zcu = sema.pt.zcu;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
     const target = zcu.getTarget();
 
@@ -35621,13 +35645,13 @@ pub fn resolveUnionAlignment(
     var max_align: Alignment = .@"1";
     for (0..union_type.field_types.len) |field_index| {
         const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
-        if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
+        if (!(try field_ty.hasRuntimeBitsSema(pt))) continue;
 
         const explicit_align = union_type.fieldAlign(ip, field_index);
         const field_align = if (explicit_align != .none)
             explicit_align
         else
-            try sema.typeAbiAlignment(field_ty);
+            try field_ty.abiAlignmentSema(pt);
 
         max_align = max_align.max(field_align);
     }
@@ -35635,7 +35659,7 @@ pub fn resolveUnionAlignment(
     union_type.setAlignment(ip, max_align);
 }
 
-/// This logic must be kept in sync with `Module.getUnionLayout`.
+/// This logic must be kept in sync with `Zcu.getUnionLayout`.
 pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
     const pt = sema.pt;
     const ip = &pt.zcu.intern_pool;
@@ -35670,9 +35694,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
     for (0..union_type.field_types.len) |field_index| {
         const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
 
-        if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(pt.zcu) == .NoReturn) continue; // TODO: should this affect alignment?
+        if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .NoReturn) continue; // TODO: should this affect alignment?
 
-        max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
+        max_size = @max(max_size, field_ty.abiSizeSema(pt) catch |err| switch (err) {
             error.AnalysisFail => {
                 const msg = sema.err orelse return err;
                 try sema.addFieldErrNote(ty, field_index, msg, "while checking this field", .{});
@@ -35685,17 +35709,17 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         const field_align = if (explicit_align != .none)
             explicit_align
         else
-            try sema.typeAbiAlignment(field_ty);
+            try field_ty.abiAlignmentSema(pt);
 
         max_align = max_align.max(field_align);
     }
 
     const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and
-        try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
+        try Type.fromInterned(union_type.enum_tag_ty).hasRuntimeBitsSema(pt);
     const size, const alignment, const padding = if (has_runtime_tag) layout: {
         const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
-        const tag_align = try sema.typeAbiAlignment(enum_tag_type);
-        const tag_size = try sema.typeAbiSize(enum_tag_type);
+        const tag_align = try enum_tag_type.abiAlignmentSema(pt);
+        const tag_size = try enum_tag_type.abiSizeSema(pt);
 
         // Put the tag before or after the payload depending on which one's
         // alignment is greater.
@@ -35727,7 +35751,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
 
     union_type.setHaveLayout(ip, @intCast(size), padding, alignment);
 
-    if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+    if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try ty.hasRuntimeBitsSema(pt))) {
         const msg = try sema.errMsg(
             ty.srcLoc(pt.zcu),
             "union layout depends on it having runtime bits",
@@ -35746,6 +35770,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         );
         return sema.failWithOwnedErrorMsg(null, msg);
     }
+    _ = try ty.comptimeOnlySema(pt);
 }
 
 /// Returns `error.AnalysisFail` if any of the types (recursively) failed to
@@ -35754,9 +35779,9 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
     try sema.resolveStructLayout(ty);
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const struct_type = mod.typeToStruct(ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const struct_type = zcu.typeToStruct(ty).?;
 
     assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?);
 
@@ -35777,9 +35802,9 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
     try sema.resolveUnionLayout(ty);
 
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const union_obj = mod.typeToUnion(ty).?;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const union_obj = zcu.typeToUnion(ty).?;
 
     assert(sema.owner.unwrap().cau == union_obj.cau);
 
@@ -35804,7 +35829,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
     }
 
     // And let's not forget comptime-only status.
-    _ = try sema.typeRequiresComptime(ty);
+    _ = try ty.comptimeOnlySema(pt);
 }
 
 pub fn resolveTypeFieldsStruct(
@@ -35950,7 +35975,7 @@ fn resolveInferredErrorSet(
         try pt.ensureFuncBodyAnalyzed(func_index);
     }
 
-    // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
+    // This will now have been resolved by the logic at the end of `Zcu.analyzeFnBody`
     // which calls `resolveInferredErrorSetPtr`.
     const final_resolved_ty = func.resolvedErrorSetUnordered(ip);
     assert(final_resolved_ty != .none);
@@ -35997,9 +36022,9 @@ fn resolveAdHocInferredErrorSet(
     value: InternPool.Index,
 ) CompileError!InternPool.Index {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
     if (new_ty == .none) return value;
     return ip.getCoerced(gpa, pt.tid, value, new_ty);
@@ -36013,8 +36038,8 @@ fn resolveAdHocInferredErrorSetTy(
 ) CompileError!InternPool.Index {
     const ies = sema.fn_ret_ty_ies orelse return .none;
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const error_union_info = switch (ip.indexToKey(ty)) {
         .error_union_type => |x| x,
         else => return .none,
@@ -36037,8 +36062,8 @@ fn resolveInferredErrorSetTy(
     ty: InternPool.Index,
 ) CompileError!InternPool.Index {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     if (ty == .anyerror_type) return ty;
     switch (ip.indexToKey(ty)) {
         .error_set_type => return ty,
@@ -36845,9 +36870,9 @@ fn generateUnionTagTypeNumbered(
     union_name: InternPool.NullTerminatedString,
 ) !InternPool.Index {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const gpa = sema.gpa;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
 
     const name = try ip.getOrPutStringFmt(
         gpa,
@@ -36881,8 +36906,8 @@ fn generateUnionTagTypeSimple(
     union_name: InternPool.NullTerminatedString,
 ) !InternPool.Index {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     const gpa = sema.gpa;
 
     const name = try ip.getOrPutStringFmt(
@@ -37192,7 +37217,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                             return null;
                         },
                         .auto, .explicit => {
-                            if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null;
+                            if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(zcu)) return null;
 
                             return Value.fromInterned(switch (enum_type.names.len) {
                                 0 => try pt.intern(.{ .empty_enum_value = ty.toIntern() }),
@@ -37279,7 +37304,7 @@ fn analyzeComptimeAlloc(
     alignment: Alignment,
 ) CompileError!Air.Inst.Ref {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
     // Needed to make an anon decl with type `var_type` (the `finish()` call below).
     _ = try sema.typeHasOnePossibleValue(var_type);
@@ -37288,7 +37313,7 @@ fn analyzeComptimeAlloc(
         .child = var_type.toIntern(),
         .flags = .{
             .alignment = alignment,
-            .address_space = target_util.defaultAddressSpace(mod.getTarget(), .global_constant),
+            .address_space = target_util.defaultAddressSpace(zcu.getTarget(), .global_constant),
         },
     });
 
@@ -37338,13 +37363,13 @@ pub fn analyzeAsAddressSpace(
     ctx: AddressSpaceContext,
 ) !std.builtin.AddressSpace {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     const addrspace_ty = try pt.getBuiltinType("AddressSpace");
     const coerced = try sema.coerce(block, addrspace_ty, air_ref, src);
     const addrspace_val = try sema.resolveConstDefinedValue(block, src, coerced, .{
         .needed_comptime_reason = "address space must be comptime-known",
     });
-    const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_val);
+    const address_space = zcu.toEnum(std.builtin.AddressSpace, addrspace_val);
     const target = pt.zcu.getTarget();
     const arch = target.cpu.arch;
 
@@ -37446,13 +37471,13 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
 /// This logic must be kept in sync with `Type.isPtrLikeOptional`.
 fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+    const zcu = pt.zcu;
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
             .One, .Many, .C => ty,
             .Slice => null,
         },
-        .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
+        .opt_type => |opt_child| switch (zcu.intern_pool.indexToKey(opt_child)) {
             .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                 .Slice, .C => null,
                 .Many, .One => {
@@ -37473,33 +37498,6 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
     };
 }
 
-/// `generic_poison` will return false.
-/// May return false negatives when structs and unions are having their field types resolved.
-pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool {
-    return ty.comptimeOnlyAdvanced(sema.pt, .sema);
-}
-
-pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool {
-    return ty.hasRuntimeBitsAdvanced(sema.pt, false, .sema) catch |err| switch (err) {
-        error.NeedLazy => unreachable,
-        else => |e| return e,
-    };
-}
-
-pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 {
-    const pt = sema.pt;
-    try ty.resolveLayout(pt);
-    return ty.abiSize(pt);
-}
-
-pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment {
-    return (try ty.abiAlignmentAdvanced(sema.pt, .sema)).scalar;
-}
-
-pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
-    return ty.fnHasRuntimeBitsAdvanced(sema.pt, .sema);
-}
-
 fn unionFieldIndex(
     sema: *Sema,
     block: *Block,
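Note: the wrappers deleted above are superseded by *Sema-suffixed methods on Type itself, so call sites no longer route through Sema. A before/after sketch, not part of the commit, assuming a sema: *Sema and a ty: Type in scope:

    const pt = sema.pt;
    // try sema.typeRequiresComptime(ty)  ->  try ty.comptimeOnlySema(pt)
    // try sema.typeHasRuntimeBits(ty)    ->  try ty.hasRuntimeBitsSema(pt)
    // try sema.typeAbiSize(ty)           ->  try ty.abiSizeSema(pt)
    // try sema.typeAbiAlignment(ty)      ->  try ty.abiAlignmentSema(pt)
    const size = try ty.abiSizeSema(pt); // resolves layout on demand, like the old wrapper
    _ = size;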
@@ -37508,10 +37506,10 @@ fn unionFieldIndex(
     field_src: LazySrcLoc,
 ) !u32 {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     try union_ty.resolveFields(pt);
-    const union_obj = mod.typeToUnion(union_ty).?;
+    const union_obj = zcu.typeToUnion(union_ty).?;
     const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
         return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name);
     return @intCast(field_index);
@@ -37525,13 +37523,13 @@ fn structFieldIndex(
     field_src: LazySrcLoc,
 ) !u32 {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     try struct_ty.resolveFields(pt);
-    if (struct_ty.isAnonStruct(mod)) {
+    if (struct_ty.isAnonStruct(zcu)) {
         return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
     } else {
-        const struct_type = mod.typeToStruct(struct_ty).?;
+        const struct_type = zcu.typeToStruct(struct_ty).?;
         return struct_type.nameIndex(ip, field_name) orelse
             return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_src, field_name);
     }
@@ -37545,8 +37543,8 @@ fn anonStructFieldIndex(
     field_src: LazySrcLoc,
 ) !u32 {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(struct_ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
             if (name == field_name) return @intCast(i);
@@ -37583,10 +37581,10 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize)
 
 fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -37611,15 +37609,15 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     const pt = sema.pt;
     if (scalar_ty.toIntern() != .comptime_int_type) {
         const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty);
-        if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow;
+        if (res.overflow_bit.compareAllWithZero(.neq, pt.zcu)) return error.Overflow;
         return res.wrapped_result;
     }
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
+    const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
+    const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
@@ -37637,10 +37635,10 @@ fn numberAddWrapScalar(
     ty: Type,
 ) !Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty);
+    const zcu = pt.zcu;
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return pt.undefValue(ty);
 
-    if (ty.zigTypeTag(mod) == .ComptimeInt) {
+    if (ty.zigTypeTag(zcu) == .ComptimeInt) {
         return sema.intAdd(lhs, rhs, ty, undefined);
     }
 
@@ -37701,17 +37699,18 @@ fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
 
 fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     const pt = sema.pt;
+    const zcu = pt.zcu;
     if (scalar_ty.toIntern() != .comptime_int_type) {
         const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty);
-        if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow;
+        if (res.overflow_bit.compareAllWithZero(.neq, zcu)) return error.Overflow;
         return res.wrapped_result;
     }
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
+    const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
+    const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
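Note: the comptime arithmetic helpers in the hunks above and below all migrate from the generic toBigIntAdvanced(&space, pt, .sema) form to the dedicated toBigIntSema wrapper, and compareAllWithZero now takes the *Zcu directly. A sketch, not part of the commit, assuming val: Value and pt: Zcu.PerThread in scope:

    var space: Value.BigIntSpace = undefined;
    // Before: const big = try val.toBigIntAdvanced(&space, pt, .sema);
    const big = try val.toBigIntSema(&space, pt);
    _ = big;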
@@ -37729,10 +37728,10 @@ fn numberSubWrapScalar(
     ty: Type,
 ) !Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty);
+    const zcu = pt.zcu;
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return pt.undefValue(ty);
 
-    if (ty.zigTypeTag(mod) == .ComptimeInt) {
+    if (ty.zigTypeTag(zcu) == .ComptimeInt) {
         return sema.intSub(lhs, rhs, ty, undefined);
     }
 
@@ -37751,12 +37750,12 @@ fn intSubWithOverflow(
     ty: Type,
 ) !Value.OverflowArithmeticResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const vec_len = ty.vectorLen(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const vec_len = ty.vectorLen(zcu);
         const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
         const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
-        const scalar_ty = ty.scalarType(mod);
+        const scalar_ty = ty.scalarType(zcu);
         for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -37785,10 +37784,10 @@ fn intSubWithOverflowScalar(
     ty: Type,
 ) !Value.OverflowArithmeticResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
 
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
         return .{
             .overflow_bit = try pt.undefValue(Type.u1),
             .wrapped_result = try pt.undefValue(ty),
@@ -37797,8 +37796,8 @@ fn intSubWithOverflowScalar(
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
+    const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
+    const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -37824,12 +37823,12 @@ fn intFromFloat(
     mode: IntFromFloatMode,
 ) CompileError!Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (float_ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
+    const zcu = pt.zcu;
+    if (float_ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(zcu));
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
-            scalar.* = (try sema.intFromFloatScalar(block, src, elem_val, int_ty.scalarType(mod), mode)).toIntern();
+            scalar.* = (try sema.intFromFloatScalar(block, src, elem_val, int_ty.scalarType(zcu), mode)).toIntern();
         }
         return Value.fromInterned(try pt.intern(.{ .aggregate = .{
             .ty = int_ty.toIntern(),
@@ -37873,18 +37872,18 @@ fn intFromFloatScalar(
     mode: IntFromFloatMode,
 ) CompileError!Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
 
-    if (val.isUndef(mod)) return sema.failWithUseOfUndef(block, src);
+    if (val.isUndef(zcu)) return sema.failWithUseOfUndef(block, src);
 
-    if (mode == .exact and val.floatHasFraction(mod)) return sema.fail(
+    if (mode == .exact and val.floatHasFraction(zcu)) return sema.fail(
         block,
         src,
         "fractional component prevents float value '{}' from coercion to type '{}'",
         .{ val.fmtValueSema(pt, sema), int_ty.fmt(pt) },
     );
 
-    const float = val.toFloat(f128, pt);
+    const float = val.toFloat(f128, zcu);
     if (std.math.isNan(float)) {
         return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
             int_ty.fmt(pt),
@@ -37920,15 +37919,15 @@ fn intFitsInType(
     vector_index: ?*usize,
 ) CompileError!bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     if (ty.toIntern() == .comptime_int_type) return true;
-    const info = ty.intInfo(mod);
+    const info = ty.intInfo(zcu);
     switch (val.toIntern()) {
         .zero_usize, .zero_u8 => return true,
-        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .undef => return true,
             .variable, .@"extern", .func, .ptr => {
-                const target = mod.getTarget();
+                const target = zcu.getTarget();
                 const ptr_bits = target.ptrBitWidth();
                 return switch (info.signedness) {
                     .signed => info.bits > ptr_bits,
@@ -37945,7 +37944,7 @@ fn intFitsInType(
                     const max_needed_bits = @as(u16, 16) + @intFromBool(info.signedness == .signed);
                     // If it is u16 or bigger we know the alignment fits without resolving it.
                     if (info.bits >= max_needed_bits) return true;
-                    const x = try sema.typeAbiAlignment(Type.fromInterned(lazy_ty));
+                    const x = try Type.fromInterned(lazy_ty).abiAlignmentSema(pt);
                     if (x == .none) return true;
                     const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed);
                     return info.bits >= actual_needed_bits;
@@ -37954,16 +37953,16 @@ fn intFitsInType(
                     const max_needed_bits = @as(u16, 64) + @intFromBool(info.signedness == .signed);
                     // If it is u64 or bigger we know the size fits without resolving it.
                     if (info.bits >= max_needed_bits) return true;
-                    const x = try sema.typeAbiSize(Type.fromInterned(lazy_ty));
+                    const x = try Type.fromInterned(lazy_ty).abiSizeSema(pt);
                     if (x == 0) return true;
                     const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
                     return info.bits >= actual_needed_bits;
                 },
             },
             .aggregate => |aggregate| {
-                assert(ty.zigTypeTag(mod) == .Vector);
+                assert(ty.zigTypeTag(zcu) == .Vector);
                 return switch (aggregate.storage) {
-                    .bytes => |bytes| for (bytes.toSlice(ty.vectorLen(mod), &mod.intern_pool), 0..) |byte, i| {
+                    .bytes => |bytes| for (bytes.toSlice(ty.vectorLen(zcu), &zcu.intern_pool), 0..) |byte, i| {
                         if (byte == 0) continue;
                         const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed);
                         if (info.bits >= actual_needed_bits) continue;
@@ -37975,7 +37974,7 @@ fn intFitsInType(
                         .elems => |elems| elems,
                         .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
                     }, 0..) |elem, i| {
-                        if (try sema.intFitsInType(Value.fromInterned(elem), ty.scalarType(mod), null)) continue;
+                        if (try sema.intFitsInType(Value.fromInterned(elem), ty.scalarType(zcu), null)) continue;
                         if (vector_index) |vi| vi.* = i;
                         break false;
                     } else true,
@@ -37997,15 +37996,15 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
 /// Asserts the type is an enum.
 fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const enum_type = mod.intern_pool.loadEnumType(ty.toIntern());
+    const zcu = pt.zcu;
+    const enum_type = zcu.intern_pool.loadEnumType(ty.toIntern());
     assert(enum_type.tag_mode != .nonexhaustive);
     // The `tagValueIndex` function call below relies on the type being the integer tag type.
     // `getCoerced` assumes the value will fit the new type.
     if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false;
     const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty));
 
-    return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null;
+    return enum_type.tagValueIndex(&zcu.intern_pool, int_coerced.toIntern()) != null;
 }
 
 fn intAddWithOverflow(
@@ -38015,12 +38014,12 @@ fn intAddWithOverflow(
     ty: Type,
 ) !Value.OverflowArithmeticResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const vec_len = ty.vectorLen(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const vec_len = ty.vectorLen(zcu);
         const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
         const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
-        const scalar_ty = ty.scalarType(mod);
+        const scalar_ty = ty.scalarType(zcu);
         for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -38049,10 +38048,10 @@ fn intAddWithOverflowScalar(
     ty: Type,
 ) !Value.OverflowArithmeticResult {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    const info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
 
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
         return .{
             .overflow_bit = try pt.undefValue(Type.u1),
             .wrapped_result = try pt.undefValue(ty),
@@ -38061,8 +38060,8 @@ fn intAddWithOverflowScalar(
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema);
+    const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
+    const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -38088,13 +38087,13 @@ fn compareAll(
     ty: Type,
 ) CompileError!bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
         var i: usize = 0;
-        while (i < ty.vectorLen(mod)) : (i += 1) {
+        while (i < ty.vectorLen(zcu)) : (i += 1) {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
-            if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
+            if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu)))) {
                 return false;
             }
         }
@@ -38117,7 +38116,7 @@ fn compareScalar(
     switch (op) {
         .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty),
         .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)),
-        else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, pt, .sema),
+        else => return Value.compareHeteroSema(coerced_lhs, op, coerced_rhs, pt),
     }
 }
 
@@ -38139,17 +38138,17 @@ fn compareVector(
     ty: Type,
 ) !Value {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    assert(ty.zigTypeTag(mod) == .Vector);
-    const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
+    const zcu = pt.zcu;
+    assert(ty.zigTypeTag(zcu) == .Vector);
+    const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
     for (result_data, 0..) |*scalar, i| {
         const lhs_elem = try lhs.elemValue(pt, i);
         const rhs_elem = try rhs.elemValue(pt, i);
-        const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
+        const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu));
         scalar.* = Value.makeBool(res_bool).toIntern();
     }
     return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-        .ty = (try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
+        .ty = (try pt.vectorType(.{ .len = ty.vectorLen(zcu), .child = .bool_type })).toIntern(),
         .storage = .{ .elems = result_data },
     } }));
 }
@@ -38250,8 +38249,8 @@ fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Ai
 /// Returns true if any value contained in `val` is undefined.
 fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool {
     const pt = sema.pt;
-    const mod = pt.zcu;
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+    const zcu = pt.zcu;
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .undef => true,
         .simple_value => |v| v == .undefined,
         .slice => {
@@ -38261,7 +38260,7 @@ fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool {
             return sema.anyUndef(block, src, arr);
         },
         .aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| {
-            const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i];
+            const elem = zcu.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i];
             if (try sema.anyUndef(block, src, Value.fromInterned(elem))) break true;
         } else false,
         else => false,
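Note: the src/Type.zig changes below follow the same convention as the Sema call sites above: pure queries that only read interned data take a *Zcu (previously spelled *Module), while anything that can trigger semantic analysis keeps the Zcu.PerThread handle. A sketch of the two shapes, not part of the commit, assuming ty, zcu, and pt in scope:

    // Query: reads only the intern pool, so a *Zcu suffices.
    if (ty.zigTypeTag(zcu) == .Vector) {
        _ = ty.vectorLen(zcu);
    }
    // Resolution: may resolve the type's layout first, so it needs pt.
    _ = try ty.abiAlignmentSema(pt);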
src/Type.zig
@@ -10,8 +10,6 @@ const Value = @import("Value.zig");
 const assert = std.debug.assert;
 const Target = std.Target;
 const Zcu = @import("Zcu.zig");
-/// Deprecated.
-const Module = Zcu;
 const log = std.log.scoped(.Type);
 const target_util = @import("target.zig");
 const Sema = @import("Sema.zig");
@@ -23,15 +21,15 @@ const SemaError = Zcu.SemaError;
 
 ip_index: InternPool.Index,
 
-pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId {
-    return ty.zigTypeTagOrPoison(mod) catch unreachable;
+pub fn zigTypeTag(ty: Type, zcu: *const Zcu) std.builtin.TypeId {
+    return ty.zigTypeTagOrPoison(zcu) catch unreachable;
 }
 
-pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId {
-    return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern());
+pub fn zigTypeTagOrPoison(ty: Type, zcu: *const Zcu) error{GenericPoison}!std.builtin.TypeId {
+    return zcu.intern_pool.zigTypeTagOrPoison(ty.toIntern());
 }
 
-pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId {
+pub fn baseZigTypeTag(self: Type, mod: *Zcu) std.builtin.TypeId {
     return switch (self.zigTypeTag(mod)) {
         .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod),
         .Optional => {
@@ -41,15 +39,15 @@ pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId {
     };
 }
 
-pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool {
-    return switch (ty.zigTypeTag(mod)) {
+pub fn isSelfComparable(ty: Type, zcu: *const Zcu, is_equality_cmp: bool) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Int,
         .Float,
         .ComptimeFloat,
         .ComptimeInt,
         => true,
 
-        .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp),
+        .Vector => ty.elemType2(zcu).isSelfComparable(zcu, is_equality_cmp),
 
         .Bool,
         .Type,
@@ -72,25 +70,25 @@ pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) boo
         .Frame,
         => false,
 
-        .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)),
+        .Pointer => !ty.isSlice(zcu) and (is_equality_cmp or ty.isCPtr(zcu)),
         .Optional => {
             if (!is_equality_cmp) return false;
-            return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp);
+            return ty.optionalChild(zcu).isSelfComparable(zcu, is_equality_cmp);
         },
     };
 }
 
 /// If it is a function pointer, returns the function type. Otherwise returns null.
-pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type {
-    if (ty.zigTypeTag(mod) != .Pointer) return null;
-    const elem_ty = ty.childType(mod);
-    if (elem_ty.zigTypeTag(mod) != .Fn) return null;
+pub fn castPtrToFn(ty: Type, zcu: *const Zcu) ?Type {
+    if (ty.zigTypeTag(zcu) != .Pointer) return null;
+    const elem_ty = ty.childType(zcu);
+    if (elem_ty.zigTypeTag(zcu) != .Fn) return null;
     return elem_ty;
 }
 
 /// Asserts the type is a pointer.
-pub fn ptrIsMutable(ty: Type, mod: *const Module) bool {
-    return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const;
+pub fn ptrIsMutable(ty: Type, zcu: *const Zcu) bool {
+    return !zcu.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const;
 }
 
 pub const ArrayInfo = struct {
@@ -99,18 +97,18 @@ pub const ArrayInfo = struct {
     len: u64,
 };
 
-pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo {
+pub fn arrayInfo(self: Type, zcu: *const Zcu) ArrayInfo {
     return .{
-        .len = self.arrayLen(mod),
-        .sentinel = self.sentinel(mod),
-        .elem_type = self.childType(mod),
+        .len = self.arrayLen(zcu),
+        .sentinel = self.sentinel(zcu),
+        .elem_type = self.childType(zcu),
     };
 }
 
-pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn ptrInfo(ty: Type, zcu: *const Zcu) InternPool.Key.PtrType {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |p| p,
-        .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+        .opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
             .ptr_type => |p| p,
             else => unreachable,
         },
@@ -118,8 +116,8 @@ pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType {
     };
 }
 
-pub fn eql(a: Type, b: Type, mod: *const Module) bool {
-    _ = mod; // TODO: remove this parameter
+pub fn eql(a: Type, b: Type, zcu: *const Zcu) bool {
+    _ = zcu; // TODO: remove this parameter
     // The InternPool data structure hashes based on Key to make interned objects
     // unique. An Index can be treated simply as u32 value for the
     // purpose of Type/Value hashing and equality.
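
As the comment above says, interning reduces type equality to an index comparison; a minimal sketch of the equivalent check (hypothetical helper, assuming this file's imports — not code from this commit):

    fn typesEqual(a: Type, b: Type) bool {
        // Interned types are unique, so comparing InternPool indices
        // is the same as comparing the types structurally.
        return a.toIntern() == b.toIntern();
    }
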
@@ -179,8 +177,8 @@ pub fn dump(
 /// Prints a name suitable for `@typeName`.
 /// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
 pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error!void {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .int_type => |int_type| {
             const sign_char: u8 = switch (int_type.signedness) {
@@ -190,7 +188,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
             return writer.print("{c}{d}", .{ sign_char, int_type.bits });
         },
         .ptr_type => {
-            const info = ty.ptrInfo(mod);
+            const info = ty.ptrInfo(zcu);
 
             if (info.sentinel != .none) switch (info.flags.size) {
                 .One, .C => unreachable,
@@ -210,7 +208,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
                 const alignment = if (info.flags.alignment != .none)
                     info.flags.alignment
                 else
-                    Type.fromInterned(info.child).abiAlignment(pt);
+                    Type.fromInterned(info.child).abiAlignment(pt.zcu);
                 try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});
 
                 if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
@@ -268,7 +266,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
             return;
         },
         .inferred_error_set_type => |func_index| {
-            const func_nav = ip.getNav(mod.funcInfo(func_index).owner_nav);
+            const func_nav = ip.getNav(zcu.funcInfo(func_index).owner_nav);
             try writer.print("@typeInfo(@typeInfo(@TypeOf({})).Fn.return_type.?).ErrorUnion.error_set", .{
                 func_nav.fqn.fmt(ip),
             });
@@ -338,7 +336,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
                     try writer.writeAll("comptime ");
                 }
                 if (anon_struct.names.len != 0) {
-                    try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)});
+                    try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&zcu.intern_pool)});
                 }
 
                 try print(Type.fromInterned(field_ty), writer, pt);
@@ -367,7 +365,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
                 try writer.writeAll("noinline ");
             }
             try writer.writeAll("fn (");
-            const param_types = fn_info.param_types.get(&mod.intern_pool);
+            const param_types = fn_info.param_types.get(&zcu.intern_pool);
             for (param_types, 0..) |param_ty, i| {
                 if (i != 0) try writer.writeAll(", ");
                 if (std.math.cast(u5, i)) |index| {
@@ -448,6 +446,21 @@ pub fn toValue(self: Type) Value {
 
 const RuntimeBitsError = SemaError || error{NeedLazy};
 
+pub fn hasRuntimeBits(ty: Type, zcu: *Zcu) bool {
+    return hasRuntimeBitsInner(ty, false, .eager, zcu, {}) catch unreachable;
+}
+
+pub fn hasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
+    return hasRuntimeBitsInner(ty, false, .sema, pt.zcu, pt.tid) catch |err| switch (err) {
+        error.NeedLazy => unreachable, // this would require a resolve strat of lazy
+        else => |e| return e,
+    };
+}
+
+pub fn hasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool {
+    return hasRuntimeBitsInner(ty, true, .eager, zcu, {}) catch unreachable;
+}
+
 /// true if and only if the type takes up space in memory at runtime.
 /// There are two reasons a type will return false:
 /// * the type is a comptime-only type. For example, the type `type` itself.
@@ -459,14 +472,14 @@ const RuntimeBitsError = SemaError || error{NeedLazy};
 ///     making it one-possible-value only if the integer tag type has 0 bits.
 /// When `ignore_comptime_only` is true, then types that are comptime-only
 /// may return false positives.
-pub fn hasRuntimeBitsAdvanced(
+pub fn hasRuntimeBitsInner(
     ty: Type,
-    pt: Zcu.PerThread,
     ignore_comptime_only: bool,
     comptime strat: ResolveStratLazy,
+    zcu: *Zcu,
+    tid: strat.Tid(),
 ) RuntimeBitsError!bool {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     return switch (ty.toIntern()) {
         // False because it is a comptime-only type.
         .empty_struct_type => false,
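
This signature is the pattern the commit applies throughout: eager callers pass a bare *Zcu plus a void tid (spelled `{}`), while semantic analysis supplies the pieces of its Zcu.PerThread. Two hypothetical call sites, assuming this file's imports:

    fn backendUse(ty: Type, zcu: *Zcu) bool {
        // Eager: type resolution must already be done; cannot fail.
        return ty.hasRuntimeBits(zcu);
    }

    fn semaUse(ty: Type, pt: Zcu.PerThread) SemaError!bool {
        // Sema: may resolve the type on demand, so it can fail.
        return ty.hasRuntimeBitsSema(pt);
    }
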
@@ -477,26 +490,29 @@ pub fn hasRuntimeBitsAdvanced(
                 // to comptime-only types do not, with the exception of function pointers.
                 if (ignore_comptime_only) return true;
                 return switch (strat) {
-                    .sema => !try ty.comptimeOnlyAdvanced(pt, .sema),
-                    .eager => !ty.comptimeOnly(pt),
+                    .sema => {
+                        const pt = strat.pt(zcu, tid);
+                        return !try ty.comptimeOnlySema(pt);
+                    },
+                    .eager => !ty.comptimeOnly(zcu),
                     .lazy => error.NeedLazy,
                 };
             },
             .anyframe_type => true,
             .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and
-                try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
+                try Type.fromInterned(array_type.child).hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid),
             .vector_type => |vector_type| return vector_type.len > 0 and
-                try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
+                try Type.fromInterned(vector_type.child).hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid),
             .opt_type => |child| {
                 const child_ty = Type.fromInterned(child);
-                if (child_ty.isNoReturn(mod)) {
+                if (child_ty.isNoReturn(zcu)) {
                     // Then the optional is comptime-known to be null.
                     return false;
                 }
                 if (ignore_comptime_only) return true;
                 return switch (strat) {
-                    .sema => !try child_ty.comptimeOnlyAdvanced(pt, .sema),
-                    .eager => !child_ty.comptimeOnly(pt),
+                    .sema => !try child_ty.comptimeOnlyInner(.sema, zcu, tid),
+                    .eager => !child_ty.comptimeOnly(zcu),
                     .lazy => error.NeedLazy,
                 };
             },
@@ -556,14 +572,14 @@ pub fn hasRuntimeBitsAdvanced(
                     return true;
                 }
                 switch (strat) {
-                    .sema => try ty.resolveFields(pt),
+                    .sema => try ty.resolveFields(strat.pt(zcu, tid)),
                     .eager => assert(struct_type.haveFieldTypes(ip)),
                     .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
                 }
                 for (0..struct_type.field_types.len) |i| {
                     if (struct_type.comptime_bits.getBit(ip, i)) continue;
                     const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-                    if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
+                    if (try field_ty.hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid))
                         return true;
                 } else {
                     return false;
@@ -572,7 +588,12 @@ pub fn hasRuntimeBitsAdvanced(
             .anon_struct_type => |tuple| {
                 for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                     if (val != .none) continue; // comptime field
-                    if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) return true;
+                    if (try Type.fromInterned(field_ty).hasRuntimeBitsInner(
+                        ignore_comptime_only,
+                        strat,
+                        zcu,
+                        tid,
+                    )) return true;
                 }
                 return false;
             },
@@ -591,21 +612,25 @@ pub fn hasRuntimeBitsAdvanced(
                         // tag_ty will be `none` if this union's tag type is not resolved yet,
                         // in which case we want control flow to continue down below.
                         if (tag_ty != .none and
-                            try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
-                        {
+                            try Type.fromInterned(tag_ty).hasRuntimeBitsInner(
+                            ignore_comptime_only,
+                            strat,
+                            zcu,
+                            tid,
+                        )) {
                             return true;
                         }
                     },
                 }
                 switch (strat) {
-                    .sema => try ty.resolveFields(pt),
+                    .sema => try ty.resolveFields(strat.pt(zcu, tid)),
                     .eager => assert(union_flags.status.haveFieldTypes()),
                     .lazy => if (!union_flags.status.haveFieldTypes())
                         return error.NeedLazy,
                 }
                 for (0..union_type.field_types.len) |field_index| {
                     const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
-                    if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
+                    if (try field_ty.hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid))
                         return true;
                 } else {
                     return false;
@@ -613,7 +638,12 @@ pub fn hasRuntimeBitsAdvanced(
             },
 
             .opaque_type => true,
-            .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
+            .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsInner(
+                ignore_comptime_only,
+                strat,
+                zcu,
+                tid,
+            ),
 
             // values, not types
             .undef,
@@ -643,8 +673,8 @@ pub fn hasRuntimeBitsAdvanced(
 /// true if and only if the type has a well-defined memory layout
 /// readFrom/writeToMemory are supported only for types with a well-
 /// defined memory layout
-pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .int_type,
         .vector_type,
@@ -660,8 +690,8 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
         .func_type,
         => false,
 
-        .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod),
-        .opt_type => ty.isPtrLikeOptional(mod),
+        .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(zcu),
+        .opt_type => ty.isPtrLikeOptional(zcu),
         .ptr_type => |ptr_type| ptr_type.flags.size != .Slice,
 
         .simple_type => |t| switch (t) {
@@ -740,94 +770,99 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
     };
 }
 
-pub fn hasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
-    return hasRuntimeBitsAdvanced(ty, pt, false, .eager) catch unreachable;
+pub fn fnHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
+    return ty.fnHasRuntimeBitsInner(.normal, zcu, {}) catch unreachable;
 }
 
-pub fn hasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool {
-    return hasRuntimeBitsAdvanced(ty, pt, true, .eager) catch unreachable;
-}
-
-pub fn fnHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
-    return ty.fnHasRuntimeBitsAdvanced(pt, .normal) catch unreachable;
+pub fn fnHasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
+    return try ty.fnHasRuntimeBitsInner(.sema, pt.zcu, pt.tid);
 }
 
 /// Determines whether a function type has runtime bits, i.e. whether a
 /// function with this type can exist at runtime.
 /// Asserts that `ty` is a function type.
-pub fn fnHasRuntimeBitsAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) SemaError!bool {
-    const fn_info = pt.zcu.typeToFunc(ty).?;
+pub fn fnHasRuntimeBitsInner(
+    ty: Type,
+    comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) SemaError!bool {
+    const fn_info = zcu.typeToFunc(ty).?;
     if (fn_info.is_generic) return false;
     if (fn_info.is_var_args) return true;
     if (fn_info.cc == .Inline) return false;
-    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(pt, strat);
+    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyInner(strat, zcu, tid);
 }
 
-pub fn isFnOrHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
-    switch (ty.zigTypeTag(pt.zcu)) {
-        .Fn => return ty.fnHasRuntimeBits(pt),
-        else => return ty.hasRuntimeBits(pt),
+pub fn isFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
+    switch (ty.zigTypeTag(zcu)) {
+        .Fn => return ty.fnHasRuntimeBits(zcu),
+        else => return ty.hasRuntimeBits(zcu),
     }
 }
 
 /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
-pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool {
-    return switch (ty.zigTypeTag(pt.zcu)) {
+pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Fn => true,
-        else => return ty.hasRuntimeBitsIgnoreComptime(pt),
+        else => return ty.hasRuntimeBitsIgnoreComptime(zcu),
     };
 }
 
-pub fn isNoReturn(ty: Type, mod: *Module) bool {
-    return mod.intern_pool.isNoReturn(ty.toIntern());
+pub fn isNoReturn(ty: Type, zcu: *const Zcu) bool {
+    return zcu.intern_pool.isNoReturn(ty.toIntern());
 }
 
 /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
-pub fn ptrAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
-    return ptrAlignmentAdvanced(ty, pt, .normal) catch unreachable;
+pub fn ptrAlignment(ty: Type, zcu: *Zcu) Alignment {
+    return ptrAlignmentInner(ty, .normal, zcu, {}) catch unreachable;
+}
+
+pub fn ptrAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment {
+    return try ty.ptrAlignmentInner(.sema, pt.zcu, pt.tid);
 }
 
-pub fn ptrAlignmentAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Alignment {
-    return switch (pt.zcu.intern_pool.indexToKey(ty.toIntern())) {
+pub fn ptrAlignmentInner(
+    ty: Type,
+    comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) !Alignment {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| {
             if (ptr_type.flags.alignment != .none)
                 return ptr_type.flags.alignment;
 
             if (strat == .sema) {
-                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .sema);
+                const res = try Type.fromInterned(ptr_type.child).abiAlignmentInner(.sema, zcu, tid);
                 return res.scalar;
             }
 
-            return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar;
+            return Type.fromInterned(ptr_type.child).abiAlignment(zcu);
         },
-        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(pt, strat),
+        .opt_type => |child| Type.fromInterned(child).ptrAlignmentInner(strat, zcu, tid),
         else => unreachable,
     };
 }
 
-pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn ptrAddressSpace(ty: Type, zcu: *const Zcu) std.builtin.AddressSpace {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| ptr_type.flags.address_space,
-        .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space,
+        .opt_type => |child| zcu.intern_pool.indexToKey(child).ptr_type.flags.address_space,
         else => unreachable,
     };
 }
 
-/// Never returns `none`. Asserts that all necessary type resolution is already done.
-pub fn abiAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
-    return (ty.abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar;
-}
-
 /// May capture a reference to `ty`.
 /// Returned value has type `comptime_int`.
 pub fn lazyAbiAlignment(ty: Type, pt: Zcu.PerThread) !Value {
-    switch (try ty.abiAlignmentAdvanced(pt, .lazy)) {
+    switch (try ty.abiAlignmentInner(.lazy, pt.zcu, pt.tid)) {
         .val => |val| return val,
         .scalar => |x| return pt.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
     }
 }
 
-pub const AbiAlignmentAdvanced = union(enum) {
+pub const AbiAlignmentInner = union(enum) {
     scalar: Alignment,
     val: Value,
 };
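
lazyAbiAlignment above shows how a .lazy result is consumed: a resolved type yields .scalar, an unresolved one yields a .val whose storage is .{ .lazy_align = ty.toIntern() }. A hypothetical caller that tolerates both, assuming this file's imports:

    fn alignmentIfResolved(ty: Type, pt: Zcu.PerThread) !?Alignment {
        switch (try ty.abiAlignmentInner(.lazy, pt.zcu, pt.tid)) {
            .scalar => |a| return a, // already resolved
            .val => return null, // a reference to `ty` was captured instead
        }
    }
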
@@ -842,6 +877,23 @@ pub const ResolveStratLazy = enum {
     /// Return a scalar result, performing type resolution as necessary.
     /// This should typically be used from semantic analysis.
     sema,
+
+    pub fn Tid(comptime strat: ResolveStratLazy) type {
+        return switch (strat) {
+            .lazy, .sema => Zcu.PerThread.Id,
+            .eager => void,
+        };
+    }
+
+    pub fn pt(comptime strat: ResolveStratLazy, zcu: *Zcu, tid: strat.Tid()) switch (strat) {
+        .lazy, .sema => Zcu.PerThread,
+        .eager => void,
+    } {
+        return switch (strat) {
+            .lazy, .sema => .{ .tid = tid, .zcu = zcu },
+            else => {},
+        };
+    }
 };
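
Tid() and pt() are what let a single Inner function serve every strategy without dragging a Zcu.PerThread through eager code: for .eager the tid parameter is void (callers pass `{}`) and pt() produces nothing, while .sema and .lazy reassemble a Zcu.PerThread from zcu and tid only at the points that need one. A hypothetical generic function showing the shape:

    fn innerExample(comptime strat: ResolveStratLazy, zcu: *Zcu, tid: strat.Tid()) void {
        if (strat != .eager) {
            // Only materialized on the sema/lazy paths.
            const pt = strat.pt(zcu, tid); // a Zcu.PerThread
            _ = pt;
        }
    }
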
 
 /// The chosen strategy can be easily optimized away in release builds.
@@ -854,6 +906,23 @@ pub const ResolveStrat = enum {
     /// This should typically be used from semantic analysis.
     sema,
 
+    pub fn Tid(comptime strat: ResolveStrat) type {
+        return switch (strat) {
+            .sema => Zcu.PerThread.Id,
+            .normal => void,
+        };
+    }
+
+    pub fn pt(comptime strat: ResolveStrat, zcu: *Zcu, tid: strat.Tid()) switch (strat) {
+        .sema => Zcu.PerThread,
+        .normal => void,
+    } {
+        return switch (strat) {
+            .sema => .{ .tid = tid, .zcu = zcu },
+            .normal => {},
+        };
+    }
+
     pub inline fn toLazy(strat: ResolveStrat) ResolveStratLazy {
         return switch (strat) {
             .normal => .eager,
@@ -862,21 +931,31 @@ pub const ResolveStrat = enum {
     }
 };
 
+/// Never returns `none`. Asserts that all necessary type resolution is already done.
+pub fn abiAlignment(ty: Type, zcu: *Zcu) Alignment {
+    return (ty.abiAlignmentInner(.eager, zcu, {}) catch unreachable).scalar;
+}
+
+pub fn abiAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment {
+    return (try ty.abiAlignmentInner(.sema, pt.zcu, pt.tid)).scalar;
+}
+
 /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
 /// In this case there will be no error, guaranteed.
 /// If you pass `lazy` you may get back `scalar` or `val`.
 /// If `val` is returned, a reference to `ty` has been captured.
 /// If you pass `sema` you will get back `scalar` and resolve the type if
 /// necessary, possibly returning a CompileError.
-pub fn abiAlignmentAdvanced(
+pub fn abiAlignmentInner(
     ty: Type,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStratLazy,
-) SemaError!AbiAlignmentAdvanced {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
-    const use_llvm = mod.comp.config.use_llvm;
-    const ip = &mod.intern_pool;
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) SemaError!AbiAlignmentInner {
+    const pt = strat.pt(zcu, tid);
+    const target = zcu.getTarget();
+    const use_llvm = zcu.comp.config.use_llvm;
+    const ip = &zcu.intern_pool;
 
     switch (ty.toIntern()) {
         .empty_struct_type => return .{ .scalar = .@"1" },
@@ -889,22 +968,22 @@ pub fn abiAlignmentAdvanced(
                 return .{ .scalar = ptrAbiAlignment(target) };
             },
             .array_type => |array_type| {
-                return Type.fromInterned(array_type.child).abiAlignmentAdvanced(pt, strat);
+                return Type.fromInterned(array_type.child).abiAlignmentInner(strat, zcu, tid);
             },
             .vector_type => |vector_type| {
                 if (vector_type.len == 0) return .{ .scalar = .@"1" };
-                switch (mod.comp.getZigBackend()) {
+                switch (zcu.comp.getZigBackend()) {
                     else => {
                         // This is fine because the child type of a vector always has a bit-size known
                         // without needing any type resolution.
-                        const elem_bits: u32 = @intCast(Type.fromInterned(vector_type.child).bitSize(pt));
+                        const elem_bits: u32 = @intCast(Type.fromInterned(vector_type.child).bitSize(zcu));
                         if (elem_bits == 0) return .{ .scalar = .@"1" };
                         const bytes = ((elem_bits * vector_type.len) + 7) / 8;
                         const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                         return .{ .scalar = Alignment.fromByteUnits(alignment) };
                     },
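
Worked numbers for the generic rule just above: @Vector(3, u10) packs 30 bits, so ((10 * 3) + 7) / 8 = 4 bytes, and the next power of two is 4, giving an ABI alignment of 4. The same arithmetic in plain Zig, independent of compiler internals:

    const std = @import("std");

    test "generic vector alignment arithmetic" {
        const elem_bits: u32 = 10;
        const len: u32 = 3;
        const bytes = ((elem_bits * len) + 7) / 8;
        try std.testing.expectEqual(@as(u32, 4), std.math.ceilPowerOfTwoAssert(u32, bytes));
    }
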
                     .stage2_c => {
-                        return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(pt, strat);
+                        return Type.fromInterned(vector_type.child).abiAlignmentInner(strat, zcu, tid);
                     },
                     .stage2_x86_64 => {
                         if (vector_type.child == .bool_type) {
@@ -915,7 +994,7 @@ pub fn abiAlignmentAdvanced(
                             const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                             return .{ .scalar = Alignment.fromByteUnits(alignment) };
                         }
-                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
+                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
                         if (elem_bytes == 0) return .{ .scalar = .@"1" };
                         const bytes = elem_bytes * vector_type.len;
                         if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
@@ -925,11 +1004,16 @@ pub fn abiAlignmentAdvanced(
                 }
             },
 
-            .opt_type => return ty.abiAlignmentAdvancedOptional(pt, strat),
-            .error_union_type => |info| return ty.abiAlignmentAdvancedErrorUnion(pt, strat, Type.fromInterned(info.payload_type)),
+            .opt_type => return ty.abiAlignmentInnerOptional(strat, zcu, tid),
+            .error_union_type => |info| return ty.abiAlignmentInnerErrorUnion(
+                strat,
+                zcu,
+                tid,
+                Type.fromInterned(info.payload_type),
+            ),
 
             .error_set_type, .inferred_error_set_type => {
-                const bits = mod.errorSetBits();
+                const bits = zcu.errorSetBits();
                 if (bits == 0) return .{ .scalar = .@"1" };
                 return .{ .scalar = intAbiAlignment(bits, target, use_llvm) };
             },
@@ -965,7 +1049,7 @@ pub fn abiAlignmentAdvanced(
                 },
                 .f80 => switch (target.cTypeBitSize(.longdouble)) {
                     80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
-                    else => return .{ .scalar = Type.u80.abiAlignment(pt) },
+                    else => return .{ .scalar = Type.u80.abiAlignment(zcu) },
                 },
                 .f128 => switch (target.cTypeBitSize(.longdouble)) {
                     128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
@@ -973,7 +1057,7 @@ pub fn abiAlignmentAdvanced(
                 },
 
                 .anyerror, .adhoc_inferred_error_set => {
-                    const bits = mod.errorSetBits();
+                    const bits = zcu.errorSetBits();
                     if (bits == 0) return .{ .scalar = .@"1" };
                     return .{ .scalar = intAbiAlignment(bits, target, use_llvm) };
                 },
@@ -1003,7 +1087,7 @@ pub fn abiAlignmentAdvanced(
                         },
                         .eager => {},
                     }
-                    return .{ .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiAlignment(pt) };
+                    return .{ .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiAlignment(zcu) };
                 }
 
                 if (struct_type.flagsUnordered(ip).alignment == .none) switch (strat) {
@@ -1021,11 +1105,11 @@ pub fn abiAlignmentAdvanced(
                 var big_align: Alignment = .@"1";
                 for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                     if (val != .none) continue; // comptime field
-                    switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(pt, strat)) {
+                    switch (try Type.fromInterned(field_ty).abiAlignmentInner(strat, zcu, tid)) {
                         .scalar => |field_align| big_align = big_align.max(field_align),
                         .val => switch (strat) {
                             .eager => unreachable, // field type alignment not resolved
-                            .sema => unreachable, // passed to abiAlignmentAdvanced above
+                            .sema => unreachable, // passed to abiAlignmentInner above
                             .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                                 .ty = .comptime_int_type,
                                 .storage = .{ .lazy_align = ty.toIntern() },
@@ -1051,7 +1135,7 @@ pub fn abiAlignmentAdvanced(
             },
             .opaque_type => return .{ .scalar = .@"1" },
             .enum_type => return .{
-                .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(pt),
+                .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(zcu),
             },
 
             // values, not types
@@ -1079,32 +1163,37 @@ pub fn abiAlignmentAdvanced(
     }
 }
 
-fn abiAlignmentAdvancedErrorUnion(
+fn abiAlignmentInnerErrorUnion(
     ty: Type,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStratLazy,
+    zcu: *Zcu,
+    tid: strat.Tid(),
     payload_ty: Type,
-) SemaError!AbiAlignmentAdvanced {
+) SemaError!AbiAlignmentInner {
     // This code needs to be kept in sync with the equivalent switch prong
-    // in abiSizeAdvanced.
-    const code_align = Type.anyerror.abiAlignment(pt);
+    // in abiSizeInner.
+    const code_align = Type.anyerror.abiAlignment(zcu);
     switch (strat) {
         .eager, .sema => {
-            if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
-                error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                    .ty = .comptime_int_type,
-                    .storage = .{ .lazy_align = ty.toIntern() },
-                } })) },
+            if (!(payload_ty.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
+                error.NeedLazy => if (strat == .lazy) {
+                    const pt = strat.pt(zcu, tid);
+                    return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                        .ty = .comptime_int_type,
+                        .storage = .{ .lazy_align = ty.toIntern() },
+                    } })) };
+                } else unreachable,
                 else => |e| return e,
             })) {
                 return .{ .scalar = code_align };
             }
             return .{ .scalar = code_align.max(
-                (try payload_ty.abiAlignmentAdvanced(pt, strat)).scalar,
+                (try payload_ty.abiAlignmentInner(strat, zcu, tid)).scalar,
             ) };
         },
         .lazy => {
-            switch (try payload_ty.abiAlignmentAdvanced(pt, strat)) {
+            const pt = strat.pt(zcu, tid);
+            switch (try payload_ty.abiAlignmentInner(strat, zcu, tid)) {
                 .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
                 .val => {},
             }
@@ -1116,36 +1205,39 @@ fn abiAlignmentAdvancedErrorUnion(
     }
 }
 
-fn abiAlignmentAdvancedOptional(
+fn abiAlignmentInnerOptional(
     ty: Type,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStratLazy,
-) SemaError!AbiAlignmentAdvanced {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
-    const child_type = ty.optionalChild(mod);
-
-    switch (child_type.zigTypeTag(mod)) {
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) SemaError!AbiAlignmentInner {
+    const pt = strat.pt(zcu, tid);
+    const target = zcu.getTarget();
+    const child_type = ty.optionalChild(zcu);
+
+    switch (child_type.zigTypeTag(zcu)) {
         .Pointer => return .{ .scalar = ptrAbiAlignment(target) },
-        .ErrorSet => return Type.anyerror.abiAlignmentAdvanced(pt, strat),
+        .ErrorSet => return Type.anyerror.abiAlignmentInner(strat, zcu, tid),
         .NoReturn => return .{ .scalar = .@"1" },
         else => {},
     }
 
     switch (strat) {
         .eager, .sema => {
-            if (!(child_type.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
-                error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                    .ty = .comptime_int_type,
-                    .storage = .{ .lazy_align = ty.toIntern() },
-                } })) },
+            if (!(child_type.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
+                error.NeedLazy => if (strat == .lazy) {
+                    return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                        .ty = .comptime_int_type,
+                        .storage = .{ .lazy_align = ty.toIntern() },
+                    } })) };
+                } else unreachable,
                 else => |e| return e,
             })) {
                 return .{ .scalar = .@"1" };
             }
-            return child_type.abiAlignmentAdvanced(pt, strat);
+            return child_type.abiAlignmentInner(strat, zcu, tid);
         },
-        .lazy => switch (try child_type.abiAlignmentAdvanced(pt, strat)) {
+        .lazy => switch (try child_type.abiAlignmentInner(strat, zcu, tid)) {
             .scalar => |x| return .{ .scalar = x.max(.@"1") },
             .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                 .ty = .comptime_int_type,
@@ -1155,40 +1247,44 @@ fn abiAlignmentAdvancedOptional(
     }
 }
 
+const AbiSizeInner = union(enum) {
+    scalar: u64,
+    val: Value,
+};
+
+/// Asserts the type has the ABI size already resolved.
+/// Types that return false for hasRuntimeBits() return 0.
+pub fn abiSize(ty: Type, zcu: *Zcu) u64 {
+    return (abiSizeInner(ty, .eager, zcu, {}) catch unreachable).scalar;
+}
+
 /// May capture a reference to `ty`.
 pub fn lazyAbiSize(ty: Type, pt: Zcu.PerThread) !Value {
-    switch (try ty.abiSizeAdvanced(pt, .lazy)) {
+    switch (try ty.abiSizeInner(.lazy, pt.zcu, pt.tid)) {
         .val => |val| return val,
         .scalar => |x| return pt.intValue(Type.comptime_int, x),
     }
 }
 
-/// Asserts the type has the ABI size already resolved.
-/// Types that return false for hasRuntimeBits() return 0.
-pub fn abiSize(ty: Type, pt: Zcu.PerThread) u64 {
-    return (abiSizeAdvanced(ty, pt, .eager) catch unreachable).scalar;
+pub fn abiSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 {
+    return (try abiSizeInner(ty, .sema, pt.zcu, pt.tid)).scalar;
 }
 
-const AbiSizeAdvanced = union(enum) {
-    scalar: u64,
-    val: Value,
-};
-
 /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
 /// In this case there will be no error, guaranteed.
 /// If you pass `lazy` you may get back `scalar` or `val`.
 /// If `val` is returned, a reference to `ty` has been captured.
 /// If you pass `sema` you will get back `scalar` and resolve the type if
 /// necessary, possibly returning a CompileError.
-pub fn abiSizeAdvanced(
+pub fn abiSizeInner(
     ty: Type,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStratLazy,
-) SemaError!AbiSizeAdvanced {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
-    const use_llvm = mod.comp.config.use_llvm;
-    const ip = &mod.intern_pool;
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) SemaError!AbiSizeInner {
+    const target = zcu.getTarget();
+    const use_llvm = zcu.comp.config.use_llvm;
+    const ip = &zcu.intern_pool;
 
     switch (ty.toIntern()) {
         .empty_struct_type => return .{ .scalar = 0 },
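
With the reordering above, the three size entry points sit together and map one-to-one onto the strategies; a hypothetical call site, assuming this file's imports:

    fn sizeExamples(ty: Type, zcu: *Zcu, pt: Zcu.PerThread) !void {
        _ = ty.abiSize(zcu); // eager: asserts layout is already resolved
        _ = try ty.abiSizeSema(pt); // sema: resolves layout first; may fail
        _ = try ty.lazyAbiSize(pt); // lazy: may return a lazy_size Value instead
    }
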
@@ -1207,14 +1303,17 @@ pub fn abiSizeAdvanced(
             .array_type => |array_type| {
                 const len = array_type.lenIncludingSentinel();
                 if (len == 0) return .{ .scalar = 0 };
-                switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(pt, strat)) {
+                switch (try Type.fromInterned(array_type.child).abiSizeInner(strat, zcu, tid)) {
                     .scalar => |elem_size| return .{ .scalar = len * elem_size },
                     .val => switch (strat) {
                         .sema, .eager => unreachable,
-                        .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                            .ty = .comptime_int_type,
-                            .storage = .{ .lazy_size = ty.toIntern() },
-                        } })) },
+                        .lazy => {
+                            const pt = strat.pt(zcu, tid);
+                            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                                .ty = .comptime_int_type,
+                                .storage = .{ .lazy_size = ty.toIntern() },
+                            } })) };
+                        },
                     },
                 }
             },
@@ -1222,41 +1321,38 @@ pub fn abiSizeAdvanced(
                 const sub_strat: ResolveStrat = switch (strat) {
                     .sema => .sema,
                     .eager => .normal,
-                    .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                        .ty = .comptime_int_type,
-                        .storage = .{ .lazy_size = ty.toIntern() },
-                    } })) },
-                };
-                const alignment = switch (try ty.abiAlignmentAdvanced(pt, strat)) {
-                    .scalar => |x| x,
-                    .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                        .ty = .comptime_int_type,
-                        .storage = .{ .lazy_size = ty.toIntern() },
-                    } })) },
+                    .lazy => {
+                        const pt = strat.pt(zcu, tid);
+                        return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                            .ty = .comptime_int_type,
+                            .storage = .{ .lazy_size = ty.toIntern() },
+                        } })) };
+                    },
                 };
-                const total_bytes = switch (mod.comp.getZigBackend()) {
+                const alignment = (try ty.abiAlignmentInner(strat, zcu, tid)).scalar;
+                const total_bytes = switch (zcu.comp.getZigBackend()) {
                     else => total_bytes: {
-                        const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, sub_strat);
+                        const elem_bits = try Type.fromInterned(vector_type.child).bitSizeInner(sub_strat, zcu, tid);
                         const total_bits = elem_bits * vector_type.len;
                         break :total_bytes (total_bits + 7) / 8;
                     },
                     .stage2_c => total_bytes: {
-                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
+                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
                         break :total_bytes elem_bytes * vector_type.len;
                     },
                     .stage2_x86_64 => total_bytes: {
                         if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
-                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
+                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
                         break :total_bytes elem_bytes * vector_type.len;
                     },
                 };
                 return .{ .scalar = alignment.forward(total_bytes) };
             },
 
-            .opt_type => return ty.abiSizeAdvancedOptional(pt, strat),
+            .opt_type => return ty.abiSizeInnerOptional(strat, zcu, tid),
 
             .error_set_type, .inferred_error_set_type => {
-                const bits = mod.errorSetBits();
+                const bits = zcu.errorSetBits();
                 if (bits == 0) return .{ .scalar = 0 };
                 return .{ .scalar = intAbiSize(bits, target, use_llvm) };
             },
@@ -1264,29 +1360,35 @@ pub fn abiSizeAdvanced(
             .error_union_type => |error_union_type| {
                 const payload_ty = Type.fromInterned(error_union_type.payload_type);
                 // This code needs to be kept in sync with the equivalent switch prong
-                // in abiAlignmentAdvanced.
-                const code_size = Type.anyerror.abiSize(pt);
-                if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
-                    error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                        .ty = .comptime_int_type,
-                        .storage = .{ .lazy_size = ty.toIntern() },
-                    } })) },
+                // in abiAlignmentInner.
+                const code_size = Type.anyerror.abiSize(zcu);
+                if (!(payload_ty.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
+                    error.NeedLazy => if (strat == .lazy) {
+                        const pt = strat.pt(zcu, tid);
+                        return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                            .ty = .comptime_int_type,
+                            .storage = .{ .lazy_size = ty.toIntern() },
+                        } })) };
+                    } else unreachable,
                     else => |e| return e,
                 })) {
                     // Same as anyerror.
                     return .{ .scalar = code_size };
                 }
-                const code_align = Type.anyerror.abiAlignment(pt);
-                const payload_align = payload_ty.abiAlignment(pt);
-                const payload_size = switch (try payload_ty.abiSizeAdvanced(pt, strat)) {
+                const code_align = Type.anyerror.abiAlignment(zcu);
+                const payload_align = (try payload_ty.abiAlignmentInner(strat, zcu, tid)).scalar;
+                const payload_size = switch (try payload_ty.abiSizeInner(strat, zcu, tid)) {
                     .scalar => |elem_size| elem_size,
                     .val => switch (strat) {
                         .sema => unreachable,
                         .eager => unreachable,
-                        .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                            .ty = .comptime_int_type,
-                            .storage = .{ .lazy_size = ty.toIntern() },
-                        } })) },
+                        .lazy => {
+                            const pt = strat.pt(zcu, tid);
+                            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                                .ty = .comptime_int_type,
+                                .storage = .{ .lazy_size = ty.toIntern() },
+                            } })) };
+                        },
                     },
                 };
 
@@ -1314,7 +1416,7 @@ pub fn abiSizeAdvanced(
                 .f128 => return .{ .scalar = 16 },
                 .f80 => switch (target.cTypeBitSize(.longdouble)) {
                     80 => return .{ .scalar = target.cTypeByteSize(.longdouble) },
-                    else => return .{ .scalar = Type.u80.abiSize(pt) },
+                    else => return .{ .scalar = Type.u80.abiSize(zcu) },
                 },
 
                 .usize,
@@ -1343,7 +1445,7 @@ pub fn abiSizeAdvanced(
                 => return .{ .scalar = 0 },
 
                 .anyerror, .adhoc_inferred_error_set => {
-                    const bits = mod.errorSetBits();
+                    const bits = zcu.errorSetBits();
                     if (bits == 0) return .{ .scalar = 0 };
                     return .{ .scalar = intAbiSize(bits, target, use_llvm) };
                 },
@@ -1354,30 +1456,33 @@ pub fn abiSizeAdvanced(
             .struct_type => {
                 const struct_type = ip.loadStructType(ty.toIntern());
                 switch (strat) {
-                    .sema => try ty.resolveLayout(pt),
-                    .lazy => switch (struct_type.layout) {
-                        .@"packed" => {
-                            if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
-                                .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                                    .ty = .comptime_int_type,
-                                    .storage = .{ .lazy_size = ty.toIntern() },
-                                } })),
-                            };
-                        },
-                        .auto, .@"extern" => {
-                            if (!struct_type.haveLayout(ip)) return .{
-                                .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                                    .ty = .comptime_int_type,
-                                    .storage = .{ .lazy_size = ty.toIntern() },
-                                } })),
-                            };
-                        },
+                    .sema => try ty.resolveLayout(strat.pt(zcu, tid)),
+                    .lazy => {
+                        const pt = strat.pt(zcu, tid);
+                        switch (struct_type.layout) {
+                            .@"packed" => {
+                                if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
+                                    .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                                        .ty = .comptime_int_type,
+                                        .storage = .{ .lazy_size = ty.toIntern() },
+                                    } })),
+                                };
+                            },
+                            .auto, .@"extern" => {
+                                if (!struct_type.haveLayout(ip)) return .{
+                                    .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                                        .ty = .comptime_int_type,
+                                        .storage = .{ .lazy_size = ty.toIntern() },
+                                    } })),
+                                };
+                            },
+                        }
                     },
                     .eager => {},
                 }
                 switch (struct_type.layout) {
                     .@"packed" => return .{
-                        .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiSize(pt),
+                        .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiSize(zcu),
                     },
                     .auto, .@"extern" => {
                         assert(struct_type.haveLayout(ip));
@@ -1387,25 +1492,28 @@ pub fn abiSizeAdvanced(
             },
             .anon_struct_type => |tuple| {
                 switch (strat) {
-                    .sema => try ty.resolveLayout(pt),
+                    .sema => try ty.resolveLayout(strat.pt(zcu, tid)),
                     .lazy, .eager => {},
                 }
                 const field_count = tuple.types.len;
                 if (field_count == 0) {
                     return .{ .scalar = 0 };
                 }
-                return .{ .scalar = ty.structFieldOffset(field_count, pt) };
+                return .{ .scalar = ty.structFieldOffset(field_count, zcu) };
             },
 
             .union_type => {
                 const union_type = ip.loadUnionType(ty.toIntern());
                 switch (strat) {
-                    .sema => try ty.resolveLayout(pt),
-                    .lazy => if (!union_type.flagsUnordered(ip).status.haveLayout()) return .{
-                        .val = Value.fromInterned(try pt.intern(.{ .int = .{
-                            .ty = .comptime_int_type,
-                            .storage = .{ .lazy_size = ty.toIntern() },
-                        } })),
+                    .sema => try ty.resolveLayout(strat.pt(zcu, tid)),
+                    .lazy => {
+                        const pt = strat.pt(zcu, tid);
+                        if (!union_type.flagsUnordered(ip).status.haveLayout()) return .{
+                            .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                                .ty = .comptime_int_type,
+                                .storage = .{ .lazy_size = ty.toIntern() },
+                            } })),
+                        };
                     },
                     .eager => {},
                 }
@@ -1414,7 +1522,7 @@ pub fn abiSizeAdvanced(
                 return .{ .scalar = union_type.sizeUnordered(ip) };
             },
             .opaque_type => unreachable, // no size available
-            .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) },
+            .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(zcu) },
 
             // values, not types
             .undef,
@@ -1441,36 +1549,39 @@ pub fn abiSizeAdvanced(
     }
 }
 
-fn abiSizeAdvancedOptional(
+fn abiSizeInnerOptional(
     ty: Type,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStratLazy,
-) SemaError!AbiSizeAdvanced {
-    const mod = pt.zcu;
-    const child_ty = ty.optionalChild(mod);
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) SemaError!AbiSizeInner {
+    const child_ty = ty.optionalChild(zcu);
 
-    if (child_ty.isNoReturn(mod)) {
+    if (child_ty.isNoReturn(zcu)) {
         return .{ .scalar = 0 };
     }
 
-    if (!(child_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
-        error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
-            .ty = .comptime_int_type,
-            .storage = .{ .lazy_size = ty.toIntern() },
-        } })) },
+    if (!(child_ty.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
+        error.NeedLazy => if (strat == .lazy) {
+            const pt = strat.pt(zcu, tid);
+            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+                .ty = .comptime_int_type,
+                .storage = .{ .lazy_size = ty.toIntern() },
+            } })) };
+        } else unreachable,
         else => |e| return e,
     })) return .{ .scalar = 1 };
 
-    if (ty.optionalReprIsPayload(mod)) {
-        return child_ty.abiSizeAdvanced(pt, strat);
+    if (ty.optionalReprIsPayload(zcu)) {
+        return child_ty.abiSizeInner(strat, zcu, tid);
     }
 
-    const payload_size = switch (try child_ty.abiSizeAdvanced(pt, strat)) {
+    const payload_size = switch (try child_ty.abiSizeInner(strat, zcu, tid)) {
         .scalar => |elem_size| elem_size,
         .val => switch (strat) {
             .sema => unreachable,
             .eager => unreachable,
-            .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
+            .lazy => return .{ .val = Value.fromInterned(try strat.pt(zcu, tid).intern(.{ .int = .{
                 .ty = .comptime_int_type,
                 .storage = .{ .lazy_size = ty.toIntern() },
             } })) },
@@ -1482,7 +1593,7 @@ fn abiSizeAdvancedOptional(
     // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
     // to the child type's ABI alignment.
     return .{
-        .scalar = (child_ty.abiAlignment(pt).toByteUnits() orelse 0) + payload_size,
+        .scalar = (child_ty.abiAlignment(zcu).toByteUnits() orelse 0) + payload_size,
     };
 }
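
Worked numbers for the comment above: the null flag needs only one byte, but it is padded out to the child's alignment, so a non-pointer-like optional costs one full alignment unit on top of the payload. Observable from plain Zig:

    const std = @import("std");

    test "optional abi size" {
        try std.testing.expectEqual(@as(usize, 2), @sizeOf(?u8)); // 1 + 1
        try std.testing.expectEqual(@as(usize, 16), @sizeOf(?u64)); // 8 + 8
    }
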
 
@@ -1600,18 +1711,22 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
     };
 }
 
-pub fn bitSize(ty: Type, pt: Zcu.PerThread) u64 {
-    return bitSizeAdvanced(ty, pt, .normal) catch unreachable;
+pub fn bitSize(ty: Type, zcu: *Zcu) u64 {
+    return bitSizeInner(ty, .normal, zcu, {}) catch unreachable;
+}
+
+pub fn bitSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 {
+    return bitSizeInner(ty, .sema, pt.zcu, pt.tid);
 }
 
-pub fn bitSizeAdvanced(
+pub fn bitSizeInner(
     ty: Type,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
 ) SemaError!u64 {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
-    const ip = &mod.intern_pool;
+    const target = zcu.getTarget();
+    const ip = &zcu.intern_pool;
 
     const strat_lazy: ResolveStratLazy = strat.toLazy();
 
@@ -1628,30 +1743,30 @@ pub fn bitSizeAdvanced(
             if (len == 0) return 0;
             const elem_ty = Type.fromInterned(array_type.child);
             const elem_size = @max(
-                (try elem_ty.abiAlignmentAdvanced(pt, strat_lazy)).scalar.toByteUnits() orelse 0,
-                (try elem_ty.abiSizeAdvanced(pt, strat_lazy)).scalar,
+                (try elem_ty.abiAlignmentInner(strat_lazy, zcu, tid)).scalar.toByteUnits() orelse 0,
+                (try elem_ty.abiSizeInner(strat_lazy, zcu, tid)).scalar,
             );
             if (elem_size == 0) return 0;
-            const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, strat);
+            const elem_bit_size = try elem_ty.bitSizeInner(strat, zcu, tid);
             return (len - 1) * 8 * elem_size + elem_bit_size;
         },
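
Worked numbers for the rule just above: every element except the last contributes a full stride of 8 * max(abiAlignment, abiSize) bits, and only the last contributes its value bits, so [4]u7 is (4 - 1) * 8 * 1 + 7 = 31 bits. A standalone check in plain Zig (the language's @bitSizeOf follows this same rule):

    const std = @import("std");

    test "array bit size" {
        try std.testing.expectEqual(@as(u16, 31), @bitSizeOf([4]u7));
    }
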
         .vector_type => |vector_type| {
             const child_ty = Type.fromInterned(vector_type.child);
-            const elem_bit_size = try child_ty.bitSizeAdvanced(pt, strat);
+            const elem_bit_size = try child_ty.bitSizeInner(strat, zcu, tid);
             return elem_bit_size * vector_type.len;
         },
         .opt_type => {
             // Optionals and error unions are not packed so their bitsize
             // includes padding bits.
-            return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
+            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
         },
 
-        .error_set_type, .inferred_error_set_type => return mod.errorSetBits(),
+        .error_set_type, .inferred_error_set_type => return zcu.errorSetBits(),
 
         .error_union_type => {
             // Optionals and error unions are not packed so their bitsize
             // includes padding bits.
-            return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
+            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
         },
         .func_type => unreachable, // represents machine code; not a pointer
         .simple_type => |t| switch (t) {
@@ -1681,7 +1796,7 @@ pub fn bitSizeAdvanced(
 
             .anyerror,
             .adhoc_inferred_error_set,
-            => return mod.errorSetBits(),
+            => return zcu.errorSetBits(),
 
             .anyopaque => unreachable,
             .type => unreachable,
@@ -1697,42 +1812,46 @@ pub fn bitSizeAdvanced(
             const struct_type = ip.loadStructType(ty.toIntern());
             const is_packed = struct_type.layout == .@"packed";
             if (strat == .sema) {
+                const pt = strat.pt(zcu, tid);
                 try ty.resolveFields(pt);
                 if (is_packed) try ty.resolveLayout(pt);
             }
             if (is_packed) {
-                return try Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).bitSizeAdvanced(pt, strat);
+                return try Type.fromInterned(struct_type.backingIntTypeUnordered(ip))
+                    .bitSizeInner(strat, zcu, tid);
             }
-            return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
+            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
         },
 
         .anon_struct_type => {
-            if (strat == .sema) try ty.resolveFields(pt);
-            return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
+            if (strat == .sema) try ty.resolveFields(strat.pt(zcu, tid));
+            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
         },
 
         .union_type => {
             const union_type = ip.loadUnionType(ty.toIntern());
-            const is_packed = ty.containerLayout(mod) == .@"packed";
+            const is_packed = ty.containerLayout(zcu) == .@"packed";
             if (strat == .sema) {
+                const pt = strat.pt(zcu, tid);
                 try ty.resolveFields(pt);
                 if (is_packed) try ty.resolveLayout(pt);
             }
             if (!is_packed) {
-                return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
+                return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
             }
             assert(union_type.flagsUnordered(ip).status.haveFieldTypes());
 
             var size: u64 = 0;
             for (0..union_type.field_types.len) |field_index| {
                 const field_ty = union_type.field_types.get(ip)[field_index];
-                size = @max(size, try Type.fromInterned(field_ty).bitSizeAdvanced(pt, strat));
+                size = @max(size, try Type.fromInterned(field_ty).bitSizeInner(strat, zcu, tid));
             }
 
             return size;
         },
         .opaque_type => unreachable,
-        .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).bitSizeAdvanced(pt, strat),
+        .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty)
+            .bitSizeInner(strat, zcu, tid),
 
         // values, not types
         .undef,
@@ -1760,61 +1879,61 @@ pub fn bitSizeAdvanced(
 
 /// Returns true if the type's layout is already resolved and it is safe
 /// to use `abiSize`, `abiAlignment` and `bitSize` on it.
-pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn layoutIsResolved(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip),
         .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip),
         .array_type => |array_type| {
             if (array_type.lenIncludingSentinel() == 0) return true;
-            return Type.fromInterned(array_type.child).layoutIsResolved(mod);
+            return Type.fromInterned(array_type.child).layoutIsResolved(zcu);
         },
-        .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod),
-        .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod),
+        .opt_type => |child| Type.fromInterned(child).layoutIsResolved(zcu),
+        .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(zcu),
         else => true,
     };
 }
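
A minimal sketch of the guard pattern this predicate exists for, under the new
signatures (hypothetical `ty: Type` and `zcu: *Zcu`, not part of the diff):

    // abiSize/bitSize assert that layout is resolved rather than resolving it,
    // so gate them on layoutIsResolved.
    if (ty.layoutIsResolved(zcu)) {
        const size = ty.abiSize(zcu);
        const bits = ty.bitSize(zcu);
        _ = .{ size, bits };
    }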
 
-pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isSinglePointer(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_info| ptr_info.flags.size == .One,
         else => false,
     };
 }
 
 /// Asserts `ty` is a pointer.
-pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size {
-    return ty.ptrSizeOrNull(mod).?;
+pub fn ptrSize(ty: Type, zcu: *const Zcu) std.builtin.Type.Pointer.Size {
+    return ty.ptrSizeOrNull(zcu).?;
 }
 
 /// Returns `null` if `ty` is not a pointer.
-pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn ptrSizeOrNull(ty: Type, zcu: *const Zcu) ?std.builtin.Type.Pointer.Size {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_info| ptr_info.flags.size,
         else => null,
     };
 }
 
-pub fn isSlice(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isSlice(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| ptr_type.flags.size == .Slice,
         else => false,
     };
 }
 
-pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type {
-    return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern()));
+pub fn slicePtrFieldType(ty: Type, zcu: *const Zcu) Type {
+    return Type.fromInterned(zcu.intern_pool.slicePtrType(ty.toIntern()));
 }
 
-pub fn isConstPtr(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isConstPtr(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| ptr_type.flags.is_const,
         else => false,
     };
 }
 
-pub fn isVolatilePtr(ty: Type, mod: *const Module) bool {
-    return isVolatilePtrIp(ty, &mod.intern_pool);
+pub fn isVolatilePtr(ty: Type, zcu: *const Zcu) bool {
+    return isVolatilePtrIp(ty, &zcu.intern_pool);
 }
 
 pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
@@ -1824,28 +1943,28 @@ pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
     };
 }
 
-pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isAllowzeroPtr(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| ptr_type.flags.is_allowzero,
         .opt_type => true,
         else => false,
     };
 }
 
-pub fn isCPtr(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isCPtr(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| ptr_type.flags.size == .C,
         else => false,
     };
 }
 
-pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isPtrAtRuntime(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
             .Slice => false,
             .One, .Many, .C => true,
         },
-        .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+        .opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
             .ptr_type => |p| switch (p.flags.size) {
                 .Slice, .C => false,
                 .Many, .One => !p.flags.is_allowzero,
@@ -1858,17 +1977,17 @@ pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool {
 
 /// For pointer-like optionals, returns true; otherwise returns the
 /// `allowzero` property of the pointer type.
-pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool {
-    if (ty.isPtrLikeOptional(mod)) {
+pub fn ptrAllowsZero(ty: Type, zcu: *const Zcu) bool {
+    if (ty.isPtrLikeOptional(zcu)) {
         return true;
     }
-    return ty.ptrInfo(mod).flags.is_allowzero;
+    return ty.ptrInfo(zcu).flags.is_allowzero;
 }
 
 /// See also `isPtrLikeOptional`.
-pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-        .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) {
+pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
+        .opt_type => |child_type| child_type == .anyerror_type or switch (zcu.intern_pool.indexToKey(child_type)) {
             .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero,
             .error_set_type, .inferred_error_set_type => true,
             else => false,
@@ -1881,10 +2000,10 @@ pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
 /// Returns true if the type is optional and would be lowered to a single pointer
 /// address value, using 0 for null. Note that this returns true for C pointers.
 /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`.
-pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isPtrLikeOptional(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| ptr_type.flags.size == .C,
-        .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+        .opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
             .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                 .Slice, .C => false,
                 .Many, .One => !ptr_type.flags.is_allowzero,
@@ -1898,8 +2017,8 @@ pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool {
 /// For *[N]T,  returns [N]T.
 /// For *T,     returns T.
 /// For [*]T,   returns T.
-pub fn childType(ty: Type, mod: *const Module) Type {
-    return childTypeIp(ty, &mod.intern_pool);
+pub fn childType(ty: Type, zcu: *const Zcu) Type {
+    return childTypeIp(ty, &zcu.intern_pool);
 }
 
 pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
@@ -1915,10 +2034,10 @@ pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
 /// For [N]T,        returns T.
 /// For []T,         returns T.
 /// For anyframe->T, returns T.
-pub fn elemType2(ty: Type, mod: *const Module) Type {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn elemType2(ty: Type, zcu: *const Zcu) Type {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
-            .One => Type.fromInterned(ptr_type.child).shallowElemType(mod),
+            .One => Type.fromInterned(ptr_type.child).shallowElemType(zcu),
             .Many, .C, .Slice => Type.fromInterned(ptr_type.child),
         },
         .anyframe_type => |child| {
@@ -1927,30 +2046,30 @@ pub fn elemType2(ty: Type, mod: *const Module) Type {
         },
         .vector_type => |vector_type| Type.fromInterned(vector_type.child),
         .array_type => |array_type| Type.fromInterned(array_type.child),
-        .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)),
+        .opt_type => |child| Type.fromInterned(zcu.intern_pool.childType(child)),
         else => unreachable,
     };
 }
 
-fn shallowElemType(child_ty: Type, mod: *const Module) Type {
-    return switch (child_ty.zigTypeTag(mod)) {
-        .Array, .Vector => child_ty.childType(mod),
+fn shallowElemType(child_ty: Type, zcu: *const Zcu) Type {
+    return switch (child_ty.zigTypeTag(zcu)) {
+        .Array, .Vector => child_ty.childType(zcu),
         else => child_ty,
     };
 }
 
 /// For vectors, returns the element type. Otherwise returns self.
-pub fn scalarType(ty: Type, mod: *Module) Type {
-    return switch (ty.zigTypeTag(mod)) {
-        .Vector => ty.childType(mod),
+pub fn scalarType(ty: Type, zcu: *const Zcu) Type {
+    return switch (ty.zigTypeTag(zcu)) {
+        .Vector => ty.childType(zcu),
         else => ty,
     };
 }
 
 /// Asserts that the type is an optional.
 /// Note that for C pointers this returns the type unmodified.
-pub fn optionalChild(ty: Type, mod: *const Module) Type {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn optionalChild(ty: Type, zcu: *const Zcu) Type {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .opt_type => |child| Type.fromInterned(child),
         .ptr_type => |ptr_type| b: {
             assert(ptr_type.flags.size == .C);
@@ -1962,8 +2081,8 @@ pub fn optionalChild(ty: Type, mod: *const Module) Type {
 
 /// Returns the tag type of a union, if the type is a union and it has a tag type.
 /// Otherwise, returns `null`.
-pub fn unionTagType(ty: Type, mod: *Module) ?Type {
-    const ip = &mod.intern_pool;
+pub fn unionTagType(ty: Type, zcu: *const Zcu) ?Type {
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .union_type => {},
         else => return null,
@@ -1981,8 +2100,8 @@ pub fn unionTagType(ty: Type, mod: *Module) ?Type {
 
 /// Same as `unionTagType` but includes safety tag.
 /// Codegen should use this version.
-pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
-    const ip = &mod.intern_pool;
+pub fn unionTagTypeSafety(ty: Type, zcu: *const Zcu) ?Type {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .union_type => {
             const union_type = ip.loadUnionType(ty.toIntern());
@@ -1996,35 +2115,35 @@ pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
 
 /// Asserts the type is a union; returns the tag type, even if the tag will
 /// not be stored at runtime.
-pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type {
-    const union_obj = mod.typeToUnion(ty).?;
+pub fn unionTagTypeHypothetical(ty: Type, zcu: *const Zcu) Type {
+    const union_obj = zcu.typeToUnion(ty).?;
     return Type.fromInterned(union_obj.enum_tag_ty);
 }
 
-pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type {
-    const ip = &mod.intern_pool;
-    const union_obj = mod.typeToUnion(ty).?;
+pub fn unionFieldType(ty: Type, enum_tag: Value, zcu: *const Zcu) ?Type {
+    const ip = &zcu.intern_pool;
+    const union_obj = zcu.typeToUnion(ty).?;
     const union_fields = union_obj.field_types.get(ip);
-    const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null;
+    const index = zcu.unionTagFieldIndex(union_obj, enum_tag) orelse return null;
     return Type.fromInterned(union_fields[index]);
 }
 
-pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type {
-    const ip = &mod.intern_pool;
-    const union_obj = mod.typeToUnion(ty).?;
+pub fn unionFieldTypeByIndex(ty: Type, index: usize, zcu: *const Zcu) Type {
+    const ip = &zcu.intern_pool;
+    const union_obj = zcu.typeToUnion(ty).?;
     return Type.fromInterned(union_obj.field_types.get(ip)[index]);
 }
 
-pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
-    const union_obj = mod.typeToUnion(ty).?;
-    return mod.unionTagFieldIndex(union_obj, enum_tag);
+pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
+    const union_obj = zcu.typeToUnion(ty).?;
+    return zcu.unionTagFieldIndex(union_obj, enum_tag);
 }
 
-pub fn unionHasAllZeroBitFieldTypes(ty: Type, pt: Zcu.PerThread) bool {
-    const ip = &pt.zcu.intern_pool;
-    const union_obj = pt.zcu.typeToUnion(ty).?;
+pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool {
+    const ip = &zcu.intern_pool;
+    const union_obj = zcu.typeToUnion(ty).?;
     for (union_obj.field_types.get(ip)) |field_ty| {
-        if (Type.fromInterned(field_ty).hasRuntimeBits(pt)) return false;
+        if (Type.fromInterned(field_ty).hasRuntimeBits(zcu)) return false;
     }
     return true;
 }
@@ -2032,20 +2151,21 @@ pub fn unionHasAllZeroBitFieldTypes(ty: Type, pt: Zcu.PerThread) bool {
 /// Returns the type used for backing storage of this union during comptime operations.
 /// Asserts the type is either an extern or packed union.
 pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
-    return switch (ty.containerLayout(pt.zcu)) {
-        .@"extern" => try pt.arrayType(.{ .len = ty.abiSize(pt), .child = .u8_type }),
-        .@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(pt))),
+    const zcu = pt.zcu;
+    return switch (ty.containerLayout(zcu)) {
+        .@"extern" => try pt.arrayType(.{ .len = ty.abiSize(zcu), .child = .u8_type }),
+        .@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))),
         .auto => unreachable,
     };
 }
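
`unionBackingType` keeps the `Zcu.PerThread` parameter because it interns new
types; only its size queries moved to `zcu`. Sketch of a call site (hypothetical
`union_ty` and `pt`, assuming an extern or packed union):

    // extern union -> [abiSize]u8; packed union -> uN with N == bitSize.
    const backing_ty = try union_ty.unionBackingType(pt);
    _ = backing_ty;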
 
-pub fn unionGetLayout(ty: Type, pt: Zcu.PerThread) Module.UnionLayout {
-    const union_obj = pt.zcu.intern_pool.loadUnionType(ty.toIntern());
-    return pt.getUnionLayout(union_obj);
+pub fn unionGetLayout(ty: Type, zcu: *Zcu) Zcu.UnionLayout {
+    const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern());
+    return Type.getUnionLayout(union_obj, zcu);
 }
 
-pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
-    const ip = &mod.intern_pool;
+pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayout {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => ip.loadStructType(ty.toIntern()).layout,
         .anon_struct_type => .auto,
@@ -2055,18 +2175,18 @@ pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout
 }
 
 /// Asserts that the type is an error union.
-pub fn errorUnionPayload(ty: Type, mod: *Module) Type {
-    return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type);
+pub fn errorUnionPayload(ty: Type, zcu: *const Zcu) Type {
+    return Type.fromInterned(zcu.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type);
 }
 
 /// Asserts that the type is an error union.
-pub fn errorUnionSet(ty: Type, mod: *Module) Type {
-    return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern()));
+pub fn errorUnionSet(ty: Type, zcu: *const Zcu) Type {
+    return Type.fromInterned(zcu.intern_pool.errorUnionSet(ty.toIntern()));
 }
 
 /// Returns false for unresolved inferred error sets.
-pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn errorSetIsEmpty(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ty.toIntern()) {
         .anyerror_type, .adhoc_inferred_error_set_type => false,
         else => switch (ip.indexToKey(ty.toIntern())) {
@@ -2083,20 +2203,20 @@ pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
 /// Returns true if it is an error set that includes anyerror, false otherwise.
 /// Note that the result may be a false negative if the type did not undergo
 /// error set resolution prior to this call.
-pub fn isAnyError(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn isAnyError(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ty.toIntern()) {
         .anyerror_type => true,
         .adhoc_inferred_error_set_type => false,
-        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
             .inferred_error_set_type => |i| ip.funcIesResolvedUnordered(i) == .anyerror_type,
             else => false,
         },
     };
 }
 
-pub fn isError(ty: Type, mod: *const Module) bool {
-    return switch (ty.zigTypeTag(mod)) {
+pub fn isError(ty: Type, zcu: *const Zcu) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .ErrorUnion, .ErrorSet => true,
         else => false,
     };
@@ -2127,8 +2247,8 @@ pub fn errorSetHasFieldIp(
 /// Returns whether ty, which must be an error set, includes an error `name`.
 /// Might return a false negative if `ty` is an inferred error set and not fully
 /// resolved yet.
-pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn errorSetHasField(ty: Type, name: []const u8, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ty.toIntern()) {
         .anyerror_type => true,
         else => switch (ip.indexToKey(ty.toIntern())) {
@@ -2152,20 +2272,20 @@ pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool {
 }
 
 /// Asserts the type is an array or vector or struct.
-pub fn arrayLen(ty: Type, mod: *const Module) u64 {
-    return ty.arrayLenIp(&mod.intern_pool);
+pub fn arrayLen(ty: Type, zcu: *const Zcu) u64 {
+    return ty.arrayLenIp(&zcu.intern_pool);
 }
 
 pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
     return ip.aggregateTypeLen(ty.toIntern());
 }
 
-pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 {
-    return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern());
+pub fn arrayLenIncludingSentinel(ty: Type, zcu: *const Zcu) u64 {
+    return zcu.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern());
 }
 
-pub fn vectorLen(ty: Type, mod: *const Module) u32 {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .vector_type => |vector_type| vector_type.len,
         .anon_struct_type => |tuple| @intCast(tuple.types.len),
         else => unreachable,
@@ -2173,8 +2293,8 @@ pub fn vectorLen(ty: Type, mod: *const Module) u32 {
 }
 
 /// Asserts the type is an array, pointer or vector.
-pub fn sentinel(ty: Type, mod: *const Module) ?Value {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .vector_type,
         .struct_type,
         .anon_struct_type,
@@ -2188,17 +2308,17 @@ pub fn sentinel(ty: Type, mod: *const Module) ?Value {
 }
 
 /// Returns true if and only if the type is a fixed-width integer.
-pub fn isInt(self: Type, mod: *const Module) bool {
+pub fn isInt(self: Type, zcu: *const Zcu) bool {
     return self.toIntern() != .comptime_int_type and
-        mod.intern_pool.isIntegerType(self.toIntern());
+        zcu.intern_pool.isIntegerType(self.toIntern());
 }
 
 /// Returns true if and only if the type is a fixed-width, signed integer.
-pub fn isSignedInt(ty: Type, mod: *const Module) bool {
+pub fn isSignedInt(ty: Type, zcu: *const Zcu) bool {
     return switch (ty.toIntern()) {
-        .c_char_type => mod.getTarget().charSignedness() == .signed,
+        .c_char_type => zcu.getTarget().charSignedness() == .signed,
         .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true,
-        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
             .int_type => |int_type| int_type.signedness == .signed,
             else => false,
         },
@@ -2206,11 +2326,11 @@ pub fn isSignedInt(ty: Type, mod: *const Module) bool {
 }
 
 /// Returns true if and only if the type is a fixed-width, unsigned integer.
-pub fn isUnsignedInt(ty: Type, mod: *const Module) bool {
+pub fn isUnsignedInt(ty: Type, zcu: *const Zcu) bool {
     return switch (ty.toIntern()) {
-        .c_char_type => mod.getTarget().charSignedness() == .unsigned,
+        .c_char_type => zcu.getTarget().charSignedness() == .unsigned,
         .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true,
-        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
             .int_type => |int_type| int_type.signedness == .unsigned,
             else => false,
         },
@@ -2219,27 +2339,27 @@ pub fn isUnsignedInt(ty: Type, mod: *const Module) bool {
 
 /// Returns true for integers, enums, error sets, and packed structs.
 /// If this function returns true, then intInfo() can be called on the type.
-pub fn isAbiInt(ty: Type, mod: *Module) bool {
-    return switch (ty.zigTypeTag(mod)) {
+pub fn isAbiInt(ty: Type, zcu: *const Zcu) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Int, .Enum, .ErrorSet => true,
-        .Struct => ty.containerLayout(mod) == .@"packed",
+        .Struct => ty.containerLayout(zcu) == .@"packed",
         else => false,
     };
 }
 
 /// Asserts the type is an integer, enum, error set, or vector of one of them.
-pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
-    const ip = &mod.intern_pool;
-    const target = mod.getTarget();
+pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
+    const ip = &zcu.intern_pool;
+    const target = zcu.getTarget();
     var ty = starting_ty;
 
     while (true) switch (ty.toIntern()) {
         .anyerror_type, .adhoc_inferred_error_set_type => {
-            return .{ .signedness = .unsigned, .bits = mod.errorSetBits() };
+            return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
         },
         .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() },
         .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() },
-        .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.cTypeBitSize(.char) },
+        .c_char_type => return .{ .signedness = zcu.getTarget().charSignedness(), .bits = target.cTypeBitSize(.char) },
         .c_short_type => return .{ .signedness = .signed, .bits = target.cTypeBitSize(.short) },
         .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.cTypeBitSize(.ushort) },
         .c_int_type => return .{ .signedness = .signed, .bits = target.cTypeBitSize(.int) },
@@ -2255,7 +2375,7 @@ pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
             .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
 
             .error_set_type, .inferred_error_set_type => {
-                return .{ .signedness = .unsigned, .bits = mod.errorSetBits() };
+                return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
             },
 
             .anon_struct_type => unreachable,
@@ -2363,35 +2483,35 @@ pub fn floatBits(ty: Type, target: Target) u16 {
 }
 
 /// Asserts the type is a function or a function pointer.
-pub fn fnReturnType(ty: Type, mod: *Module) Type {
-    return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern()));
+pub fn fnReturnType(ty: Type, zcu: *const Zcu) Type {
+    return Type.fromInterned(zcu.intern_pool.funcTypeReturnType(ty.toIntern()));
 }
 
 /// Asserts the type is a function.
-pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention {
-    return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc;
+pub fn fnCallingConvention(ty: Type, zcu: *const Zcu) std.builtin.CallingConvention {
+    return zcu.intern_pool.indexToKey(ty.toIntern()).func_type.cc;
 }
 
-pub fn isValidParamType(self: Type, mod: *const Module) bool {
-    return switch (self.zigTypeTagOrPoison(mod) catch return true) {
+pub fn isValidParamType(self: Type, zcu: *const Zcu) bool {
+    return switch (self.zigTypeTagOrPoison(zcu) catch return true) {
         .Opaque, .NoReturn => false,
         else => true,
     };
 }
 
-pub fn isValidReturnType(self: Type, mod: *const Module) bool {
-    return switch (self.zigTypeTagOrPoison(mod) catch return true) {
+pub fn isValidReturnType(self: Type, zcu: *const Zcu) bool {
+    return switch (self.zigTypeTagOrPoison(zcu) catch return true) {
         .Opaque => false,
         else => true,
     };
 }
 
 /// Asserts the type is a function.
-pub fn fnIsVarArgs(ty: Type, mod: *Module) bool {
-    return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args;
+pub fn fnIsVarArgs(ty: Type, zcu: *const Zcu) bool {
+    return zcu.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args;
 }
 
-pub fn isNumeric(ty: Type, mod: *const Module) bool {
+pub fn isNumeric(ty: Type, zcu: *const Zcu) bool {
     return switch (ty.toIntern()) {
         .f16_type,
         .f32_type,
@@ -2414,7 +2534,7 @@ pub fn isNumeric(ty: Type, mod: *const Module) bool {
         .c_ulonglong_type,
         => true,
 
-        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
             .int_type => true,
             else => false,
         },
@@ -2424,9 +2544,9 @@ pub fn isNumeric(ty: Type, mod: *const Module) bool {
 /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
 /// resolves field types rather than asserting they are already resolved.
 pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     var ty = starting_type;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     while (true) switch (ty.toIntern()) {
         .empty_struct_type => return Value.empty_struct,
 
@@ -2509,8 +2629,8 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
                 assert(struct_type.haveFieldTypes(ip));
                 if (struct_type.knownNonOpv(ip))
                     return null;
-                const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len);
-                defer mod.gpa.free(field_vals);
+                const field_vals = try zcu.gpa.alloc(InternPool.Index, struct_type.field_types.len);
+                defer zcu.gpa.free(field_vals);
                 for (field_vals, 0..) |*field_val, i_usize| {
                     const i: u32 = @intCast(i_usize);
                     if (struct_type.fieldIsComptime(ip, i)) {
@@ -2539,8 +2659,8 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
                 // In this case the struct has all comptime-known fields and
                 // therefore has one possible value.
                 // TODO: write something like getCoercedInts to avoid needing to dupe
-                const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip));
-                defer mod.gpa.free(duped_values);
+                const duped_values = try zcu.gpa.dupe(InternPool.Index, tuple.values.get(ip));
+                defer zcu.gpa.free(duped_values);
                 return Value.fromInterned(try pt.intern(.{ .aggregate = .{
                     .ty = ty.toIntern(),
                     .storage = .{ .elems = duped_values },
@@ -2583,7 +2703,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
                         return null;
                     },
                     .auto, .explicit => {
-                        if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null;
+                        if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(zcu)) return null;
 
                         switch (enum_type.names.len) {
                             0 => {
@@ -2635,17 +2755,25 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
     };
 }
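
`onePossibleValue` still takes `pt` since it may intern aggregate values; its
allocator and zero-bit checks now go through `zcu` directly. Sketch
(hypothetical `ty` and `pt`; `null` means more than one possible value):

    if (try ty.onePossibleValue(pt)) |opv| {
        // e.g. u0, void, and all-comptime tuples land here.
        _ = opv;
    }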
 
-/// During semantic analysis, instead call `Sema.typeRequiresComptime` which
+/// During semantic analysis, instead call `ty.comptimeOnlySema` which
 /// resolves field types rather than asserting they are already resolved.
-pub fn comptimeOnly(ty: Type, pt: Zcu.PerThread) bool {
-    return ty.comptimeOnlyAdvanced(pt, .normal) catch unreachable;
+pub fn comptimeOnly(ty: Type, zcu: *Zcu) bool {
+    return ty.comptimeOnlyInner(.normal, zcu, {}) catch unreachable;
+}
+
+pub fn comptimeOnlySema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
+    return try ty.comptimeOnlyInner(.sema, pt.zcu, pt.tid);
 }
 
 /// `generic_poison` will return false.
 /// May return false negatives when structs and unions are having their field types resolved.
-pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) SemaError!bool {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+pub fn comptimeOnlyInner(
+    ty: Type,
+    comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) SemaError!bool {
+    const ip = &zcu.intern_pool;
     return switch (ty.toIntern()) {
         .empty_struct_type => false,
 
@@ -2653,20 +2781,20 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve
             .int_type => false,
             .ptr_type => |ptr_type| {
                 const child_ty = Type.fromInterned(ptr_type.child);
-                switch (child_ty.zigTypeTag(mod)) {
-                    .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(pt, strat),
+                switch (child_ty.zigTypeTag(zcu)) {
+                    .Fn => return !try child_ty.fnHasRuntimeBitsInner(strat, zcu, tid),
                     .Opaque => return false,
-                    else => return child_ty.comptimeOnlyAdvanced(pt, strat),
+                    else => return child_ty.comptimeOnlyInner(strat, zcu, tid),
                 }
             },
             .anyframe_type => |child| {
                 if (child == .none) return false;
-                return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat);
+                return Type.fromInterned(child).comptimeOnlyInner(strat, zcu, tid);
             },
-            .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(pt, strat),
-            .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(pt, strat),
-            .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat),
-            .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(pt, strat),
+            .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyInner(strat, zcu, tid),
+            .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyInner(strat, zcu, tid),
+            .opt_type => |child| return Type.fromInterned(child).comptimeOnlyInner(strat, zcu, tid),
+            .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyInner(strat, zcu, tid),
 
             .error_set_type,
             .inferred_error_set_type,
@@ -2732,13 +2860,14 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve
 
                         errdefer struct_type.setRequiresComptime(ip, .unknown);
 
+                        const pt = strat.pt(zcu, tid);
                         try ty.resolveFields(pt);
 
                         for (0..struct_type.field_types.len) |i_usize| {
                             const i: u32 = @intCast(i_usize);
                             if (struct_type.fieldIsComptime(ip, i)) continue;
                             const field_ty = struct_type.field_types.get(ip)[i];
-                            if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
+                            if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
                                 // Note that this does not cause the layout to
                                 // be considered resolved. Comptime-only types
                                 // still maintain a layout of their
@@ -2757,7 +2886,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve
             .anon_struct_type => |tuple| {
                 for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                     const have_comptime_val = val != .none;
-                    if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) return true;
+                    if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) return true;
                 }
                 return false;
             },
@@ -2778,11 +2907,12 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve
 
                         errdefer union_type.setRequiresComptime(ip, .unknown);
 
+                        const pt = strat.pt(zcu, tid);
                         try ty.resolveFields(pt);
 
                         for (0..union_type.field_types.len) |field_idx| {
                             const field_ty = union_type.field_types.get(ip)[field_idx];
-                            if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
+                            if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
                                 union_type.setRequiresComptime(ip, .yes);
                                 return true;
                             }
@@ -2796,7 +2926,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve
 
             .opaque_type => false,
 
-            .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(pt, strat),
+            .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyInner(strat, zcu, tid),
 
             // values, not types
             .undef,
@@ -2823,53 +2953,53 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve
     };
 }
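
The `comptimeOnly`/`comptimeOnlySema` pair over `comptimeOnlyInner` is the
template this commit applies throughout: `.normal` passes a void `tid` and
asserts resolution, while `.sema` threads `pt.tid` and reconstructs a
`Zcu.PerThread` via `strat.pt(zcu, tid)` only at the points that may resolve
types. Sketch of the two entry points (hypothetical `ty`, `zcu`, `pt`):

    // Outside Sema: layout must already be resolved; cannot fail.
    const a = ty.comptimeOnly(zcu);
    // Inside Sema: may resolve field types, hence the error union.
    const b = try ty.comptimeOnlySema(pt);
    _ = .{ a, b };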
 
-pub fn isVector(ty: Type, mod: *const Module) bool {
-    return ty.zigTypeTag(mod) == .Vector;
+pub fn isVector(ty: Type, zcu: *const Zcu) bool {
+    return ty.zigTypeTag(zcu) == .Vector;
 }
 
 /// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
-pub fn totalVectorBits(ty: Type, pt: Zcu.PerThread) u64 {
-    if (!ty.isVector(pt.zcu)) return 0;
-    const v = pt.zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
-    return v.len * Type.fromInterned(v.child).bitSize(pt);
+pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 {
+    if (!ty.isVector(zcu)) return 0;
+    const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
+    return v.len * Type.fromInterned(v.child).bitSize(zcu);
 }
 
-pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
-    return switch (ty.zigTypeTag(mod)) {
+pub fn isArrayOrVector(ty: Type, zcu: *const Zcu) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Array, .Vector => true,
         else => false,
     };
 }
 
-pub fn isIndexable(ty: Type, mod: *Module) bool {
-    return switch (ty.zigTypeTag(mod)) {
+pub fn isIndexable(ty: Type, zcu: *const Zcu) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Array, .Vector => true,
-        .Pointer => switch (ty.ptrSize(mod)) {
+        .Pointer => switch (ty.ptrSize(zcu)) {
             .Slice, .Many, .C => true,
-            .One => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .One => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                 .Array, .Vector => true,
-                .Struct => ty.childType(mod).isTuple(mod),
+                .Struct => ty.childType(zcu).isTuple(zcu),
                 else => false,
             },
         },
-        .Struct => ty.isTuple(mod),
+        .Struct => ty.isTuple(zcu),
         else => false,
     };
 }
 
-pub fn indexableHasLen(ty: Type, mod: *Module) bool {
-    return switch (ty.zigTypeTag(mod)) {
+pub fn indexableHasLen(ty: Type, zcu: *const Zcu) bool {
+    return switch (ty.zigTypeTag(zcu)) {
         .Array, .Vector => true,
-        .Pointer => switch (ty.ptrSize(mod)) {
+        .Pointer => switch (ty.ptrSize(zcu)) {
             .Many, .C => false,
             .Slice => true,
-            .One => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .One => switch (ty.childType(zcu).zigTypeTag(zcu)) {
                 .Array, .Vector => true,
-                .Struct => ty.childType(mod).isTuple(mod),
+                .Struct => ty.childType(zcu).isTuple(zcu),
                 else => false,
             },
         },
-        .Struct => ty.isTuple(mod),
+        .Struct => ty.isTuple(zcu),
         else => false,
     };
 }
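
The two predicates above differ exactly on many-item and C pointers, which can
be indexed but carry no length. Sketch (hypothetical `ty` and `zcu`):

    // [*]u8: isIndexable == true, indexableHasLen == false.
    // []u8:  both true.
    const can_index = ty.isIndexable(zcu);
    const has_len = ty.indexableHasLen(zcu);
    _ = .{ can_index, has_len };
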
@@ -2973,17 +3103,17 @@ pub fn maxIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
 }
 
 /// Asserts the type is an enum or a union.
-pub fn intTagType(ty: Type, mod: *Module) Type {
-    const ip = &mod.intern_pool;
+pub fn intTagType(ty: Type, zcu: *const Zcu) Type {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
-        .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
+        .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(zcu),
         .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
         else => unreachable,
     };
 }
 
-pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn isNonexhaustiveEnum(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
             .nonexhaustive => true,
@@ -2995,8 +3125,8 @@ pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
 
 /// Asserts that `ty` is an error set and not `anyerror`.
 /// Asserts that `ty` is resolved if it is an inferred error set.
-pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
-    const ip = &mod.intern_pool;
+pub fn errorSetNames(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.Slice {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .error_set_type => |x| x.names,
         .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
@@ -3008,21 +3138,21 @@ pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Sli
     };
 }
 
-pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
-    return mod.intern_pool.loadEnumType(ty.toIntern()).names;
+pub fn enumFields(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.Slice {
+    return zcu.intern_pool.loadEnumType(ty.toIntern()).names;
 }
 
-pub fn enumFieldCount(ty: Type, mod: *Module) usize {
-    return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
+pub fn enumFieldCount(ty: Type, zcu: *const Zcu) usize {
+    return zcu.intern_pool.loadEnumType(ty.toIntern()).names.len;
 }
 
-pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
-    const ip = &mod.intern_pool;
+pub fn enumFieldName(ty: Type, field_index: usize, zcu: *const Zcu) InternPool.NullTerminatedString {
+    const ip = &zcu.intern_pool;
     return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
 }
 
-pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
-    const ip = &mod.intern_pool;
+pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, zcu: *const Zcu) ?u32 {
+    const ip = &zcu.intern_pool;
     const enum_type = ip.loadEnumType(ty.toIntern());
     return enum_type.nameIndex(ip, field_name);
 }
@@ -3030,8 +3160,8 @@ pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod
 /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
 /// an integer which represents the enum value. Returns the field index in
 /// declaration order, or `null` if `enum_tag` does not match any field.
-pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
-    const ip = &mod.intern_pool;
+pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
+    const ip = &zcu.intern_pool;
     const enum_type = ip.loadEnumType(ty.toIntern());
     const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
         .int => enum_tag.toIntern(),
@@ -3043,8 +3173,8 @@ pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
 }
 
 /// Returns `none` in the case of a tuple, which uses the integer index as the field name.
-pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
-    const ip = &mod.intern_pool;
+pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.OptionalNullTerminatedString {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
         .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
@@ -3052,8 +3182,8 @@ pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.Optional
     };
 }
 
-pub fn structFieldCount(ty: Type, mod: *Module) u32 {
-    const ip = &mod.intern_pool;
+pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
         .anon_struct_type => |anon_struct| anon_struct.types.len,
@@ -3062,8 +3192,8 @@ pub fn structFieldCount(ty: Type, mod: *Module) u32 {
 }
 
 /// Supports structs and unions.
-pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
-    const ip = &mod.intern_pool;
+pub fn structFieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]),
         .union_type => {
@@ -3075,33 +3205,111 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
     };
 }
 
-pub fn structFieldAlign(ty: Type, index: usize, pt: Zcu.PerThread) Alignment {
-    return ty.structFieldAlignAdvanced(index, pt, .normal) catch unreachable;
+pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
+    return ty.structFieldAlignAdvanced(index, .normal, zcu, {}) catch unreachable;
 }
 
-pub fn structFieldAlignAdvanced(ty: Type, index: usize, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Alignment {
-    const ip = &pt.zcu.intern_pool;
+pub fn structFieldAlignAdvanced(
+    ty: Type,
+    index: usize,
+    comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) !Alignment {
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
             assert(struct_type.layout != .@"packed");
             const explicit_align = struct_type.fieldAlign(ip, index);
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
-            return pt.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
+            return field_ty.structFieldAlignmentAdvanced(
+                explicit_align,
+                struct_type.layout,
+                strat,
+                zcu,
+                tid,
+            );
         },
         .anon_struct_type => |anon_struct| {
-            return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
+            return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner(
+                strat.toLazy(),
+                zcu,
+                tid,
+            )).scalar;
         },
         .union_type => {
             const union_obj = ip.loadUnionType(ty.toIntern());
-            return pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
+            return unionFieldNormalAlignmentAdvanced(
+                union_obj,
+                @intCast(index),
+                strat,
+                zcu,
+                tid,
+            );
         },
         else => unreachable,
     }
 }
 
-pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
-    const ip = &mod.intern_pool;
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn structFieldAlignmentAdvanced(
+    field_ty: Type,
+    explicit_alignment: InternPool.Alignment,
+    layout: std.builtin.Type.ContainerLayout,
+    comptime strat: Type.ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) Zcu.SemaError!InternPool.Alignment {
+    assert(layout != .@"packed");
+    if (explicit_alignment != .none) return explicit_alignment;
+    const ty_abi_align = (try field_ty.abiAlignmentInner(
+        strat.toLazy(),
+        zcu,
+        tid,
+    )).scalar;
+    switch (layout) {
+        .@"packed" => unreachable,
+        .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align,
+        .@"extern" => {},
+    }
+    // extern
+    if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) {
+        return ty_abi_align.maxStrict(.@"16");
+    }
+    return ty_abi_align;
+}
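
The extracted helper makes the C-interop rule explicit: for extern layout (and
for auto layout when emitting C objects), ABI integer fields of 128 bits or
more are aligned to at least 16 bytes. Sketch of a non-Sema call (hypothetical
`field_ty` and `zcu`):

    // u128 in an extern struct: natural alignment clamped up to at least 16.
    const al = field_ty.structFieldAlignmentAdvanced(.none, .@"extern", .normal, zcu, {}) catch
        unreachable;
    _ = al;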
+
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+pub fn unionFieldNormalAlignment(
+    loaded_union: InternPool.LoadedUnionType,
+    field_index: u32,
+    zcu: *Zcu,
+) InternPool.Alignment {
+    return unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal, zcu, {}) catch unreachable;
+}
+
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn unionFieldNormalAlignmentAdvanced(
+    loaded_union: InternPool.LoadedUnionType,
+    field_index: u32,
+    comptime strat: Type.ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) Zcu.SemaError!InternPool.Alignment {
+    const ip = &zcu.intern_pool;
+    assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
+    const field_align = loaded_union.fieldAlign(ip, field_index);
+    if (field_align != .none) return field_align;
+    const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+    if (field_ty.isNoReturn(zcu)) return .none;
+    return (try field_ty.abiAlignmentInner(strat.toLazy(), zcu, tid)).scalar;
+}
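
Note the `noreturn` special case: such fields report `.none` rather than an ABI
alignment. Sketch of the non-Sema wrapper (hypothetical `loaded_union` and
`zcu`):

    // Explicit per-field alignment wins; otherwise the field type's ABI
    // alignment, with noreturn fields contributing .none.
    const al = unionFieldNormalAlignment(loaded_union, 0, zcu);
    _ = al;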
+
+pub fn structFieldDefaultValue(ty: Type, index: usize, zcu: *const Zcu) Value {
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
@@ -3121,8 +3329,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
 }
 
 pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Value {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
@@ -3145,8 +3353,8 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val
     }
 }
 
-pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
         .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
@@ -3160,9 +3368,12 @@ pub const FieldOffset = struct {
 };
 
 /// Supports structs and unions.
-pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+pub fn structFieldOffset(ty: Type, index: usize, zcu: *Zcu) u64 {
+    const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
@@ -3176,17 +3387,17 @@ pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 {
             var big_align: Alignment = .none;
 
             for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
-                if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) {
+                if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) {
                     // comptime field
                     if (i == index) return offset;
                     continue;
                 }
 
-                const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+                const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
                 big_align = big_align.max(field_align);
                 offset = field_align.forward(offset);
                 if (i == index) return offset;
-                offset += Type.fromInterned(field_ty).abiSize(pt);
+                offset += Type.fromInterned(field_ty).abiSize(zcu);
             }
             offset = big_align.max(.@"1").forward(offset);
             return offset;
@@ -3196,7 +3407,7 @@ pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 {
             const union_type = ip.loadUnionType(ty.toIntern());
             if (!union_type.hasTag(ip))
                 return 0;
-            const layout = pt.getUnionLayout(union_type);
+            const layout = union_type.getUnionLayout(zcu);
             if (layout.tag_align.compare(.gte, layout.payload_align)) {
                 // {Tag, Payload}
                 return layout.payload_align.forward(layout.tag_size);
@@ -3210,7 +3421,7 @@ pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 {
     }
 }
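
For tagged unions, the branch above encodes the layout decision: when the tag
is at least as aligned as the payload, the union is laid out `{Tag, Payload}`
and every payload field begins at `layout.payload_align.forward(layout.tag_size)`.
Worked instance (assumed layout values, not from the diff):

    // tag_size = 2, tag_align = 8, payload_align = 4 gives {Tag, Payload},
    // so payload fields start at forward(2, 4) == 4.
    const off = union_ty.structFieldOffset(0, zcu);
    _ = off;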
 
-pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc {
+pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Zcu.LazySrcLoc {
     const ip = &zcu.intern_pool;
     return .{
         .base_node_inst = switch (ip.indexToKey(ty.toIntern())) {
@@ -3222,11 +3433,11 @@ pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc {
             },
             else => return null,
         },
-        .offset = Module.LazySrcLoc.Offset.nodeOffset(0),
+        .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0),
     };
 }
 
-pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc {
+pub fn srcLoc(ty: Type, zcu: *Zcu) Zcu.LazySrcLoc {
     return ty.srcLocOrNull(zcu).?;
 }
 
@@ -3234,8 +3445,8 @@ pub fn isGenericPoison(ty: Type) bool {
     return ty.toIntern() == .generic_poison_type;
 }
 
-pub fn isTuple(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn isTuple(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
@@ -3248,16 +3459,16 @@ pub fn isTuple(ty: Type, mod: *Module) bool {
     };
 }
 
-pub fn isAnonStruct(ty: Type, mod: *Module) bool {
+pub fn isAnonStruct(ty: Type, zcu: *const Zcu) bool {
     if (ty.toIntern() == .empty_struct_type) return true;
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
         else => false,
     };
 }
 
-pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
-    const ip = &mod.intern_pool;
+pub fn isTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
@@ -3270,15 +3481,15 @@ pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
     };
 }
 
-pub fn isSimpleTuple(ty: Type, mod: *Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isSimpleTuple(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
         else => false,
     };
 }
 
-pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool {
-    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn isSimpleTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
+    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
         .anon_struct_type => true,
         else => false,
     };
@@ -3286,11 +3497,11 @@ pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool {
 
 /// Traverses optional child types and error union payloads until the type
 /// is neither an optional nor an error union. For `E!?u32`, returns `u32`;
 /// for `*u8`, returns `*u8`.
-pub fn optEuBaseType(ty: Type, mod: *Module) Type {
+pub fn optEuBaseType(ty: Type, zcu: *const Zcu) Type {
     var cur = ty;
-    while (true) switch (cur.zigTypeTag(mod)) {
-        .Optional => cur = cur.optionalChild(mod),
-        .ErrorUnion => cur = cur.errorUnionPayload(mod),
+    while (true) switch (cur.zigTypeTag(zcu)) {
+        .Optional => cur = cur.optionalChild(zcu),
+        .ErrorUnion => cur = cur.errorUnionPayload(zcu),
         else => return cur,
     };
 }
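
Sketch of the unwrap behavior documented above (hypothetical `wrapped_ty` and
`zcu`):

    // E!?u32 -> ?u32 -> u32; pointers are returned unchanged.
    const base = wrapped_ty.optEuBaseType(zcu);
    _ = base;
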
@@ -3406,7 +3617,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
         if (i == field_idx) {
             bit_offset = running_bits;
         }
-        running_bits += @intCast(f_ty.bitSize(pt));
+        running_bits += @intCast(f_ty.bitSize(zcu));
     }
 
     const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0)
@@ -3423,9 +3634,9 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
     // targets before adding the necessary complications to this code. This will not
     // cause miscompilations; it only means the field pointer uses bit masking when it
     // might not be strictly necessary.
-    if (res_bit_offset % 8 == 0 and field_ty.bitSize(pt) == field_ty.abiSize(pt) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
+    if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
         const byte_offset = res_bit_offset / 8;
-        const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(pt).toByteUnits().?));
+        const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
         return .{ .byte_ptr = .{
             .offset = byte_offset,
             .alignment = new_align,
@@ -3768,14 +3979,14 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
         alignment: Alignment = .none,
         vector_index: VI = .none,
     } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
-        const elem_bits = elem_ty.bitSize(pt);
+        const elem_bits = elem_ty.bitSize(zcu);
         if (elem_bits == 0) break :blk .{};
         const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
         if (!is_packed) break :blk .{};
 
         break :blk .{
             .host_size = @intCast(parent_ty.arrayLen(zcu)),
-            .alignment = parent_ty.abiAlignment(pt),
+            .alignment = parent_ty.abiAlignment(zcu),
             .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
         };
     } else .{};
@@ -3789,7 +4000,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
         }
         // If the addend is not a comptime-known value we can still count on
         // it being a multiple of the type size.
-        const elem_size = (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar;
+        const elem_size = (try elem_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar;
         const addend = if (offset) |off| elem_size * off else elem_size;
 
         // The resulting pointer is aligned to the lcd between the offset (an
src/Value.zig
@@ -65,19 +65,19 @@ pub fn fmtValueSemaFull(ctx: print_value.FormatContext) std.fmt.Formatter(print_
 /// Converts `val` to a null-terminated string stored in the InternPool.
 /// Asserts `val` is an array of `u8`
 pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
-    const mod = pt.zcu;
-    assert(ty.zigTypeTag(mod) == .Array);
-    assert(ty.childType(mod).toIntern() == .u8_type);
-    const ip = &mod.intern_pool;
-    switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
-        .bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(mod), ip),
-        .elems => return arrayToIpString(val, ty.arrayLen(mod), pt),
+    const zcu = pt.zcu;
+    assert(ty.zigTypeTag(zcu) == .Array);
+    assert(ty.childType(zcu).toIntern() == .u8_type);
+    const ip = &zcu.intern_pool;
+    switch (zcu.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
+        .bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(zcu), ip),
+        .elems => return arrayToIpString(val, ty.arrayLen(zcu), pt),
         .repeated_elem => |elem| {
-            const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt));
-            const len: u32 = @intCast(ty.arrayLen(mod));
-            const strings = ip.getLocal(pt.tid).getMutableStrings(mod.gpa);
+            const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu));
+            const len: u32 = @intCast(ty.arrayLen(zcu));
+            const strings = ip.getLocal(pt.tid).getMutableStrings(zcu.gpa);
             try strings.appendNTimes(.{byte}, len);
-            return ip.getOrPutTrailingString(mod.gpa, pt.tid, len, .no_embedded_nulls);
+            return ip.getOrPutTrailingString(zcu.gpa, pt.tid, len, .no_embedded_nulls);
         },
     }
 }
@@ -85,17 +85,17 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi
 /// Asserts that the value is representable as an array of bytes.
 /// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
 pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) ![]u8 {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(val.toIntern())) {
         .enum_literal => |enum_literal| allocator.dupe(u8, enum_literal.toSlice(ip)),
-        .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(pt), allocator, pt),
+        .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(zcu), allocator, pt),
         .aggregate => |aggregate| switch (aggregate.storage) {
-            .bytes => |bytes| try allocator.dupe(u8, bytes.toSlice(ty.arrayLenIncludingSentinel(mod), ip)),
-            .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, pt),
+            .bytes => |bytes| try allocator.dupe(u8, bytes.toSlice(ty.arrayLenIncludingSentinel(zcu), ip)),
+            .elems => try arrayToAllocatedBytes(val, ty.arrayLen(zcu), allocator, pt),
             .repeated_elem => |elem| {
-                const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt));
-                const result = try allocator.alloc(u8, @intCast(ty.arrayLen(mod)));
+                const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu));
+                const result = try allocator.alloc(u8, @intCast(ty.arrayLen(zcu)));
                 @memset(result, byte);
                 return result;
             },
@@ -108,15 +108,15 @@ fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, pt: Zcu.Per
     const result = try allocator.alloc(u8, @intCast(len));
     for (result, 0..) |*elem, i| {
         const elem_val = try val.elemValue(pt, i);
-        elem.* = @intCast(elem_val.toUnsignedInt(pt));
+        elem.* = @intCast(elem_val.toUnsignedInt(pt.zcu));
     }
     return result;
 }
 
 fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
-    const mod = pt.zcu;
-    const gpa = mod.gpa;
-    const ip = &mod.intern_pool;
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
     const len: u32 = @intCast(len_u64);
     const strings = ip.getLocal(pt.tid).getMutableStrings(gpa);
     try strings.ensureUnusedCapacity(len);
@@ -126,7 +126,7 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null
         const prev_len = strings.mutate.len;
         const elem_val = try val.elemValue(pt, i);
         assert(strings.mutate.len == prev_len);
-        const byte: u8 = @intCast(elem_val.toUnsignedInt(pt));
+        const byte: u8 = @intCast(elem_val.toUnsignedInt(zcu));
         strings.appendAssumeCapacity(.{byte});
     }
     return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls);
@@ -178,50 +178,55 @@ pub fn intFromEnum(val: Value, ty: Type, pt: Zcu.PerThread) Allocator.Error!Valu
 pub const ResolveStrat = Type.ResolveStrat;
 
 /// Asserts the value is an integer.
-pub fn toBigInt(val: Value, space: *BigIntSpace, pt: Zcu.PerThread) BigIntConst {
-    return val.toBigIntAdvanced(space, pt, .normal) catch unreachable;
+pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *Zcu) BigIntConst {
+    return val.toBigIntAdvanced(space, .normal, zcu, {}) catch unreachable;
+}
+
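+/// Asserts the value is an integer. Like `toBigInt`, but resolves lazy values via Sema when needed.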
+pub fn toBigIntSema(val: Value, space: *BigIntSpace, pt: Zcu.PerThread) !BigIntConst {
+    return try val.toBigIntAdvanced(space, .sema, pt.zcu, pt.tid);
 }
 
 /// Asserts the value is an integer.
 pub fn toBigIntAdvanced(
     val: Value,
     space: *BigIntSpace,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
 ) Module.CompileError!BigIntConst {
     return switch (val.toIntern()) {
         .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
         .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(),
         .null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
-        else => switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .int => |int| switch (int.storage) {
                 .u64, .i64, .big_int => int.storage.toBigInt(space),
                 .lazy_align, .lazy_size => |ty| {
-                    if (strat == .sema) try Type.fromInterned(ty).resolveLayout(pt);
+                    if (strat == .sema) try Type.fromInterned(ty).resolveLayout(strat.pt(zcu, tid));
                     const x = switch (int.storage) {
                         else => unreachable,
-                        .lazy_align => Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0,
-                        .lazy_size => Type.fromInterned(ty).abiSize(pt),
+                        .lazy_align => Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0,
+                        .lazy_size => Type.fromInterned(ty).abiSize(zcu),
                     };
                     return BigIntMutable.init(&space.limbs, x).toConst();
                 },
             },
-            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, pt, strat),
+            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, strat, zcu, tid),
             .opt, .ptr => BigIntMutable.init(
                 &space.limbs,
-                (try val.getUnsignedIntAdvanced(pt, strat)).?,
+                (try val.getUnsignedIntInner(strat, zcu, tid)).?,
             ).toConst(),
             else => unreachable,
         },
     };
 }
 
-pub fn isFuncBody(val: Value, mod: *Module) bool {
-    return mod.intern_pool.isFuncBody(val.toIntern());
+pub fn isFuncBody(val: Value, zcu: *Module) bool {
+    return zcu.intern_pool.isFuncBody(val.toIntern());
 }
 
-pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn getFunction(val: Value, zcu: *Module) ?InternPool.Key.Func {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .func => |x| x,
         else => null,
     };
@@ -236,68 +241,79 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
 
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
-pub fn getUnsignedInt(val: Value, pt: Zcu.PerThread) ?u64 {
-    return getUnsignedIntAdvanced(val, pt, .normal) catch unreachable;
+pub fn getUnsignedInt(val: Value, zcu: *Zcu) ?u64 {
+    return getUnsignedIntInner(val, .normal, zcu, {}) catch unreachable;
+}
+
+/// Asserts the value is an integer and it fits in a u64
+pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 {
+    return getUnsignedInt(val, zcu).?;
+}
+
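+/// Like `getUnsignedInt`, but resolves lazy values via Sema when needed.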
+pub fn getUnsignedIntSema(val: Value, pt: Zcu.PerThread) !?u64 {
+    return try val.getUnsignedIntInner(.sema, pt.zcu, pt.tid);
 }
 
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
-pub fn getUnsignedIntAdvanced(val: Value, pt: Zcu.PerThread, comptime strat: ResolveStrat) !?u64 {
-    const mod = pt.zcu;
+pub fn getUnsignedIntInner(
+    val: Value,
+    comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) !?u64 {
     return switch (val.toIntern()) {
         .undef => unreachable,
         .bool_false => 0,
         .bool_true => 1,
-        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .undef => unreachable,
             .int => |int| switch (int.storage) {
                 .big_int => |big_int| big_int.to(u64) catch null,
                 .u64 => |x| x,
                 .i64 => |x| std.math.cast(u64, x),
-                .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(pt, strat.toLazy())).scalar.toByteUnits() orelse 0,
-                .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(pt, strat.toLazy())).scalar,
+                .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentInner(strat.toLazy(), zcu, tid)).scalar.toByteUnits() orelse 0,
+                .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeInner(strat.toLazy(), zcu, tid)).scalar,
             },
             .ptr => |ptr| switch (ptr.base_addr) {
                 .int => ptr.byte_offset,
                 .field => |field| {
-                    const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(pt, strat)) orelse return null;
-                    const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
-                    if (strat == .sema) try struct_ty.resolveLayout(pt);
-                    return base_addr + struct_ty.structFieldOffset(@intCast(field.index), pt) + ptr.byte_offset;
+                    const base_addr = (try Value.fromInterned(field.base).getUnsignedIntInner(strat, zcu, tid)) orelse return null;
+                    const struct_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
+                    if (strat == .sema) {
+                        const pt = strat.pt(zcu, tid);
+                        try struct_ty.resolveLayout(pt);
+                    }
+                    return base_addr + struct_ty.structFieldOffset(@intCast(field.index), zcu) + ptr.byte_offset;
                 },
                 else => null,
             },
             .opt => |opt| switch (opt.val) {
                 .none => 0,
-                else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(pt, strat),
+                else => |payload| Value.fromInterned(payload).getUnsignedIntInner(strat, zcu, tid),
             },
             else => null,
         },
     };
 }
 
-/// Asserts the value is an integer and it fits in a u64
-pub fn toUnsignedInt(val: Value, pt: Zcu.PerThread) u64 {
-    return getUnsignedInt(val, pt).?;
-}
-
 /// Asserts the value is an integer and it fits in a u64
 pub fn toUnsignedIntSema(val: Value, pt: Zcu.PerThread) !u64 {
-    return (try getUnsignedIntAdvanced(val, pt, .sema)).?;
+    return (try getUnsignedIntInner(val, .sema, pt.zcu, pt.tid)).?;
 }
 
 /// Asserts the value is an integer and it fits in an i64
-pub fn toSignedInt(val: Value, pt: Zcu.PerThread) i64 {
+pub fn toSignedInt(val: Value, zcu: *Zcu) i64 {
     return switch (val.toIntern()) {
         .bool_false => 0,
         .bool_true => 1,
-        else => switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .int => |int| switch (int.storage) {
                 .big_int => |big_int| big_int.to(i64) catch unreachable,
                 .i64 => |x| x,
                 .u64 => |x| @intCast(x),
-                .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0),
-                .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(pt)),
+                .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0),
+                .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(zcu)),
             },
             else => unreachable,
         },
@@ -326,41 +342,41 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro
     Unimplemented,
     OutOfMemory,
 }!void {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const endian = target.cpu.arch.endian();
-    if (val.isUndef(mod)) {
-        const size: usize = @intCast(ty.abiSize(pt));
+    if (val.isUndef(zcu)) {
+        const size: usize = @intCast(ty.abiSize(zcu));
         @memset(buffer[0..size], 0xaa);
         return;
     }
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
+    const ip = &zcu.intern_pool;
+    switch (ty.zigTypeTag(zcu)) {
         .Void => {},
         .Bool => {
             buffer[0] = @intFromBool(val.toBool());
         },
         .Int, .Enum => {
-            const int_info = ty.intInfo(mod);
+            const int_info = ty.intInfo(zcu);
             const bits = int_info.bits;
             const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
 
             var bigint_buffer: BigIntSpace = undefined;
-            const bigint = val.toBigInt(&bigint_buffer, pt);
+            const bigint = val.toBigInt(&bigint_buffer, zcu);
             bigint.writeTwosComplement(buffer[0..byte_count], endian);
         },
         .Float => switch (ty.floatBits(target)) {
-            16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, pt)), endian),
-            32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, pt)), endian),
-            64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, pt)), endian),
-            80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, pt)), endian),
-            128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, pt)), endian),
+            16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, zcu)), endian),
+            32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, zcu)), endian),
+            64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, zcu)), endian),
+            80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, zcu)), endian),
+            128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, zcu)), endian),
             else => unreachable,
         },
         .Array => {
-            const len = ty.arrayLen(mod);
-            const elem_ty = ty.childType(mod);
-            const elem_size: usize = @intCast(elem_ty.abiSize(pt));
+            const len = ty.arrayLen(zcu);
+            const elem_ty = ty.childType(zcu);
+            const elem_size: usize = @intCast(elem_ty.abiSize(zcu));
             var elem_i: usize = 0;
             var buf_off: usize = 0;
             while (elem_i < len) : (elem_i += 1) {
@@ -372,15 +388,15 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro
         .Vector => {
             // We use byte_count instead of abi_size here, so that any padding bytes
             // follow the data bytes, on both big- and little-endian systems.
-            const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+            const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
             return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
         },
         .Struct => {
-            const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
+            const struct_type = zcu.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
             switch (struct_type.layout) {
                 .auto => return error.IllDefinedMemoryLayout,
                 .@"extern" => for (0..struct_type.field_types.len) |field_index| {
-                    const off: usize = @intCast(ty.structFieldOffset(field_index, pt));
+                    const off: usize = @intCast(ty.structFieldOffset(field_index, zcu));
                     const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
                         .bytes => |bytes| {
                             buffer[off] = bytes.at(field_index, ip);
@@ -393,13 +409,13 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro
                     try writeToMemory(field_val, field_ty, pt, buffer[off..]);
                 },
                 .@"packed" => {
-                    const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+                    const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
                     return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
                 },
             }
         },
         .ErrorSet => {
-            const bits = mod.errorSetBits();
+            const bits = zcu.errorSetBits();
             const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
 
             const name = switch (ip.indexToKey(val.toIntern())) {
@@ -414,37 +430,37 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro
             ).toConst();
             bigint.writeTwosComplement(buffer[0..byte_count], endian);
         },
-        .Union => switch (ty.containerLayout(mod)) {
+        .Union => switch (ty.containerLayout(zcu)) {
             .auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
             .@"extern" => {
-                if (val.unionTag(mod)) |union_tag| {
-                    const union_obj = mod.typeToUnion(ty).?;
-                    const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
+                if (val.unionTag(zcu)) |union_tag| {
+                    const union_obj = zcu.typeToUnion(ty).?;
+                    const field_index = zcu.unionTagFieldIndex(union_obj, union_tag).?;
                     const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                     const field_val = try val.fieldValue(pt, field_index);
-                    const byte_count: usize = @intCast(field_type.abiSize(pt));
+                    const byte_count: usize = @intCast(field_type.abiSize(zcu));
                     return writeToMemory(field_val, field_type, pt, buffer[0..byte_count]);
                 } else {
                     const backing_ty = try ty.unionBackingType(pt);
-                    const byte_count: usize = @intCast(backing_ty.abiSize(pt));
-                    return writeToMemory(val.unionValue(mod), backing_ty, pt, buffer[0..byte_count]);
+                    const byte_count: usize = @intCast(backing_ty.abiSize(zcu));
+                    return writeToMemory(val.unionValue(zcu), backing_ty, pt, buffer[0..byte_count]);
                 }
             },
             .@"packed" => {
                 const backing_ty = try ty.unionBackingType(pt);
-                const byte_count: usize = @intCast(backing_ty.abiSize(pt));
+                const byte_count: usize = @intCast(backing_ty.abiSize(zcu));
                 return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
             },
         },
         .Pointer => {
-            if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout;
-            if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef;
+            if (ty.isSlice(zcu)) return error.IllDefinedMemoryLayout;
+            if (!val.ptrHasIntAddr(zcu)) return error.ReinterpretDeclRef;
             return val.writeToMemory(Type.usize, pt, buffer);
         },
         .Optional => {
-            if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout;
-            const child = ty.optionalChild(mod);
-            const opt_val = val.optionalValue(mod);
+            if (!ty.isPtrLikeOptional(zcu)) return error.IllDefinedMemoryLayout;
+            const child = ty.optionalChild(zcu);
+            const opt_val = val.optionalValue(zcu);
             if (opt_val) |some| {
                 return some.writeToMemory(child, pt, buffer);
             } else {
@@ -466,18 +482,18 @@ pub fn writeToPackedMemory(
     buffer: []u8,
     bit_offset: usize,
 ) error{ ReinterpretDeclRef, OutOfMemory }!void {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const target = zcu.getTarget();
     const endian = target.cpu.arch.endian();
-    if (val.isUndef(mod)) {
-        const bit_size: usize = @intCast(ty.bitSize(pt));
+    if (val.isUndef(zcu)) {
+        const bit_size: usize = @intCast(ty.bitSize(zcu));
         if (bit_size != 0) {
             std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
         }
         return;
     }
-    switch (ty.zigTypeTag(mod)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Void => {},
         .Bool => {
             const byte_index = switch (endian) {
@@ -492,34 +508,34 @@ pub fn writeToPackedMemory(
         },
         .Int, .Enum => {
             if (buffer.len == 0) return;
-            const bits = ty.intInfo(mod).bits;
+            const bits = ty.intInfo(zcu).bits;
             if (bits == 0) return;
 
             switch (ip.indexToKey((try val.intFromEnum(ty, pt)).toIntern()).int.storage) {
                 inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
                 .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
                 .lazy_align => |lazy_align| {
-                    const num = Type.fromInterned(lazy_align).abiAlignment(pt).toByteUnits() orelse 0;
+                    const num = Type.fromInterned(lazy_align).abiAlignment(zcu).toByteUnits() orelse 0;
                     std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
                 },
                 .lazy_size => |lazy_size| {
-                    const num = Type.fromInterned(lazy_size).abiSize(pt);
+                    const num = Type.fromInterned(lazy_size).abiSize(zcu);
                     std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
                 },
             }
         },
         .Float => switch (ty.floatBits(target)) {
-            16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, pt)), endian),
-            32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, pt)), endian),
-            64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, pt)), endian),
-            80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, pt)), endian),
-            128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, pt)), endian),
+            16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, zcu)), endian),
+            32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, zcu)), endian),
+            64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, zcu)), endian),
+            80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, zcu)), endian),
+            128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, zcu)), endian),
             else => unreachable,
         },
         .Vector => {
-            const elem_ty = ty.childType(mod);
-            const elem_bit_size: u16 = @intCast(elem_ty.bitSize(pt));
-            const len: usize = @intCast(ty.arrayLen(mod));
+            const elem_ty = ty.childType(zcu);
+            const elem_bit_size: u16 = @intCast(elem_ty.bitSize(zcu));
+            const len: usize = @intCast(ty.arrayLen(zcu));
 
             var bits: u16 = 0;
             var elem_i: usize = 0;
@@ -544,37 +560,37 @@ pub fn writeToPackedMemory(
                     .repeated_elem => |elem| elem,
                 });
                 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-                const field_bits: u16 = @intCast(field_ty.bitSize(pt));
+                const field_bits: u16 = @intCast(field_ty.bitSize(zcu));
                 try field_val.writeToPackedMemory(field_ty, pt, buffer, bit_offset + bits);
                 bits += field_bits;
             }
         },
         .Union => {
-            const union_obj = mod.typeToUnion(ty).?;
+            const union_obj = zcu.typeToUnion(ty).?;
             switch (union_obj.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
                 .@"packed" => {
-                    if (val.unionTag(mod)) |union_tag| {
-                        const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
+                    if (val.unionTag(zcu)) |union_tag| {
+                        const field_index = zcu.unionTagFieldIndex(union_obj, union_tag).?;
                         const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                         const field_val = try val.fieldValue(pt, field_index);
                         return field_val.writeToPackedMemory(field_type, pt, buffer, bit_offset);
                     } else {
                         const backing_ty = try ty.unionBackingType(pt);
-                        return val.unionValue(mod).writeToPackedMemory(backing_ty, pt, buffer, bit_offset);
+                        return val.unionValue(zcu).writeToPackedMemory(backing_ty, pt, buffer, bit_offset);
                     }
                 },
             }
         },
         .Pointer => {
-            assert(!ty.isSlice(mod)); // No well defined layout.
-            if (!val.ptrHasIntAddr(mod)) return error.ReinterpretDeclRef;
+            assert(!ty.isSlice(zcu)); // No well defined layout.
+            if (!val.ptrHasIntAddr(zcu)) return error.ReinterpretDeclRef;
             return val.writeToPackedMemory(Type.usize, pt, buffer, bit_offset);
         },
         .Optional => {
-            assert(ty.isPtrLikeOptional(mod));
-            const child = ty.optionalChild(mod);
-            const opt_val = val.optionalValue(mod);
+            assert(ty.isPtrLikeOptional(zcu));
+            const child = ty.optionalChild(zcu);
+            const opt_val = val.optionalValue(zcu);
             if (opt_val) |some| {
                 return some.writeToPackedMemory(child, pt, buffer, bit_offset);
             } else {
@@ -599,11 +615,11 @@ pub fn readFromMemory(
     Unimplemented,
     OutOfMemory,
 }!Value {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const target = zcu.getTarget();
     const endian = target.cpu.arch.endian();
-    switch (ty.zigTypeTag(mod)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Void => return Value.void,
         .Bool => {
             if (buffer[0] == 0) {
@@ -615,24 +631,24 @@ pub fn readFromMemory(
         .Int, .Enum => |ty_tag| {
             const int_ty = switch (ty_tag) {
                 .Int => ty,
-                .Enum => ty.intTagType(mod),
+                .Enum => ty.intTagType(zcu),
                 else => unreachable,
             };
-            const int_info = int_ty.intInfo(mod);
+            const int_info = int_ty.intInfo(zcu);
             const bits = int_info.bits;
             const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
             if (bits == 0 or buffer.len == 0) return pt.getCoerced(try pt.intValue(int_ty, 0), ty);
 
             if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                 .signed => {
                     const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
                     const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
                     return pt.getCoerced(try pt.intValue(int_ty, result), ty);
                 },
                 .unsigned => {
                     const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
                     const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
                     return pt.getCoerced(try pt.intValue(int_ty, result), ty);
                 },
             } else { // Slow path, we have to construct a big-int
                 const Limb = std.math.big.Limb;
@@ -641,7 +657,7 @@ pub fn readFromMemory(
 
                 var bigint = BigIntMutable.init(limbs_buffer, 0);
                 bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
                 return pt.getCoerced(try pt.intValue_big(int_ty, bigint.toConst()), ty);
             }
         },
         .Float => return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -656,12 +672,12 @@ pub fn readFromMemory(
             },
         } })),
         .Array => {
-            const elem_ty = ty.childType(mod);
-            const elem_size = elem_ty.abiSize(pt);
-            const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod)));
+            const elem_ty = ty.childType(zcu);
+            const elem_size = elem_ty.abiSize(zcu);
+            const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
             var offset: usize = 0;
             for (elems) |*elem| {
                 elem.* = (try readFromMemory(elem_ty, pt, buffer[offset..], arena)).toIntern();
                 offset += @intCast(elem_size);
             }
             return Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -672,11 +688,11 @@ pub fn readFromMemory(
         .Vector => {
             // We use byte_count instead of abi_size here, so that any padding bytes
             // follow the data bytes, on both big- and little-endian systems.
-            const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+            const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
             return readFromPackedMemory(ty, pt, buffer[0..byte_count], 0, arena);
         },
         .Struct => {
-            const struct_type = mod.typeToStruct(ty).?;
+            const struct_type = zcu.typeToStruct(ty).?;
             switch (struct_type.layout) {
                 .auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .@"extern" => {
@@ -684,9 +700,9 @@ pub fn readFromMemory(
                     const field_vals = try arena.alloc(InternPool.Index, field_types.len);
                     for (field_vals, 0..) |*field_val, i| {
                         const field_ty = Type.fromInterned(field_types.get(ip)[i]);
-                        const off: usize = @intCast(ty.structFieldOffset(i, pt));
-                        const sz: usize = @intCast(field_ty.abiSize(pt));
+                        const off: usize = @intCast(ty.structFieldOffset(i, zcu));
+                        const sz: usize = @intCast(field_ty.abiSize(zcu));
                         field_val.* = (try readFromMemory(field_ty, pt, buffer[off..(off + sz)], arena)).toIntern();
                     }
                     return Value.fromInterned(try pt.intern(.{ .aggregate = .{
                         .ty = ty.toIntern(),
@@ -694,29 +710,29 @@ pub fn readFromMemory(
                     } }));
                 },
                 .@"packed" => {
-                    const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+                    const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
                     return readFromPackedMemory(ty, pt, buffer[0..byte_count], 0, arena);
                 },
             }
         },
         .ErrorSet => {
-            const bits = mod.errorSetBits();
+            const bits = zcu.errorSetBits();
             const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
             const int = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
             const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-            const name = mod.global_error_set.keys()[@intCast(index)];
+            const name = zcu.global_error_set.keys()[@intCast(index)];
 
             return Value.fromInterned(try pt.intern(.{ .err = .{
                 .ty = ty.toIntern(),
                 .name = name,
             } }));
         },
-        .Union => switch (ty.containerLayout(mod)) {
+        .Union => switch (ty.containerLayout(zcu)) {
             .auto => return error.IllDefinedMemoryLayout,
             .@"extern" => {
-                const union_size = ty.abiSize(pt);
+                const union_size = ty.abiSize(zcu);
                 const array_ty = try pt.arrayType(.{ .len = union_size, .child = .u8_type });
                 const val = (try readFromMemory(array_ty, pt, buffer, arena)).toIntern();
                 return Value.fromInterned(try pt.intern(.{ .un = .{
                     .ty = ty.toIntern(),
                     .tag = .none,
@@ -724,23 +740,23 @@ pub fn readFromMemory(
                 } }));
             },
             .@"packed" => {
-                const byte_count = (@as(usize, @intCast(ty.bitSize(pt))) + 7) / 8;
+                const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
                 return readFromPackedMemory(ty, pt, buffer[0..byte_count], 0, arena);
             },
         },
         .Pointer => {
-            assert(!ty.isSlice(mod)); // No well defined layout.
+            assert(!ty.isSlice(zcu)); // No well defined layout.
             const int_val = try readFromMemory(Type.usize, pt, buffer, arena);
             return Value.fromInterned(try pt.intern(.{ .ptr = .{
                 .ty = ty.toIntern(),
                 .base_addr = .int,
-                .byte_offset = int_val.toUnsignedInt(pt),
+                .byte_offset = int_val.toUnsignedInt(zcu),
             } }));
         },
         .Optional => {
-            assert(ty.isPtrLikeOptional(mod));
-            const child_ty = ty.optionalChild(mod);
+            assert(ty.isPtrLikeOptional(zcu));
+            const child_ty = ty.optionalChild(zcu);
             const child_val = try readFromMemory(child_ty, pt, buffer, arena);
             return Value.fromInterned(try pt.intern(.{ .opt = .{
                 .ty = ty.toIntern(),
-                .val = switch (child_val.orderAgainstZero(pt)) {
+                .val = switch (child_val.orderAgainstZero(zcu)) {
@@ -768,11 +784,11 @@ pub fn readFromPackedMemory(
     IllDefinedMemoryLayout,
     OutOfMemory,
 }!Value {
-    const mod = pt.zcu;
-    const ip = &mod.intern_pool;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const target = zcu.getTarget();
     const endian = target.cpu.arch.endian();
-    switch (ty.zigTypeTag(mod)) {
+    switch (ty.zigTypeTag(zcu)) {
         .Void => return Value.void,
         .Bool => {
             const byte = switch (endian) {
@@ -787,7 +803,7 @@ pub fn readFromPackedMemory(
         },
         .Int => {
             if (buffer.len == 0) return pt.intValue(ty, 0);
-            const int_info = ty.intInfo(mod);
+            const int_info = ty.intInfo(zcu);
             const bits = int_info.bits;
             if (bits == 0) return pt.intValue(ty, 0);
 
@@ -800,7 +816,7 @@ pub fn readFromPackedMemory(
             };
 
             // Slow path, we have to construct a big-int
-            const abi_size: usize = @intCast(ty.abiSize(pt));
+            const abi_size: usize = @intCast(ty.abiSize(zcu));
             const Limb = std.math.big.Limb;
             const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
             const limbs_buffer = try arena.alloc(Limb, limb_count);
@@ -810,7 +826,7 @@ pub fn readFromPackedMemory(
             return pt.intValue_big(ty, bigint.toConst());
         },
         .Enum => {
-            const int_ty = ty.intTagType(mod);
+            const int_ty = ty.intTagType(zcu);
             const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, arena);
             return pt.getCoerced(int_val, ty);
         },
@@ -826,11 +842,11 @@ pub fn readFromPackedMemory(
             },
         } })),
         .Vector => {
-            const elem_ty = ty.childType(mod);
-            const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod)));
+            const elem_ty = ty.childType(zcu);
+            const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
 
             var bits: u16 = 0;
-            const elem_bit_size: u16 = @intCast(elem_ty.bitSize(pt));
+            const elem_bit_size: u16 = @intCast(elem_ty.bitSize(zcu));
             for (elems, 0..) |_, i| {
                 // On big-endian systems, LLVM reverses the element order of vectors by default
                 const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i;
@@ -845,12 +861,12 @@ pub fn readFromPackedMemory(
         .Struct => {
             // Sema is supposed to have emitted a compile error already for Auto layout structs,
             // and Extern is handled by non-packed readFromMemory.
-            const struct_type = mod.typeToPackedStruct(ty).?;
+            const struct_type = zcu.typeToPackedStruct(ty).?;
             var bits: u16 = 0;
             const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
             for (field_vals, 0..) |*field_val, i| {
                 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-                const field_bits: u16 = @intCast(field_ty.bitSize(pt));
+                const field_bits: u16 = @intCast(field_ty.bitSize(zcu));
                 field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
                 bits += field_bits;
             }
@@ -859,7 +875,7 @@ pub fn readFromPackedMemory(
                 .storage = .{ .elems = field_vals },
             } }));
         },
-        .Union => switch (ty.containerLayout(mod)) {
+        .Union => switch (ty.containerLayout(zcu)) {
             .auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
             .@"packed" => {
                 const backing_ty = try ty.unionBackingType(pt);
@@ -872,21 +888,21 @@ pub fn readFromPackedMemory(
             },
         },
         .Pointer => {
-            assert(!ty.isSlice(mod)); // No well defined layout.
+            assert(!ty.isSlice(zcu)); // No well defined layout.
             const int_val = try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena);
             return Value.fromInterned(try pt.intern(.{ .ptr = .{
                 .ty = ty.toIntern(),
                 .base_addr = .int,
-                .byte_offset = int_val.toUnsignedInt(pt),
+                .byte_offset = int_val.toUnsignedInt(zcu),
             } }));
         },
         .Optional => {
-            assert(ty.isPtrLikeOptional(mod));
-            const child_ty = ty.optionalChild(mod);
+            assert(ty.isPtrLikeOptional(zcu));
+            const child_ty = ty.optionalChild(zcu);
             const child_val = try readFromPackedMemory(child_ty, pt, buffer, bit_offset, arena);
             return Value.fromInterned(try pt.intern(.{ .opt = .{
                 .ty = ty.toIntern(),
-                .val = switch (child_val.orderAgainstZero(pt)) {
+                .val = switch (child_val.orderAgainstZero(zcu)) {
                     .lt => unreachable,
                     .eq => .none,
                     .gt => child_val.toIntern(),
@@ -898,8 +914,8 @@ pub fn readFromPackedMemory(
 }
 
 /// Asserts that the value is a float or an integer.
-pub fn toFloat(val: Value, comptime T: type, pt: Zcu.PerThread) T {
-    return switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
+pub fn toFloat(val: Value, comptime T: type, zcu: *Zcu) T {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .int => |int| switch (int.storage) {
             .big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)),
             inline .u64, .i64 => |x| {
@@ -908,8 +924,8 @@ pub fn toFloat(val: Value, comptime T: type, pt: Zcu.PerThread) T {
                 }
                 return @floatFromInt(x);
             },
-            .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0),
-            .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(pt)),
+            .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0),
+            .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(zcu)),
         },
         .float => |float| switch (float.storage) {
             inline else => |x| @floatCast(x),
@@ -937,30 +953,30 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
     }
 }
 
-pub fn clz(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
+pub fn clz(val: Value, ty: Type, zcu: *Zcu) u64 {
     var bigint_buf: BigIntSpace = undefined;
-    const bigint = val.toBigInt(&bigint_buf, pt);
-    return bigint.clz(ty.intInfo(pt.zcu).bits);
+    const bigint = val.toBigInt(&bigint_buf, zcu);
+    return bigint.clz(ty.intInfo(zcu).bits);
 }
 
-pub fn ctz(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
+pub fn ctz(val: Value, ty: Type, zcu: *Zcu) u64 {
     var bigint_buf: BigIntSpace = undefined;
-    const bigint = val.toBigInt(&bigint_buf, pt);
-    return bigint.ctz(ty.intInfo(pt.zcu).bits);
+    const bigint = val.toBigInt(&bigint_buf, zcu);
+    return bigint.ctz(ty.intInfo(zcu).bits);
 }
 
-pub fn popCount(val: Value, ty: Type, pt: Zcu.PerThread) u64 {
+pub fn popCount(val: Value, ty: Type, zcu: *Zcu) u64 {
     var bigint_buf: BigIntSpace = undefined;
-    const bigint = val.toBigInt(&bigint_buf, pt);
-    return @intCast(bigint.popCount(ty.intInfo(pt.zcu).bits));
+    const bigint = val.toBigInt(&bigint_buf, zcu);
+    return @intCast(bigint.popCount(ty.intInfo(zcu).bits));
 }
 
 pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
-    const mod = pt.zcu;
-    const info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
 
     var buffer: Value.BigIntSpace = undefined;
-    const operand_bigint = val.toBigInt(&buffer, pt);
+    const operand_bigint = val.toBigInt(&buffer, zcu);
 
     const limbs = try arena.alloc(
         std.math.big.Limb,
@@ -973,14 +989,14 @@ pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Va
 }
 
 pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
-    const mod = pt.zcu;
-    const info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
 
     // Bit count must be evenly divisible by 8
     assert(info.bits % 8 == 0);
 
     var buffer: Value.BigIntSpace = undefined;
-    const operand_bigint = val.toBigInt(&buffer, pt);
+    const operand_bigint = val.toBigInt(&buffer, zcu);
 
     const limbs = try arena.alloc(
         std.math.big.Limb,
@@ -994,33 +1010,34 @@ pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Valu
 
 /// Asserts the value is an integer and not undefined.
 /// Returns the number of bits required to represent the value in two's complement form.
-pub fn intBitCountTwosComp(self: Value, pt: Zcu.PerThread) usize {
+pub fn intBitCountTwosComp(self: Value, zcu: *Zcu) usize {
     var buffer: BigIntSpace = undefined;
-    const big_int = self.toBigInt(&buffer, pt);
+    const big_int = self.toBigInt(&buffer, zcu);
     return big_int.bitCountTwosComp();
 }
 
 /// Converts an integer or a float to a float. May result in a loss of information.
 /// The caller can detect this by comparing the result against the operand for equality.
 pub fn floatCast(val: Value, dest_ty: Type, pt: Zcu.PerThread) !Value {
-    const target = pt.zcu.getTarget();
-    if (val.isUndef(pt.zcu)) return pt.undefValue(dest_ty);
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
+    if (val.isUndef(zcu)) return pt.undefValue(dest_ty);
     return Value.fromInterned(try pt.intern(.{ .float = .{
         .ty = dest_ty.toIntern(),
         .storage = switch (dest_ty.floatBits(target)) {
-            16 => .{ .f16 = val.toFloat(f16, pt) },
-            32 => .{ .f32 = val.toFloat(f32, pt) },
-            64 => .{ .f64 = val.toFloat(f64, pt) },
-            80 => .{ .f80 = val.toFloat(f80, pt) },
-            128 => .{ .f128 = val.toFloat(f128, pt) },
+            16 => .{ .f16 = val.toFloat(f16, zcu) },
+            32 => .{ .f32 = val.toFloat(f32, zcu) },
+            64 => .{ .f64 = val.toFloat(f64, zcu) },
+            80 => .{ .f80 = val.toFloat(f80, zcu) },
+            128 => .{ .f128 = val.toFloat(f128, zcu) },
             else => unreachable,
         },
     } }));
 }
 
 /// Asserts the value is a float
-pub fn floatHasFraction(self: Value, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(self.toIntern())) {
+pub fn floatHasFraction(self: Value, zcu: *const Module) bool {
+    return switch (zcu.intern_pool.indexToKey(self.toIntern())) {
         .float => |float| switch (float.storage) {
             inline else => |x| @rem(x, 1) != 0,
         },
@@ -1028,19 +1045,24 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool {
     };
 }
 
-pub fn orderAgainstZero(lhs: Value, pt: Zcu.PerThread) std.math.Order {
-    return orderAgainstZeroAdvanced(lhs, pt, .normal) catch unreachable;
+pub fn orderAgainstZero(lhs: Value, zcu: *Zcu) std.math.Order {
+    return orderAgainstZeroInner(lhs, .normal, zcu, {}) catch unreachable;
 }
 
-pub fn orderAgainstZeroAdvanced(
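+/// Like `orderAgainstZero`, but resolves lazy values via Sema when needed.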
+pub fn orderAgainstZeroSema(lhs: Value, pt: Zcu.PerThread) !std.math.Order {
+    return try orderAgainstZeroInner(lhs, .sema, pt.zcu, pt.tid);
+}
+
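+/// Shared implementation of `orderAgainstZero` and `orderAgainstZeroSema`; `tid` is unused unless `strat` is `.sema`.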
+pub fn orderAgainstZeroInner(
     lhs: Value,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
 ) Module.CompileError!std.math.Order {
     return switch (lhs.toIntern()) {
         .bool_false => .eq,
         .bool_true => .gt,
-        else => switch (pt.zcu.intern_pool.indexToKey(lhs.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(lhs.toIntern())) {
             .ptr => |ptr| if (ptr.byte_offset > 0) .gt else switch (ptr.base_addr) {
                 .nav, .comptime_alloc, .comptime_field => .gt,
                 .int => .eq,
@@ -1050,16 +1072,17 @@ pub fn orderAgainstZeroAdvanced(
                 .big_int => |big_int| big_int.orderAgainstScalar(0),
                 inline .u64, .i64 => |x| std.math.order(x, 0),
                 .lazy_align => .gt, // alignment is never 0
-                .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced(
-                    pt,
+                .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsInner(
                     false,
                     strat.toLazy(),
+                    zcu,
+                    tid,
                 ) catch |err| switch (err) {
                     error.NeedLazy => unreachable,
                     else => |e| return e,
                 }) .gt else .eq,
             },
-            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(pt, strat),
+            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroInner(strat, zcu, tid),
             .float => |float| switch (float.storage) {
                 inline else => |x| std.math.order(x, 0),
             },
@@ -1069,14 +1092,20 @@ pub fn orderAgainstZeroAdvanced(
 }
 
 /// Asserts the value is comparable.
-pub fn order(lhs: Value, rhs: Value, pt: Zcu.PerThread) std.math.Order {
-    return orderAdvanced(lhs, rhs, pt, .normal) catch unreachable;
+pub fn order(lhs: Value, rhs: Value, zcu: *Zcu) std.math.Order {
+    return orderAdvanced(lhs, rhs, .normal, zcu, {}) catch unreachable;
 }
 
 /// Asserts the value is comparable.
-pub fn orderAdvanced(lhs: Value, rhs: Value, pt: Zcu.PerThread, comptime strat: ResolveStrat) !std.math.Order {
-    const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(pt, strat);
-    const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(pt, strat);
+pub fn orderAdvanced(
+    lhs: Value,
+    rhs: Value,
+    comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
+) !std.math.Order {
+    const lhs_against_zero = try lhs.orderAgainstZeroInner(strat, zcu, tid);
+    const rhs_against_zero = try rhs.orderAgainstZeroInner(strat, zcu, tid);
     switch (lhs_against_zero) {
         .lt => if (rhs_against_zero != .lt) return .lt,
         .eq => return rhs_against_zero.invert(),
@@ -1088,34 +1117,39 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, pt: Zcu.PerThread, comptime strat:
         .gt => {},
     }
 
-    if (lhs.isFloat(pt.zcu) or rhs.isFloat(pt.zcu)) {
-        const lhs_f128 = lhs.toFloat(f128, pt);
-        const rhs_f128 = rhs.toFloat(f128, pt);
+    if (lhs.isFloat(zcu) or rhs.isFloat(zcu)) {
+        const lhs_f128 = lhs.toFloat(f128, zcu);
+        const rhs_f128 = rhs.toFloat(f128, zcu);
         return std.math.order(lhs_f128, rhs_f128);
     }
 
     var lhs_bigint_space: BigIntSpace = undefined;
     var rhs_bigint_space: BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, pt, strat);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, pt, strat);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, strat, zcu, tid);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, strat, zcu, tid);
     return lhs_bigint.order(rhs_bigint);
 }
 
 /// Asserts the value is comparable. Does not take a type parameter because it supports
 /// comparisons between heterogeneous types.
-pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, pt: Zcu.PerThread) bool {
-    return compareHeteroAdvanced(lhs, op, rhs, pt, .normal) catch unreachable;
+pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: *Zcu) bool {
+    return compareHeteroAdvanced(lhs, op, rhs, .normal, zcu, {}) catch unreachable;
+}
+
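+/// Like `compareHetero`, but resolves lazy values via Sema when needed.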
+pub fn compareHeteroSema(lhs: Value, op: std.math.CompareOperator, rhs: Value, pt: Zcu.PerThread) !bool {
+    return try compareHeteroAdvanced(lhs, op, rhs, .sema, pt.zcu, pt.tid);
 }
 
 pub fn compareHeteroAdvanced(
     lhs: Value,
     op: std.math.CompareOperator,
     rhs: Value,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
 ) !bool {
-    if (lhs.pointerNav(pt.zcu)) |lhs_nav| {
-        if (rhs.pointerNav(pt.zcu)) |rhs_nav| {
+    if (lhs.pointerNav(zcu)) |lhs_nav| {
+        if (rhs.pointerNav(zcu)) |rhs_nav| {
             switch (op) {
                 .eq => return lhs_nav == rhs_nav,
                 .neq => return lhs_nav != rhs_nav,
@@ -1128,32 +1162,32 @@ pub fn compareHeteroAdvanced(
                 else => {},
             }
         }
-    } else if (rhs.pointerNav(pt.zcu)) |_| {
+    } else if (rhs.pointerNav(zcu)) |_| {
         switch (op) {
             .eq => return false,
             .neq => return true,
             else => {},
         }
     }
-    return (try orderAdvanced(lhs, rhs, pt, strat)).compare(op);
+    return (try orderAdvanced(lhs, rhs, strat, zcu, tid)).compare(op);
 }
 
 /// Asserts the values are comparable. Both operands have type `ty`.
 /// For vectors, returns true if comparison is true for ALL elements.
 pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, pt: Zcu.PerThread) !bool {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const scalar_ty = ty.scalarType(mod);
-        for (0..ty.vectorLen(mod)) |i| {
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const scalar_ty = ty.scalarType(zcu);
+        for (0..ty.vectorLen(zcu)) |i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
-            if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, pt)) {
+            if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, zcu)) {
                 return false;
             }
         }
         return true;
     }
-    return compareScalar(lhs, op, rhs, ty, pt);
+    return compareScalar(lhs, op, rhs, ty, zcu);
 }
 
 /// Asserts the values are comparable. Both operands have type `ty`.
@@ -1162,12 +1196,12 @@ pub fn compareScalar(
     op: std.math.CompareOperator,
     rhs: Value,
     ty: Type,
-    pt: Zcu.PerThread,
+    zcu: *Zcu,
 ) bool {
     return switch (op) {
-        .eq => lhs.eql(rhs, ty, pt.zcu),
-        .neq => !lhs.eql(rhs, ty, pt.zcu),
-        else => compareHetero(lhs, op, rhs, pt),
+        .eq => lhs.eql(rhs, ty, zcu),
+        .neq => !lhs.eql(rhs, ty, zcu),
+        else => compareHetero(lhs, op, rhs, zcu),
     };
 }
 
@@ -1176,8 +1210,8 @@ pub fn compareScalar(
 /// Returns `false` if the value or any vector element is undefined.
 ///
 /// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
-pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, pt: Zcu.PerThread) bool {
-    return compareAllWithZeroAdvancedExtra(lhs, op, pt, .normal) catch unreachable;
+pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, zcu: *Zcu) bool {
+    return compareAllWithZeroAdvancedExtra(lhs, op, .normal, zcu, {}) catch unreachable;
 }
 
 pub fn compareAllWithZeroSema(
@@ -1185,47 +1219,47 @@ pub fn compareAllWithZeroSema(
     op: std.math.CompareOperator,
     pt: Zcu.PerThread,
 ) Module.CompileError!bool {
-    return compareAllWithZeroAdvancedExtra(lhs, op, pt, .sema);
+    return compareAllWithZeroAdvancedExtra(lhs, op, .sema, pt.zcu, pt.tid);
 }
 
 pub fn compareAllWithZeroAdvancedExtra(
     lhs: Value,
     op: std.math.CompareOperator,
-    pt: Zcu.PerThread,
     comptime strat: ResolveStrat,
+    zcu: *Zcu,
+    tid: strat.Tid(),
 ) Module.CompileError!bool {
-    const mod = pt.zcu;
-    if (lhs.isInf(mod)) {
+    if (lhs.isInf(zcu)) {
         switch (op) {
             .neq => return true,
             .eq => return false,
-            .gt, .gte => return !lhs.isNegativeInf(mod),
-            .lt, .lte => return lhs.isNegativeInf(mod),
+            .gt, .gte => return !lhs.isNegativeInf(zcu),
+            .lt, .lte => return lhs.isNegativeInf(zcu),
         }
     }
 
-    switch (mod.intern_pool.indexToKey(lhs.toIntern())) {
+    switch (zcu.intern_pool.indexToKey(lhs.toIntern())) {
         .float => |float| switch (float.storage) {
             inline else => |x| if (std.math.isNan(x)) return op == .neq,
         },
         .aggregate => |aggregate| return switch (aggregate.storage) {
-            .bytes => |bytes| for (bytes.toSlice(lhs.typeOf(mod).arrayLenIncludingSentinel(mod), &mod.intern_pool)) |byte| {
+            .bytes => |bytes| for (bytes.toSlice(lhs.typeOf(zcu).arrayLenIncludingSentinel(zcu), &zcu.intern_pool)) |byte| {
                 if (!std.math.order(byte, 0).compare(op)) break false;
             } else true,
             .elems => |elems| for (elems) |elem| {
-                if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, pt, strat)) break false;
+                if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, strat, zcu, tid)) break false;
             } else true,
-            .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, pt, strat),
+            .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, strat, zcu, tid),
         },
         .undef => return false,
         else => {},
     }
-    return (try orderAgainstZeroAdvanced(lhs, pt, strat)).compare(op);
+    return (try orderAgainstZeroInner(lhs, strat, zcu, tid)).compare(op);
 }
 
-pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
-    assert(mod.intern_pool.typeOf(a.toIntern()) == ty.toIntern());
-    assert(mod.intern_pool.typeOf(b.toIntern()) == ty.toIntern());
+pub fn eql(a: Value, b: Value, ty: Type, zcu: *Module) bool {
+    assert(zcu.intern_pool.typeOf(a.toIntern()) == ty.toIntern());
+    assert(zcu.intern_pool.typeOf(b.toIntern()) == ty.toIntern());
     return a.toIntern() == b.toIntern();
 }
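
`eql` can be a single integer comparison because same-typed values are interned: structurally identical values always map to the same `InternPool` index. Sketch, assuming `a` and `b` both have type `ty`:

    // No deep traversal; interning guarantees index equality iff value equality.
    const same = a.eql(b, ty, zcu);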
 
@@ -1260,8 +1294,8 @@ pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool {
 /// Gets the `Nav` referenced by this pointer.  If the pointer does not point
 /// to a `Nav`, or if it points to some part of one (like a field or element),
 /// returns null.
-pub fn pointerNav(val: Value, mod: *Module) ?InternPool.Nav.Index {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn pointerNav(val: Value, zcu: *Zcu) ?InternPool.Nav.Index {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         // TODO: these 3 cases are weird; these aren't pointer values!
         .variable => |v| v.owner_nav,
         .@"extern" => |e| e.owner_nav,
@@ -1277,8 +1311,8 @@ pub fn pointerNav(val: Value, mod: *Module) ?InternPool.Nav.Index {
 pub const slice_ptr_index = 0;
 pub const slice_len_index = 1;
 
-pub fn slicePtr(val: Value, mod: *Module) Value {
-    return Value.fromInterned(mod.intern_pool.slicePtr(val.toIntern()));
+pub fn slicePtr(val: Value, zcu: *Module) Value {
+    return Value.fromInterned(zcu.intern_pool.slicePtr(val.toIntern()));
 }
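
Slices intern as a (ptr, len) pair, which is what the two index constants above encode; `slicePtr` reads the pointer half straight out of the pool. Sketch, assuming `slice_val` is a slice-typed `Value`:

    const ptr_half = slice_val.slicePtr(zcu); // field slice_ptr_index (0) of the pair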
 
 /// Gets the `len` field of a slice value as a `u64`.
@@ -1312,15 +1346,15 @@ pub fn elemValue(val: Value, pt: Zcu.PerThread, index: usize) Allocator.Error!Va
     }
 }
 
-pub fn isLazyAlign(val: Value, mod: *Module) bool {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn isLazyAlign(val: Value, zcu: *Module) bool {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .int => |int| int.storage == .lazy_align,
         else => false,
     };
 }
 
-pub fn isLazySize(val: Value, mod: *Module) bool {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn isLazySize(val: Value, zcu: *Module) bool {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .int => |int| int.storage == .lazy_size,
         else => false,
     };
@@ -1377,15 +1411,15 @@ pub fn sliceArray(
 }
 
 pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value {
-    const mod = pt.zcu;
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+    const zcu = pt.zcu;
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .undef => |ty| Value.fromInterned(try pt.intern(.{
-            .undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(),
+            .undef = Type.fromInterned(ty).structFieldType(index, zcu).toIntern(),
         })),
         .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
             .bytes => |bytes| try pt.intern(.{ .int = .{
                 .ty = .u8_type,
-                .storage = .{ .u64 = bytes.at(index, &mod.intern_pool) },
+                .storage = .{ .u64 = bytes.at(index, &zcu.intern_pool) },
             } }),
             .elems => |elems| elems[index],
             .repeated_elem => |elem| elem,
@@ -1396,40 +1430,40 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value {
     };
 }
 
-pub fn unionTag(val: Value, mod: *Module) ?Value {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn unionTag(val: Value, zcu: *Module) ?Value {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .undef, .enum_tag => val,
         .un => |un| if (un.tag != .none) Value.fromInterned(un.tag) else return null,
         else => unreachable,
     };
 }
 
-pub fn unionValue(val: Value, mod: *Module) Value {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn unionValue(val: Value, zcu: *Module) Value {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .un => |un| Value.fromInterned(un.val),
         else => unreachable,
     };
 }
 
-pub fn isUndef(val: Value, mod: *Module) bool {
-    return mod.intern_pool.isUndef(val.toIntern());
+pub fn isUndef(val: Value, zcu: *Module) bool {
+    return zcu.intern_pool.isUndef(val.toIntern());
 }
 
 /// TODO: check for cases such as array that is not marked undef but all the element
 /// values are marked undef, or struct that is not marked undef but all fields are marked
 /// undef, etc.
-pub fn isUndefDeep(val: Value, mod: *Module) bool {
-    return val.isUndef(mod);
+pub fn isUndefDeep(val: Value, zcu: *Module) bool {
+    return val.isUndef(zcu);
 }
 
 /// Asserts the value is not undefined and not unreachable.
 /// C pointers with an integer value of 0 are also considered null.
-pub fn isNull(val: Value, mod: *Module) bool {
+pub fn isNull(val: Value, zcu: *Module) bool {
     return switch (val.toIntern()) {
         .undef => unreachable,
         .unreachable_value => unreachable,
         .null_value => true,
-        else => return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        else => return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
             .undef => unreachable,
             .ptr => |ptr| switch (ptr.base_addr) {
                 .int => ptr.byte_offset == 0,
@@ -1442,8 +1476,8 @@ pub fn isNull(val: Value, mod: *Module) bool {
 }
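
The C-pointer case means a `[*c]` pointer interned at integer address 0 reports null even though it is not the canonical `null_value`. Sketch, assuming `c_ptr` is a C-pointer `Value` built from an integer address:

    // True exactly when ptr.base_addr == .int and ptr.byte_offset == 0.
    const treat_as_null = c_ptr.isNull(zcu);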
 
 /// Valid only for error (union) types. Asserts the value is not undefined and not unreachable.
-pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTerminatedString {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn getErrorName(val: Value, zcu: *const Module) InternPool.OptionalNullTerminatedString {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .err => |err| err.name.toOptional(),
         .error_union => |error_union| switch (error_union.val) {
             .err_name => |err_name| err_name.toOptional(),
@@ -1462,13 +1496,13 @@ pub fn getErrorInt(val: Value, zcu: *Zcu) Module.ErrorInt {
 
 /// Assumes the type is an error union. Returns true if and only if the value is
 /// the error union payload, not an error.
-pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool {
-    return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload;
+pub fn errorUnionIsPayload(val: Value, zcu: *const Module) bool {
+    return zcu.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload;
 }
 
 /// Value of the optional, null if optional has no payload.
-pub fn optionalValue(val: Value, mod: *const Module) ?Value {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn optionalValue(val: Value, zcu: *const Module) ?Value {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .opt => |opt| switch (opt.val) {
             .none => null,
             else => |payload| Value.fromInterned(payload),
@@ -1479,10 +1513,10 @@ pub fn optionalValue(val: Value, mod: *const Module) ?Value {
 }
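
A typical unwrap at the comptime-value level, assuming hypothetical `opt_val` (an optional-typed `Value`) and `default_val` (a fallback of the payload type):

    const payload: Value = opt_val.optionalValue(zcu) orelse default_val;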
 
 /// Valid for all types. Asserts the value is not undefined.
-pub fn isFloat(self: Value, mod: *const Module) bool {
+pub fn isFloat(self: Value, zcu: *const Module) bool {
     return switch (self.toIntern()) {
         .undef => unreachable,
-        else => switch (mod.intern_pool.indexToKey(self.toIntern())) {
+        else => switch (zcu.intern_pool.indexToKey(self.toIntern())) {
             .undef => unreachable,
             .float => true,
             else => false,
@@ -1490,8 +1524,8 @@ pub fn isFloat(self: Value, mod: *const Module) bool {
     };
 }
 
-pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value {
-    return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, .normal) catch |err| switch (err) {
+pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, zcu: *Module) !Value {
+    return floatFromIntAdvanced(val, arena, int_ty, float_ty, zcu, .normal) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         else => unreachable,
     };
@@ -1505,10 +1539,10 @@ pub fn floatFromIntAdvanced(
     pt: Zcu.PerThread,
     comptime strat: ResolveStrat,
 ) !Value {
-    const mod = pt.zcu;
-    if (int_ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod));
-        const scalar_ty = float_ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (int_ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(zcu));
+        const scalar_ty = float_ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, pt, strat)).toIntern();
@@ -1522,8 +1556,8 @@ pub fn floatFromIntAdvanced(
 }
 
 pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Value {
-    const mod = pt.zcu;
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+    const zcu = pt.zcu;
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .undef => try pt.undefValue(float_ty),
         .int => |int| switch (int.storage) {
             .big_int => |big_int| {
@@ -1531,8 +1565,8 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, comptim
                 return pt.floatValue(float_ty, float);
             },
             inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, pt),
-            .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(pt, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, pt),
-            .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(pt, strat.toLazy())).scalar, float_ty, pt),
+            .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentInner(strat.toLazy(), pt.zcu, pt.tid)).scalar.toByteUnits() orelse 0, float_ty, pt),
+            .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeInner(strat.toLazy(), pt.zcu, pt.tid)).scalar, float_ty, pt),
         },
         else => unreachable,
     };
@@ -1600,15 +1634,16 @@ pub fn intAddSatScalar(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    assert(!lhs.isUndef(pt.zcu));
-    assert(!rhs.isUndef(pt.zcu));
+    const zcu = pt.zcu;
+    assert(!lhs.isUndef(zcu));
+    assert(!rhs.isUndef(zcu));
 
-    const info = ty.intInfo(pt.zcu);
+    const info = ty.intInfo(zcu);
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -1650,15 +1685,17 @@ pub fn intSubSatScalar(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    assert(!lhs.isUndef(pt.zcu));
-    assert(!rhs.isUndef(pt.zcu));
+    const zcu = pt.zcu;
 
-    const info = ty.intInfo(pt.zcu);
+    assert(!lhs.isUndef(zcu));
+    assert(!rhs.isUndef(zcu));
+
+    const info = ty.intInfo(zcu);
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -1675,12 +1712,12 @@ pub fn intMulWithOverflow(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !OverflowArithmeticResult {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const vec_len = ty.vectorLen(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const vec_len = ty.vectorLen(zcu);
         const overflowed_data = try arena.alloc(InternPool.Index, vec_len);
         const result_data = try arena.alloc(InternPool.Index, vec_len);
-        const scalar_ty = ty.scalarType(mod);
+        const scalar_ty = ty.scalarType(zcu);
         for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -1709,10 +1746,10 @@ pub fn intMulWithOverflowScalar(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !OverflowArithmeticResult {
-    const mod = pt.zcu;
-    const info = ty.intInfo(mod);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
 
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) {
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
         return .{
             .overflow_bit = try pt.undefValue(Type.u1),
             .wrapped_result = try pt.undefValue(ty),
@@ -1721,8 +1758,8 @@ pub fn intMulWithOverflowScalar(
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -1753,10 +1790,10 @@ pub fn numberMulWrap(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -1778,10 +1815,10 @@ pub fn numberMulWrapScalar(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+    const zcu = pt.zcu;
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.undef;
 
-    if (ty.zigTypeTag(mod) == .ComptimeInt) {
+    if (ty.zigTypeTag(zcu) == .ComptimeInt) {
         return intMul(lhs, rhs, ty, undefined, arena, pt);
     }
 
@@ -1825,15 +1862,17 @@ pub fn intMulSatScalar(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    assert(!lhs.isUndef(pt.zcu));
-    assert(!rhs.isUndef(pt.zcu));
+    const zcu = pt.zcu;
+
+    assert(!lhs.isUndef(zcu));
+    assert(!rhs.isUndef(zcu));
 
-    const info = ty.intInfo(pt.zcu);
+    const info = ty.intInfo(zcu);
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         @max(
@@ -1853,24 +1892,24 @@ pub fn intMulSatScalar(
 }
 
 /// Supports both floats and ints; handles undefined.
-pub fn numberMax(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value {
-    if (lhs.isUndef(pt.zcu) or rhs.isUndef(pt.zcu)) return undef;
-    if (lhs.isNan(pt.zcu)) return rhs;
-    if (rhs.isNan(pt.zcu)) return lhs;
+pub fn numberMax(lhs: Value, rhs: Value, zcu: *Zcu) Value {
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return undef;
+    if (lhs.isNan(zcu)) return rhs;
+    if (rhs.isNan(zcu)) return lhs;
 
-    return switch (order(lhs, rhs, pt)) {
+    return switch (order(lhs, rhs, zcu)) {
         .lt => rhs,
         .gt, .eq => lhs,
     };
 }
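
Note the NaN handling: a NaN operand yields the other operand, i.e. maxNum-style semantics rather than NaN propagation. Sketch, assuming `nan_val` and `five_val` are float-typed values:

    const m = nan_val.numberMax(five_val, zcu); // yields five_val, not NaN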
 
 /// Supports both floats and ints; handles undefined.
-pub fn numberMin(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value {
-    if (lhs.isUndef(pt.zcu) or rhs.isUndef(pt.zcu)) return undef;
-    if (lhs.isNan(pt.zcu)) return rhs;
-    if (rhs.isNan(pt.zcu)) return lhs;
+pub fn numberMin(lhs: Value, rhs: Value, zcu: *Zcu) Value {
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return undef;
+    if (lhs.isNan(zcu)) return rhs;
+    if (rhs.isNan(zcu)) return lhs;
 
-    return switch (order(lhs, rhs, pt)) {
+    return switch (order(lhs, rhs, zcu)) {
         .lt => lhs,
         .gt, .eq => rhs,
     };
@@ -1878,10 +1917,10 @@ pub fn numberMin(lhs: Value, rhs: Value, pt: Zcu.PerThread) Value {
 
 /// operands must be (vectors of) integers; handles undefined scalars.
 pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try bitwiseNotScalar(elem_val, scalar_ty, arena, pt)).toIntern();
@@ -1896,11 +1935,11 @@ pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Va
 
 /// operands must be integers; handles undefined.
 pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (val.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
+    const zcu = pt.zcu;
+    if (val.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
     if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());
 
-    const info = ty.intInfo(mod);
+    const info = ty.intInfo(zcu);
 
     if (info.bits == 0) {
         return val;
@@ -1909,7 +1948,7 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThrea
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var val_space: Value.BigIntSpace = undefined;
-    const val_bigint = val.toBigInt(&val_space, pt);
+    const val_bigint = val.toBigInt(&val_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -1922,10 +1961,10 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThrea
 
 /// operands must be (vectors of) integers; handles undefined scalars.
 pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -1962,8 +2001,8 @@ pub fn bitwiseAndScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloc
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         // + 1 for negatives
@@ -1995,10 +2034,10 @@ fn intValueAa(ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
 
 /// operands must be (vectors of) integers; handles undefined scalars.
 pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2014,21 +2053,21 @@ pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.P
 
 /// operands must be integers; handles undefined.
 pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
+    const zcu = pt.zcu;
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
     if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));
 
     const anded = try bitwiseAnd(lhs, rhs, ty, arena, pt);
-    const all_ones = if (ty.isSignedInt(mod)) try pt.intValue(ty, -1) else try ty.maxIntScalar(pt, ty);
+    const all_ones = if (ty.isSignedInt(zcu)) try pt.intValue(ty, -1) else try ty.maxIntScalar(pt, ty);
     return bitwiseXor(anded, all_ones, ty, arena, pt);
 }
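
NAND is synthesized from the primitives above: AND first, then XOR against an all-ones value of the same type (`-1` already has all bits set for signed two's-complement types; unsigned types need `maxInt`). A worked u8 instance of that identity:

    // 0b0000_1100 & 0b0000_1010 = 0b0000_1000
    // 0b0000_1000 ^ 0b1111_1111 = 0b1111_0111  == ~(lhs & rhs)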
 
 /// operands must be (vectors of) integers; handles undefined scalars.
 pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2047,9 +2086,10 @@ pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloca
     // If one operand is defined, we turn the other into `0xAA` so the bitwise OR can
     // still set some bits.
     // TODO: ideally we'd still like tracking for the undef bits. Related: #19634.
+    const zcu = pt.zcu;
     const lhs: Value, const rhs: Value = make_defined: {
-        const lhs_undef = orig_lhs.isUndef(pt.zcu);
-        const rhs_undef = orig_rhs.isUndef(pt.zcu);
+        const lhs_undef = orig_lhs.isUndef(zcu);
+        const rhs_undef = orig_rhs.isUndef(zcu);
         break :make_defined switch ((@as(u2, @intFromBool(lhs_undef)) << 1) | @intFromBool(rhs_undef)) {
             0b00 => .{ orig_lhs, orig_rhs },
             0b01 => .{ orig_lhs, try intValueAa(ty, arena, pt) },
@@ -2064,8 +2104,8 @@ pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloca
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
@@ -2077,10 +2117,10 @@ pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloca
 
 /// operands must be (vectors of) integers; handles undefined scalars.
 pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2096,16 +2136,16 @@ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zc
 
 /// operands must be integers; handles undefined.
 pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
+    const zcu = pt.zcu;
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
     if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());
 
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try arena.alloc(
         std.math.big.Limb,
         // + 1 for negatives
@@ -2164,10 +2204,11 @@ fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator
 pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
+    const zcu = pt.zcu;
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs_q = try allocator.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len,
@@ -2212,10 +2253,11 @@ pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Z
 pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
+    const zcu = pt.zcu;
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs_q = try allocator.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len,
@@ -2254,10 +2296,11 @@ pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.Pe
 pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
+    const zcu = pt.zcu;
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs_q = try allocator.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len,
@@ -2277,8 +2320,8 @@ pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt:
 }
 
 /// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
-pub fn isNan(val: Value, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn isNan(val: Value, zcu: *const Module) bool {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .float => |float| switch (float.storage) {
             inline else => |x| std.math.isNan(x),
         },
@@ -2287,8 +2330,8 @@ pub fn isNan(val: Value, mod: *const Module) bool {
 }
 
 /// Returns true if the value is a floating point type and is infinite. Returns false otherwise.
-pub fn isInf(val: Value, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn isInf(val: Value, zcu: *const Module) bool {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .float => |float| switch (float.storage) {
             inline else => |x| std.math.isInf(x),
         },
@@ -2296,8 +2339,8 @@ pub fn isInf(val: Value, mod: *const Module) bool {
     };
 }
 
-pub fn isNegativeInf(val: Value, mod: *const Module) bool {
-    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+pub fn isNegativeInf(val: Value, zcu: *const Module) bool {
+    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
         .float => |float| switch (float.storage) {
             inline else => |x| std.math.isNegativeInf(x),
         },
@@ -2323,13 +2366,14 @@ pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt:
 }
 
 pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
-    const target = pt.zcu.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @rem(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
-        32 => .{ .f32 = @rem(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
-        64 => .{ .f64 = @rem(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
-        80 => .{ .f80 = @rem(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
-        128 => .{ .f128 = @rem(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
+        16 => .{ .f16 = @rem(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @rem(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @rem(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @rem(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @rem(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2356,13 +2400,14 @@ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt:
 }
 
 pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
-    const target = pt.zcu.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @mod(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
-        32 => .{ .f32 = @mod(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
-        64 => .{ .f64 = @mod(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
-        80 => .{ .f80 = @mod(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
-        128 => .{ .f128 = @mod(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
+        16 => .{ .f16 = @mod(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @mod(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @mod(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @mod(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @mod(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2374,14 +2419,14 @@ pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThrea
 /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
 /// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
 pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
+    const zcu = pt.zcu;
     var overflow: usize = undefined;
     return intMulInner(lhs, rhs, ty, &overflow, allocator, pt) catch |err| switch (err) {
         error.Overflow => {
-            const is_vec = ty.isVector(mod);
+            const is_vec = ty.isVector(zcu);
             overflow_idx.* = if (is_vec) overflow else 0;
             const safe_ty = if (is_vec) try pt.vectorType(.{
-                .len = ty.vectorLen(mod),
+                .len = ty.vectorLen(zcu),
                 .child = .comptime_int_type,
             }) else Type.comptime_int;
             return intMulInner(lhs, rhs, safe_ty, undefined, allocator, pt) catch |err1| switch (err1) {
@@ -2394,10 +2439,10 @@ pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator
 }
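
On overflow, `intMul` retries in `comptime_int` (or a vector of it), so the caller still receives the mathematically exact product plus the lane that overflowed. Caller-side sketch, assuming `lhs`, `rhs`, `ty`, `arena`, and `pt` come from the surrounding code:

    var overflow_idx: ?usize = null;
    const exact = try lhs.intMul(rhs, ty, &overflow_idx, arena, pt);
    // If overflow_idx != null, `exact` is comptime_int-typed; the "lane" is 0 for scalars.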
 
 fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2419,17 +2464,18 @@ fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator
 }
 
 pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
+    const zcu = pt.zcu;
     if (ty.toIntern() != .comptime_int_type) {
         const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, pt);
-        if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow;
+        if (res.overflow_bit.compareAllWithZero(.neq, zcu)) return error.Overflow;
         return res.wrapped_result;
     }
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const rhs_bigint = rhs.toBigInt(&rhs_space, pt);
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
     const limbs = try allocator.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2445,10 +2491,10 @@ pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt:
 }
 
 pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, pt)).toIntern();
@@ -2470,20 +2516,21 @@ pub fn intTruncBitsAsValue(
     bits: Value,
     pt: Zcu.PerThread,
 ) !Value {
-    if (ty.zigTypeTag(pt.zcu) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
-        const scalar_ty = ty.scalarType(pt.zcu);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             const bits_elem = try bits.elemValue(pt, i);
-            scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(pt)), pt)).toIntern();
+            scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(zcu)), pt)).toIntern();
         }
         return Value.fromInterned(try pt.intern(.{ .aggregate = .{
             .ty = ty.toIntern(),
             .storage = .{ .elems = result_data },
         } }));
     }
-    return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(pt)), pt);
+    return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(zcu)), pt);
 }
 
 pub fn intTruncScalar(
@@ -2500,7 +2547,7 @@ pub fn intTruncScalar(
     if (val.isUndef(zcu)) return pt.undefValue(ty);
 
     var val_space: Value.BigIntSpace = undefined;
-    const val_bigint = val.toBigInt(&val_space, pt);
+    const val_bigint = val.toBigInt(&val_space, zcu);
 
     const limbs = try allocator.alloc(
         std.math.big.Limb,
@@ -2513,10 +2560,10 @@ pub fn intTruncScalar(
 }
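
Truncation keeps the low `bits` bits and reinterprets them per `signedness`, mirroring `@truncate` at comptime. Sketch with hypothetical names, assuming `val` holds 0x1FF and `u8_ty` is the result type:

    // 0x1FF truncated to 8 unsigned bits -> 0xFF.
    const low8 = try val.intTruncScalar(u8_ty, allocator, .unsigned, 8, pt);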
 
 pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2533,9 +2580,10 @@ pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerTh
 pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
+    const zcu = pt.zcu;
     var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const shift: usize = @intCast(rhs.toUnsignedInt(pt));
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
     const limbs = try allocator.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2547,7 +2595,7 @@ pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu
     };
     result_bigint.shiftLeft(lhs_bigint, shift);
     if (ty.toIntern() != .comptime_int_type) {
-        const int_info = ty.intInfo(pt.zcu);
+        const int_info = ty.intInfo(zcu);
         result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits);
     }
 
@@ -2594,10 +2642,11 @@ pub fn shlWithOverflowScalar(
     allocator: Allocator,
     pt: Zcu.PerThread,
 ) !OverflowArithmeticResult {
-    const info = ty.intInfo(pt.zcu);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
     var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const shift: usize = @intCast(rhs.toUnsignedInt(pt));
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
     const limbs = try allocator.alloc(
         std.math.big.Limb,
         lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2650,11 +2699,12 @@ pub fn shlSatScalar(
 ) !Value {
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
-    const info = ty.intInfo(pt.zcu);
+    const zcu = pt.zcu;
+    const info = ty.intInfo(zcu);
 
     var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const shift: usize = @intCast(rhs.toUnsignedInt(pt));
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
     const limbs = try arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits) + 1,
@@ -2724,9 +2774,10 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerTh
 pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
+    const zcu = pt.zcu;
     var lhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = lhs.toBigInt(&lhs_space, pt);
-    const shift: usize = @intCast(rhs.toUnsignedInt(pt));
+    const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
+    const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
 
     const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
     if (result_limbs == 0) {
@@ -2758,10 +2809,10 @@ pub fn floatNeg(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try floatNegScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -2775,13 +2826,14 @@ pub fn floatNeg(
 }
 
 pub fn floatNegScalar(val: Value, float_type: Type, pt: Zcu.PerThread) !Value {
-    const target = pt.zcu.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = -val.toFloat(f16, pt) },
-        32 => .{ .f32 = -val.toFloat(f32, pt) },
-        64 => .{ .f64 = -val.toFloat(f64, pt) },
-        80 => .{ .f80 = -val.toFloat(f80, pt) },
-        128 => .{ .f128 = -val.toFloat(f128, pt) },
+        16 => .{ .f16 = -val.toFloat(f16, zcu) },
+        32 => .{ .f32 = -val.toFloat(f32, zcu) },
+        64 => .{ .f64 = -val.toFloat(f64, zcu) },
+        80 => .{ .f80 = -val.toFloat(f80, zcu) },
+        128 => .{ .f128 = -val.toFloat(f128, zcu) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2797,10 +2849,10 @@ pub fn floatAdd(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2820,14 +2872,14 @@ pub fn floatAddScalar(
     float_type: Type,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = lhs.toFloat(f16, pt) + rhs.toFloat(f16, pt) },
-        32 => .{ .f32 = lhs.toFloat(f32, pt) + rhs.toFloat(f32, pt) },
-        64 => .{ .f64 = lhs.toFloat(f64, pt) + rhs.toFloat(f64, pt) },
-        80 => .{ .f80 = lhs.toFloat(f80, pt) + rhs.toFloat(f80, pt) },
-        128 => .{ .f128 = lhs.toFloat(f128, pt) + rhs.toFloat(f128, pt) },
+        16 => .{ .f16 = lhs.toFloat(f16, zcu) + rhs.toFloat(f16, zcu) },
+        32 => .{ .f32 = lhs.toFloat(f32, zcu) + rhs.toFloat(f32, zcu) },
+        64 => .{ .f64 = lhs.toFloat(f64, zcu) + rhs.toFloat(f64, zcu) },
+        80 => .{ .f80 = lhs.toFloat(f80, zcu) + rhs.toFloat(f80, zcu) },
+        128 => .{ .f128 = lhs.toFloat(f128, zcu) + rhs.toFloat(f128, zcu) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2843,10 +2895,10 @@ pub fn floatSub(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -2866,14 +2918,14 @@ pub fn floatSubScalar(
     float_type: Type,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = lhs.toFloat(f16, pt) - rhs.toFloat(f16, pt) },
-        32 => .{ .f32 = lhs.toFloat(f32, pt) - rhs.toFloat(f32, pt) },
-        64 => .{ .f64 = lhs.toFloat(f64, pt) - rhs.toFloat(f64, pt) },
-        80 => .{ .f80 = lhs.toFloat(f80, pt) - rhs.toFloat(f80, pt) },
-        128 => .{ .f128 = lhs.toFloat(f128, pt) - rhs.toFloat(f128, pt) },
+        16 => .{ .f16 = lhs.toFloat(f16, zcu) - rhs.toFloat(f16, zcu) },
+        32 => .{ .f32 = lhs.toFloat(f32, zcu) - rhs.toFloat(f32, zcu) },
+        64 => .{ .f64 = lhs.toFloat(f64, zcu) - rhs.toFloat(f64, zcu) },
+        80 => .{ .f80 = lhs.toFloat(f80, zcu) - rhs.toFloat(f80, zcu) },
+        128 => .{ .f128 = lhs.toFloat(f128, zcu) - rhs.toFloat(f128, zcu) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2911,13 +2963,14 @@ pub fn floatDivScalar(
     float_type: Type,
     pt: Zcu.PerThread,
 ) !Value {
-    const target = pt.zcu.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = lhs.toFloat(f16, pt) / rhs.toFloat(f16, pt) },
-        32 => .{ .f32 = lhs.toFloat(f32, pt) / rhs.toFloat(f32, pt) },
-        64 => .{ .f64 = lhs.toFloat(f64, pt) / rhs.toFloat(f64, pt) },
-        80 => .{ .f80 = lhs.toFloat(f80, pt) / rhs.toFloat(f80, pt) },
-        128 => .{ .f128 = lhs.toFloat(f128, pt) / rhs.toFloat(f128, pt) },
+        16 => .{ .f16 = lhs.toFloat(f16, zcu) / rhs.toFloat(f16, zcu) },
+        32 => .{ .f32 = lhs.toFloat(f32, zcu) / rhs.toFloat(f32, zcu) },
+        64 => .{ .f64 = lhs.toFloat(f64, zcu) / rhs.toFloat(f64, zcu) },
+        80 => .{ .f80 = lhs.toFloat(f80, zcu) / rhs.toFloat(f80, zcu) },
+        128 => .{ .f128 = lhs.toFloat(f128, zcu) / rhs.toFloat(f128, zcu) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2955,13 +3008,14 @@ pub fn floatDivFloorScalar(
     float_type: Type,
     pt: Zcu.PerThread,
 ) !Value {
-    const target = pt.zcu.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @divFloor(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
-        32 => .{ .f32 = @divFloor(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
-        64 => .{ .f64 = @divFloor(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
-        80 => .{ .f80 = @divFloor(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
-        128 => .{ .f128 = @divFloor(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
+        16 => .{ .f16 = @divFloor(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @divFloor(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @divFloor(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @divFloor(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @divFloor(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -2999,13 +3053,14 @@ pub fn floatDivTruncScalar(
     float_type: Type,
     pt: Zcu.PerThread,
 ) !Value {
-    const target = pt.zcu.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, pt), rhs.toFloat(f16, pt)) },
-        32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, pt), rhs.toFloat(f32, pt)) },
-        64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, pt), rhs.toFloat(f64, pt)) },
-        80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, pt), rhs.toFloat(f80, pt)) },
-        128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, pt), rhs.toFloat(f128, pt)) },
+        16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, zcu), rhs.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, zcu), rhs.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, zcu), rhs.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, zcu), rhs.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3021,10 +3076,10 @@ pub fn floatMul(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const lhs_elem = try lhs.elemValue(pt, i);
             const rhs_elem = try rhs.elemValue(pt, i);
@@ -3044,14 +3099,14 @@ pub fn floatMulScalar(
     float_type: Type,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = lhs.toFloat(f16, pt) * rhs.toFloat(f16, pt) },
-        32 => .{ .f32 = lhs.toFloat(f32, pt) * rhs.toFloat(f32, pt) },
-        64 => .{ .f64 = lhs.toFloat(f64, pt) * rhs.toFloat(f64, pt) },
-        80 => .{ .f80 = lhs.toFloat(f80, pt) * rhs.toFloat(f80, pt) },
-        128 => .{ .f128 = lhs.toFloat(f128, pt) * rhs.toFloat(f128, pt) },
+        16 => .{ .f16 = lhs.toFloat(f16, zcu) * rhs.toFloat(f16, zcu) },
+        32 => .{ .f32 = lhs.toFloat(f32, zcu) * rhs.toFloat(f32, zcu) },
+        64 => .{ .f64 = lhs.toFloat(f64, zcu) * rhs.toFloat(f64, zcu) },
+        80 => .{ .f80 = lhs.toFloat(f80, zcu) * rhs.toFloat(f80, zcu) },
+        128 => .{ .f128 = lhs.toFloat(f128, zcu) * rhs.toFloat(f128, zcu) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3077,14 +3132,14 @@ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 }
 
 pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @sqrt(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @sqrt(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @sqrt(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @sqrt(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @sqrt(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @sqrt(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @sqrt(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @sqrt(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @sqrt(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @sqrt(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3094,10 +3149,10 @@ pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
 }
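
Every scalar float op in this file follows the shape seen in `sqrtScalar`: dispatch on `floatBits(target)`, compute with the host float operation, then re-intern the result. A caller-side sketch, assuming `val` is a float-typed `Value` of type `float_ty`:

    const root = try val.sqrtScalar(float_ty, pt);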
 
 pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try sinScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3111,14 +3166,14 @@ pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 }
 
 pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @sin(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @sin(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @sin(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @sin(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @sin(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @sin(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @sin(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @sin(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @sin(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @sin(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3128,10 +3183,10 @@ pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
 }
 
 pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try cosScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3145,14 +3200,14 @@ pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 }
 
 pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @cos(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @cos(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @cos(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @cos(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @cos(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @cos(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @cos(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @cos(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @cos(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @cos(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3162,10 +3217,10 @@ pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
 }
 
 pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try tanScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3179,14 +3234,14 @@ pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 }
 
 pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @tan(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @tan(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @tan(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @tan(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @tan(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @tan(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @tan(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @tan(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @tan(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @tan(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3196,10 +3251,10 @@ pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
 }
 
 pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try expScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3213,14 +3268,14 @@ pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 }
 
 pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @exp(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @exp(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @exp(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @exp(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @exp(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @exp(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @exp(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @exp(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @exp(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @exp(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3230,10 +3285,10 @@ pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
 }
 
 pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try exp2Scalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3247,14 +3302,14 @@ pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 }
 
 pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @exp2(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @exp2(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @exp2(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @exp2(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @exp2(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @exp2(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @exp2(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @exp2(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @exp2(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @exp2(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3264,10 +3319,10 @@ pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
 }
 
 pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try logScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3281,14 +3336,14 @@ pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 }
 
 pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @log(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @log(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @log(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @log(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @log(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @log(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @log(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @log(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @log(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @log(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3298,10 +3353,10 @@ pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
 }
 
 pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try log2Scalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3315,14 +3370,14 @@ pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 }
 
 pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @log2(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @log2(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @log2(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @log2(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @log2(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @log2(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @log2(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @log2(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @log2(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @log2(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3332,10 +3387,10 @@ pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
 }
 
 pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try log10Scalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3349,14 +3404,14 @@ pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 }
 
 pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @log10(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @log10(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @log10(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @log10(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @log10(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @log10(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @log10(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @log10(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @log10(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @log10(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3366,10 +3421,10 @@ pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Er
 }
 
 pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
-        const scalar_ty = ty.scalarType(mod);
+    const zcu = pt.zcu;
+    if (ty.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
+        const scalar_ty = ty.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try absScalar(elem_val, scalar_ty, pt, arena)).toIntern();
@@ -3383,30 +3438,30 @@ pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
 }
 
 pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
-    const mod = pt.zcu;
-    switch (ty.zigTypeTag(mod)) {
+    const zcu = pt.zcu;
+    switch (ty.zigTypeTag(zcu)) {
         .Int => {
             var buffer: Value.BigIntSpace = undefined;
-            var operand_bigint = try val.toBigInt(&buffer, pt).toManaged(arena);
+            var operand_bigint = try val.toBigInt(&buffer, zcu).toManaged(arena);
             operand_bigint.abs();
 
             return pt.intValue_big(try ty.toUnsigned(pt), operand_bigint.toConst());
         },
         .ComptimeInt => {
             var buffer: Value.BigIntSpace = undefined;
-            var operand_bigint = try val.toBigInt(&buffer, pt).toManaged(arena);
+            var operand_bigint = try val.toBigInt(&buffer, zcu).toManaged(arena);
             operand_bigint.abs();
 
             return pt.intValue_big(ty, operand_bigint.toConst());
         },
         .ComptimeFloat, .Float => {
-            const target = mod.getTarget();
+            const target = zcu.getTarget();
             const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) {
-                16 => .{ .f16 = @abs(val.toFloat(f16, pt)) },
-                32 => .{ .f32 = @abs(val.toFloat(f32, pt)) },
-                64 => .{ .f64 = @abs(val.toFloat(f64, pt)) },
-                80 => .{ .f80 = @abs(val.toFloat(f80, pt)) },
-                128 => .{ .f128 = @abs(val.toFloat(f128, pt)) },
+                16 => .{ .f16 = @abs(val.toFloat(f16, zcu)) },
+                32 => .{ .f32 = @abs(val.toFloat(f32, zcu)) },
+                64 => .{ .f64 = @abs(val.toFloat(f64, zcu)) },
+                80 => .{ .f80 = @abs(val.toFloat(f80, zcu)) },
+                128 => .{ .f128 = @abs(val.toFloat(f128, zcu)) },
                 else => unreachable,
             };
             return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3419,10 +3474,10 @@ pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allo
 }
 
 pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try floorScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3436,14 +3491,14 @@ pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 }
 
 pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @floor(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @floor(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @floor(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @floor(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @floor(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @floor(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @floor(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @floor(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @floor(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @floor(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3453,10 +3508,10 @@ pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Er
 }
 
 pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try ceilScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3470,14 +3525,14 @@ pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 }
 
 pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @ceil(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @ceil(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @ceil(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @ceil(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @ceil(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @ceil(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @ceil(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @ceil(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @ceil(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @ceil(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3487,10 +3542,10 @@ pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
 }
 
 pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try roundScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3504,14 +3559,14 @@ pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 }
 
 pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @round(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @round(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @round(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @round(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @round(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @round(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @round(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @round(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @round(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @round(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3521,10 +3576,10 @@ pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Er
 }
 
 pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try truncScalar(elem_val, scalar_ty, pt)).toIntern();
@@ -3538,14 +3593,14 @@ pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 }
 
 pub fn truncScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @trunc(val.toFloat(f16, pt)) },
-        32 => .{ .f32 = @trunc(val.toFloat(f32, pt)) },
-        64 => .{ .f64 = @trunc(val.toFloat(f64, pt)) },
-        80 => .{ .f80 = @trunc(val.toFloat(f80, pt)) },
-        128 => .{ .f128 = @trunc(val.toFloat(f128, pt)) },
+        16 => .{ .f16 = @trunc(val.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @trunc(val.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @trunc(val.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @trunc(val.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @trunc(val.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3562,10 +3617,10 @@ pub fn mulAdd(
     arena: Allocator,
     pt: Zcu.PerThread,
 ) !Value {
-    const mod = pt.zcu;
-    if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-        const scalar_ty = float_type.scalarType(mod);
+    const zcu = pt.zcu;
+    if (float_type.zigTypeTag(zcu) == .Vector) {
+        const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
+        const scalar_ty = float_type.scalarType(zcu);
         for (result_data, 0..) |*scalar, i| {
             const mulend1_elem = try mulend1.elemValue(pt, i);
             const mulend2_elem = try mulend2.elemValue(pt, i);
@@ -3587,14 +3642,14 @@ pub fn mulAddScalar(
     addend: Value,
     pt: Zcu.PerThread,
 ) Allocator.Error!Value {
-    const mod = pt.zcu;
-    const target = mod.getTarget();
+    const zcu = pt.zcu;
+    const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-        16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, pt), mulend2.toFloat(f16, pt), addend.toFloat(f16, pt)) },
-        32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, pt), mulend2.toFloat(f32, pt), addend.toFloat(f32, pt)) },
-        64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, pt), mulend2.toFloat(f64, pt), addend.toFloat(f64, pt)) },
-        80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, pt), mulend2.toFloat(f80, pt), addend.toFloat(f80, pt)) },
-        128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, pt), mulend2.toFloat(f128, pt), addend.toFloat(f128, pt)) },
+        16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, zcu), mulend2.toFloat(f16, zcu), addend.toFloat(f16, zcu)) },
+        32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, zcu), mulend2.toFloat(f32, zcu), addend.toFloat(f32, zcu)) },
+        64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, zcu), mulend2.toFloat(f64, zcu), addend.toFloat(f64, zcu)) },
+        80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, zcu), mulend2.toFloat(f80, zcu), addend.toFloat(f80, zcu)) },
+        128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, zcu), mulend2.toFloat(f128, zcu), addend.toFloat(f128, zcu)) },
         else => unreachable,
     };
     return Value.fromInterned(try pt.intern(.{ .float = .{
@@ -3606,10 +3661,11 @@ pub fn mulAddScalar(
 /// If the value is represented in-memory as a series of bytes that all
 /// have the same value, return that byte value, otherwise null.
 pub fn hasRepeatedByteRepr(val: Value, ty: Type, pt: Zcu.PerThread) !?u8 {
-    const abi_size = std.math.cast(usize, ty.abiSize(pt)) orelse return null;
+    const zcu = pt.zcu;
+    const abi_size = std.math.cast(usize, ty.abiSize(zcu)) orelse return null;
     assert(abi_size >= 1);
-    const byte_buffer = try pt.zcu.gpa.alloc(u8, abi_size);
-    defer pt.zcu.gpa.free(byte_buffer);
+    const byte_buffer = try zcu.gpa.alloc(u8, abi_size);
+    defer zcu.gpa.free(byte_buffer);
 
     writeToMemory(val, ty, pt, byte_buffer) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
@@ -3756,13 +3812,13 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
         .Struct => field: {
             const field_ty = aggregate_ty.structFieldType(field_idx, zcu);
             switch (aggregate_ty.containerLayout(zcu)) {
-                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), pt, .sema) },
+                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) },
                 .@"extern" => {
                     // Well-defined layout, so just offset the pointer appropriately.
-                    const byte_off = aggregate_ty.structFieldOffset(field_idx, pt);
+                    const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
                     const field_align = a: {
                         const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: {
-                            break :pa (try aggregate_ty.abiAlignmentAdvanced(pt, .sema)).scalar;
+                            break :pa try aggregate_ty.abiAlignmentSema(pt);
                         } else parent_ptr_info.flags.alignment;
                         break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off)));
                     };
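
The `ptrField` hunks above and below show the other half of the rework: the old `fooAdvanced(pt, .sema)` calls, whose results had to be unwrapped with `.scalar`, are replaced either by a dedicated `fooSema(pt)` wrapper or by an explicit-strategy `fooInner(.sema, zcu, pt.tid)` call. A hedged sketch of what such a wrapper pair plausibly looks like; the payloads and limits are invented, only the naming pattern mirrors the diff:

const std = @import("std");

const ResolveStrat = enum { normal, sema };

const AlignmentResult = union(enum) {
    scalar: u64,
    lazy, // layout not yet resolved; only reachable under .normal
};

fn abiAlignmentInner(strat: ResolveStrat, bits: u64) AlignmentResult {
    _ = strat; // a real implementation resolves layout when strat == .sema
    return .{ .scalar = if (bits <= 8) 1 else if (bits <= 16) 2 else 4 };
}

// The convenience wrapper: pin the strategy to .sema and unwrap .scalar,
// so call sites lose both the strategy argument and the .scalar suffix.
fn abiAlignmentSema(bits: u64) u64 {
    return abiAlignmentInner(.sema, bits).scalar;
}

test "sema wrapper unwraps the scalar result" {
    try std.testing.expectEqual(@as(u64, 2), abiAlignmentSema(12));
}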
@@ -3781,7 +3837,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
                             new.packed_offset = packed_offset;
                             new.child = field_ty.toIntern();
                             if (new.flags.alignment == .none) {
-                                new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(pt, .sema)).scalar;
+                                new.flags.alignment = try aggregate_ty.abiAlignmentSema(pt);
                             }
                             break :info new;
                         });
@@ -3807,7 +3863,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
             const union_obj = zcu.typeToUnion(aggregate_ty).?;
             const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
             switch (aggregate_ty.containerLayout(zcu)) {
-                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), pt, .sema) },
+                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) },
                 .@"extern" => {
                     // Point to the same address.
                     const result_ty = try pt.ptrTypeSema(info: {
@@ -3820,17 +3876,17 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
                 .@"packed" => {
                     // If the field has an ABI size matching its bit size, then we can continue to use a
                     // non-bit pointer if the parent pointer is also a non-bit pointer.
-                    if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(pt, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(pt, .sema)) {
+                    if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar * 8 == try field_ty.bitSizeSema(pt)) {
                         // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely.
                         const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) {
                             .little => 0,
-                            .big => (try aggregate_ty.abiSizeAdvanced(pt, .sema)).scalar - (try field_ty.abiSizeAdvanced(pt, .sema)).scalar,
+                            .big => (try aggregate_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar - (try field_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar,
                         };
                         const result_ty = try pt.ptrTypeSema(info: {
                             var new = parent_ptr_info;
                             new.child = field_ty.toIntern();
                             new.flags.alignment = InternPool.Alignment.fromLog2Units(
-                                @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema)).toByteUnits().?),
+                                @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentSema(pt)).toByteUnits().?),
                             );
                             break :info new;
                         });
@@ -3841,7 +3897,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
                             var new = parent_ptr_info;
                             new.child = field_ty.toIntern();
                             if (new.packed_offset.host_size == 0) {
-                                new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(pt, .sema)) + 7) / 8);
+                                new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeSema(pt)) + 7) / 8);
                                 assert(new.packed_offset.bit_offset == 0);
                             }
                             break :info new;
@@ -3854,8 +3910,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
         .Pointer => field_ty: {
             assert(aggregate_ty.isSlice(zcu));
             break :field_ty switch (field_idx) {
-                Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(pt) },
-                Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(pt) },
+                Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(zcu) },
+                Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(zcu) },
                 else => unreachable,
             };
         },
@@ -3863,7 +3919,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
     };
 
     const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: {
-        const ty_align = (try field_ty.abiAlignmentAdvanced(pt, .sema)).scalar;
+        const ty_align = (try field_ty.abiAlignmentInner(.sema, zcu, pt.tid)).scalar;
         const true_field_align = if (field_align == .none) ty_align else field_align;
         const new_align = true_field_align.min(parent_ptr_info.flags.alignment);
         if (new_align == ty_align) break :a .none;
@@ -3919,21 +3975,21 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, pt: Zcu.PerThread) !Value
 
     const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
         .One => switch (elem_ty.zigTypeTag(zcu)) {
-            .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(pt, .sema), 8) },
+            .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeSema(pt), 8) },
             .Array => strat: {
                 const arr_elem_ty = elem_ty.childType(zcu);
-                if (try arr_elem_ty.comptimeOnlyAdvanced(pt, .sema)) {
+                if (try arr_elem_ty.comptimeOnlySema(pt)) {
                     break :strat .{ .elem_ptr = arr_elem_ty };
                 }
-                break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(pt, .sema)).scalar };
+                break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar };
             },
             else => unreachable,
         },
 
-        .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(pt, .sema))
+        .Many, .C => if (try elem_ty.comptimeOnlySema(pt))
             .{ .elem_ptr = elem_ty }
         else
-            .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar },
+            .{ .offset = field_idx * (try elem_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar },
 
         .Slice => unreachable,
     };
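
For sized element types, the `.offset` strat chosen above is plain byte arithmetic: a `.Many`/`.C` pointer to an element of ABI size s advances by `field_idx * s`, and the `.elem_ptr` fallback exists only for comptime-only elements that have no well-defined byte size. A one-line check of the arithmetic for a hypothetical `[*]u32`:

const std = @import("std");

test "many-pointer element offset is index times ABI size" {
    const abi_size: u64 = @sizeOf(u32); // 4
    const field_idx: u64 = 3;
    // the .offset strat above computes exactly this product
    try std.testing.expectEqual(@as(u64, 12), field_idx * abi_size);
}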
@@ -4142,22 +4198,32 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
             const base_ptr_ty = base_ptr.typeOf(zcu);
             const agg_ty = base_ptr_ty.childType(zcu);
             const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
-                .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), pt, if (have_sema) .sema else .normal) },
-                .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), pt, if (have_sema) .sema else .normal) },
+                .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(
+                    @intCast(field.index),
+                    if (have_sema) .sema else .normal,
+                    pt.zcu,
+                    if (have_sema) pt.tid else {},
+                ) },
+                .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(
+                    @intCast(field.index),
+                    if (have_sema) .sema else .normal,
+                    pt.zcu,
+                    if (have_sema) pt.tid else {},
+                ) },
                 .Pointer => .{ switch (field.index) {
                     Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
                     Value.slice_len_index => Type.usize,
                     else => unreachable,
-                }, Type.usize.abiAlignment(pt) },
+                }, Type.usize.abiAlignment(zcu) },
                 else => unreachable,
             };
-            const base_align = base_ptr_ty.ptrAlignment(pt);
+            const base_align = base_ptr_ty.ptrAlignment(zcu);
             const result_align = field_align.minStrict(base_align);
             const result_ty = try pt.ptrType(.{
                 .child = field_ty.toIntern(),
                 .flags = flags: {
                     var flags = base_ptr_ty.ptrInfo(zcu).flags;
-                    if (result_align == field_ty.abiAlignment(pt)) {
+                    if (result_align == field_ty.abiAlignment(zcu)) {
                         flags.alignment = .none;
                     } else {
                         flags.alignment = result_align;
@@ -4198,7 +4264,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
     }
 
     const need_child = Type.fromInterned(ptr.ty).childType(zcu);
-    if (need_child.comptimeOnly(pt)) {
+    if (need_child.comptimeOnly(zcu)) {
         // No refinement can happen - this pointer is presumably invalid.
         // Just offset it.
         const parent = try arena.create(PointerDeriveStep);
@@ -4209,7 +4275,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
             .new_ptr_ty = Type.fromInterned(ptr.ty),
         } };
     }
-    const need_bytes = need_child.abiSize(pt);
+    const need_bytes = need_child.abiSize(zcu);
 
     var cur_derive = base_derive;
     var cur_offset = ptr.byte_offset;
@@ -4248,7 +4314,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
 
             .Array => {
                 const elem_ty = cur_ty.childType(zcu);
-                const elem_size = elem_ty.abiSize(pt);
+                const elem_size = elem_ty.abiSize(zcu);
                 const start_idx = cur_offset / elem_size;
                 const end_idx = (cur_offset + need_bytes + elem_size - 1) / elem_size;
                 if (end_idx == start_idx + 1) {
@@ -4279,11 +4345,11 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
                 .auto, .@"packed" => break,
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
                     const field_ty = cur_ty.structFieldType(field_idx, zcu);
-                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                    const end_off = start_off + field_ty.abiSize(pt);
+                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const end_off = start_off + field_ty.abiSize(zcu);
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                         const old_ptr_ty = try cur_derive.ptrType(pt);
-                        const parent_align = old_ptr_ty.ptrAlignment(pt);
+                        const parent_align = old_ptr_ty.ptrAlignment(zcu);
                         const field_align = InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(start_off)));
                         const parent = try arena.create(PointerDeriveStep);
                         parent.* = cur_derive;
@@ -4291,7 +4357,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
                             .child = field_ty.toIntern(),
                             .flags = flags: {
                                 var flags = old_ptr_ty.ptrInfo(zcu).flags;
-                                if (field_align == field_ty.abiAlignment(pt)) {
+                                if (field_align == field_ty.abiAlignment(zcu)) {
                                     flags.alignment = .none;
                                 } else {
                                     flags.alignment = field_align;
@@ -4325,13 +4391,17 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
     } };
 }
 
-pub fn resolveLazy(val: Value, arena: Allocator, pt: Zcu.PerThread) Zcu.SemaError!Value {
+pub fn resolveLazy(
+    val: Value,
+    arena: Allocator,
+    pt: Zcu.PerThread,
+) Zcu.SemaError!Value {
     switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
         .int => |int| switch (int.storage) {
             .u64, .i64, .big_int => return val,
             .lazy_align, .lazy_size => return pt.intValue(
                 Type.fromInterned(int.ty),
-                (try val.getUnsignedIntAdvanced(pt, .sema)).?,
+                (try val.getUnsignedIntInner(.sema, pt.zcu, pt.tid)).?,
             ),
         },
         .slice => |slice| {
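
`resolveLazy`, whose first hunk ends just above, is a consumer of the same strategy plumbing: a `lazy_align` or `lazy_size` integer is forced by running the layout query under `.sema` (`getUnsignedIntInner(.sema, pt.zcu, pt.tid)`) and re-interning the result as a concrete integer of the same type. A self-contained toy of that force-then-reintern step; only the `u64`/`lazy_align` tag names mirror the diff, the rest is invented for illustration:

const std = @import("std");

// Toy lazy integer; real lazy ints live in the InternPool.
const LazyInt = union(enum) {
    u64: u64,
    lazy_align: u32, // pretend payload: the type's bit width
};

fn resolveLazy(val: LazyInt) u64 {
    return switch (val) {
        .u64 => |x| x, // already concrete: returned unchanged
        // Force the layout query, then "re-intern" as a concrete int.
        .lazy_align => |bits| if (bits <= 8) 1 else if (bits <= 16) 2 else 4,
    };
}

test "lazy ints are forced to concrete values" {
    try std.testing.expectEqual(@as(u64, 2), resolveLazy(.{ .lazy_align = 12 }));
    try std.testing.expectEqual(@as(u64, 7), resolveLazy(.{ .u64 = 7 }));
}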
src/Zcu.zig
@@ -2109,9 +2109,9 @@ pub const CompileError = error{
     ComptimeBreak,
 };
 
-pub fn init(mod: *Zcu, thread_count: usize) !void {
-    const gpa = mod.gpa;
-    try mod.intern_pool.init(gpa, thread_count);
+pub fn init(zcu: *Zcu, thread_count: usize) !void {
+    const gpa = zcu.gpa;
+    try zcu.intern_pool.init(gpa, thread_count);
 }
 
 pub fn deinit(zcu: *Zcu) void {
@@ -2204,8 +2204,8 @@ pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
     return zcu.intern_pool.namespacePtr(index);
 }
 
-pub fn namespacePtrUnwrap(mod: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
-    return mod.namespacePtr(index.unwrap() orelse return null);
+pub fn namespacePtrUnwrap(zcu: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
+    return zcu.namespacePtr(index.unwrap() orelse return null);
 }
 
 // TODO https://github.com/ziglang/zig/issues/8643
@@ -2682,7 +2682,7 @@ pub fn mapOldZirToNew(
 ///
 /// The caller is responsible for ensuring the function decl itself is already
 /// analyzed, and for ensuring it can exist at runtime (see
-/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
+/// `Type.fnHasRuntimeBitsSema`). This function does *not* guarantee that the body
 /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
 pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !void {
     const ip = &zcu.intern_pool;
@@ -2846,16 +2846,16 @@ pub fn errorSetBits(mod: *Zcu) u16 {
 }
 
 pub fn errNote(
-    mod: *Zcu,
+    zcu: *Zcu,
     src_loc: LazySrcLoc,
     parent: *ErrorMsg,
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    const msg = try std.fmt.allocPrint(mod.gpa, format, args);
-    errdefer mod.gpa.free(msg);
+    const msg = try std.fmt.allocPrint(zcu.gpa, format, args);
+    errdefer zcu.gpa.free(msg);
 
-    parent.notes = try mod.gpa.realloc(parent.notes, parent.notes.len + 1);
+    parent.notes = try zcu.gpa.realloc(parent.notes, parent.notes.len + 1);
     parent.notes[parent.notes.len - 1] = .{
         .src_loc = src_loc,
         .msg = msg,
@@ -2876,14 +2876,14 @@ pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
     return zcu.root_mod.optimize_mode;
 }
 
-fn lockAndClearFileCompileError(mod: *Zcu, file: *File) void {
+fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void {
     switch (file.status) {
         .success_zir, .retryable_failure => {},
         .never_loaded, .parse_failure, .astgen_failure => {
-            mod.comp.mutex.lock();
-            defer mod.comp.mutex.unlock();
-            if (mod.failed_files.fetchSwapRemove(file)) |kv| {
-                if (kv.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            if (zcu.failed_files.fetchSwapRemove(file)) |kv| {
+                if (kv.value) |msg| msg.destroy(zcu.gpa); // Delete previous error message.
             }
         },
     }
@@ -2965,11 +2965,11 @@ pub const AtomicPtrAlignmentDiagnostics = struct {
 // TODO this function does not take into account CPU features, which can affect
 // this value. Audit this!
 pub fn atomicPtrAlignment(
-    mod: *Zcu,
+    zcu: *Zcu,
     ty: Type,
     diags: *AtomicPtrAlignmentDiagnostics,
 ) AtomicPtrAlignmentError!Alignment {
-    const target = mod.getTarget();
+    const target = zcu.getTarget();
     const max_atomic_bits: u16 = switch (target.cpu.arch) {
         .avr,
         .msp430,
@@ -3039,8 +3039,8 @@ pub fn atomicPtrAlignment(
         }
         return .none;
     }
-    if (ty.isAbiInt(mod)) {
-        const bit_count = ty.intInfo(mod).bits;
+    if (ty.isAbiInt(zcu)) {
+        const bit_count = ty.intInfo(zcu).bits;
         if (bit_count > max_atomic_bits) {
             diags.* = .{
                 .bits = bit_count,
@@ -3050,7 +3050,7 @@ pub fn atomicPtrAlignment(
         }
         return .none;
     }
-    if (ty.isPtrAtRuntime(mod)) return .none;
+    if (ty.isPtrAtRuntime(zcu)) return .none;
     return error.BadType;
 }
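
The shape of the integer check in `atomicPtrAlignment` is: derive a per-target `max_atomic_bits`, and for ABI integers whose bit count exceeds it, fill the diagnostics out-parameter before returning an error. A stripped-down sketch of that control flow; the limit value and the error name here are illustrative, not the compiler's real per-arch table:

const std = @import("std");

const Diags = struct { bits: u16 = 0, max_bits: u16 = 0 };

fn checkAtomicInt(bit_count: u16, max_atomic_bits: u16, diags: *Diags) error{IntTooBig}!void {
    if (bit_count > max_atomic_bits) {
        // Fill the diagnostics before erroring, as the real function does.
        diags.* = .{ .bits = bit_count, .max_bits = max_atomic_bits };
        return error.IntTooBig;
    }
}

test "oversized atomic ints report the limit via diagnostics" {
    var diags: Diags = .{};
    try std.testing.expectError(error.IntTooBig, checkAtomicInt(256, 128, &diags));
    try std.testing.expectEqual(@as(u16, 128), diags.max_bits);
}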
 
@@ -3058,45 +3058,45 @@ pub fn atomicPtrAlignment(
 /// * `@TypeOf(.{})`
 /// * A struct which has no fields (`struct {}`).
 /// * Not a struct.
-pub fn typeToStruct(mod: *Zcu, ty: Type) ?InternPool.LoadedStructType {
+pub fn typeToStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
     if (ty.ip_index == .none) return null;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.ip_index)) {
         .struct_type => ip.loadStructType(ty.ip_index),
         else => null,
     };
 }
 
-pub fn typeToPackedStruct(mod: *Zcu, ty: Type) ?InternPool.LoadedStructType {
-    const s = mod.typeToStruct(ty) orelse return null;
+pub fn typeToPackedStruct(zcu: *Zcu, ty: Type) ?InternPool.LoadedStructType {
+    const s = zcu.typeToStruct(ty) orelse return null;
     if (s.layout != .@"packed") return null;
     return s;
 }
 
-pub fn typeToUnion(mod: *Zcu, ty: Type) ?InternPool.LoadedUnionType {
+pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
     if (ty.ip_index == .none) return null;
-    const ip = &mod.intern_pool;
+    const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.ip_index)) {
         .union_type => ip.loadUnionType(ty.ip_index),
         else => null,
     };
 }
 
-pub fn typeToFunc(mod: *Zcu, ty: Type) ?InternPool.Key.FuncType {
+pub fn typeToFunc(zcu: *const Zcu, ty: Type) ?InternPool.Key.FuncType {
     if (ty.ip_index == .none) return null;
-    return mod.intern_pool.indexToFuncType(ty.toIntern());
+    return zcu.intern_pool.indexToFuncType(ty.toIntern());
 }
 
 pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index {
     return zcu.intern_pool.iesFuncIndex(ies_index);
 }
 
-pub fn funcInfo(mod: *Zcu, func_index: InternPool.Index) InternPool.Key.Func {
-    return mod.intern_pool.indexToKey(func_index).func;
+pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
+    return zcu.intern_pool.indexToKey(func_index).func;
 }
 
-pub fn toEnum(mod: *Zcu, comptime E: type, val: Value) E {
-    return mod.intern_pool.toEnum(E, val.toIntern());
+pub fn toEnum(zcu: *const Zcu, comptime E: type, val: Value) E {
+    return zcu.intern_pool.toEnum(E, val.toIntern());
 }
 
 pub const UnionLayout = struct {
@@ -3121,8 +3121,8 @@ pub const UnionLayout = struct {
 };
 
 /// Returns the index of the active field, given the current tag value
-pub fn unionTagFieldIndex(mod: *Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
-    const ip = &mod.intern_pool;
+pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
+    const ip = &zcu.intern_pool;
     if (enum_tag.toIntern() == .none) return null;
     assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
     return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
@@ -3348,7 +3348,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
     return result;
 }
 
-pub fn fileByIndex(zcu: *Zcu, file_index: File.Index) *File {
+pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
     return zcu.intern_pool.filePtr(file_index);
 }
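
A quieter change running through these Zcu.zig hunks: pure intern-pool lookups (`typeToUnion`, `typeToFunc`, `funcInfo`, `toEnum`, `fileByIndex`) are tightened to take `*const Zcu`, so const-qualified callers can use them without a cast. A minimal illustration of why the const qualifier matters, again with toy types rather than the real ones:

const std = @import("std");

const Pool = struct {
    items: [2]u32 = .{ 10, 20 },
};

const Zcu = struct {
    intern_pool: Pool = .{},

    // Read-only lookup: taking *const Zcu admits both const and
    // mutable callers, mirroring the fileByIndex change above.
    fn fileByIndex(zcu: *const Zcu, i: usize) u32 {
        return zcu.intern_pool.items[i];
    }
};

test "const pointers can call read-only queries" {
    const zcu: Zcu = .{};
    // &zcu is a *const Zcu; this compiles only because the
    // parameter is const-qualified.
    try std.testing.expectEqual(@as(u32, 20), zcu.fileByIndex(1));
}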