Commit 0e5335aaf5

mlugg <mlugg@mlugg.co.uk>
2024-07-04 06:00:32
compiler: rework type resolution, fully resolve all types
I'm so sorry. This commit was just meant to be making all types fully resolve by queueing resolution at the moment of their creation. Unfortunately, a lot of dominoes ended up falling. Here's what happened: * I added a work queue job to fully resolve a type. * I realised that from here we could eliminate `Sema.types_to_resolve` if we made function codegen a separate job. This is desirable for simplicity of both spec and implementation. * This led to a new AIR traversal to detect whether any required type is unresolved. If a type in the AIR failed to resolve, then we can't run codegen. * Because full type resolution now occurs by the work queue job, a bug was exposed whereby error messages for type resolution were associated with the wrong `Decl`, resulting in duplicate error messages when the type was also resolved "by" its owner `Decl` (which really *all* resolution should be done on). * A correct fix for this requires using a different `Sema` when performing type resolution: we need a `Sema` owned by the type. Also note that this fix is necessary for incremental compilation. * This means a whole bunch of functions no longer need to take `Sema`s. * First-order effects: `resolveTypeFields`, `resolveTypeLayout`, etc * Second-order effects: `Type.abiAlignmentAdvanced`, `Value.orderAgainstZeroAdvanced`, etc The end result of this is, in short, a more correct compiler and a simpler language specification. This regressed a few error notes in the test cases, but nothing that seems worth blocking this change. Oh, also, I ripped out the old code in `test/src/Cases.zig` which introduced a dependency on `Compilation`. This dependency was problematic at best, and this code has been unused for a while. When we re-enable incremental test cases, we must rewrite their executor to use the compiler server protocol.
1 parent 2f0f1ef
src/Air/types_resolved.zig
@@ -0,0 +1,521 @@
+const Air = @import("../Air.zig");
+const Zcu = @import("../Zcu.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const InternPool = @import("../InternPool.zig");
+
+/// Given a body of AIR instructions, returns whether all type resolution necessary for codegen is complete.
+/// If `false`, then type resolution must have failed, so codegen cannot proceed.
+pub fn typesFullyResolved(air: Air, zcu: *Zcu) bool {
+    return checkBody(air, air.getMainBody(), zcu);
+}
+
+fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
+    const tags = air.instructions.items(.tag);
+    const datas = air.instructions.items(.data);
+
+    for (body) |inst| {
+        const data = datas[@intFromEnum(inst)];
+        switch (tags[@intFromEnum(inst)]) {
+            .inferred_alloc, .inferred_alloc_comptime => unreachable,
+
+            .arg => {
+                if (!checkType(data.arg.ty.toType(), zcu)) return false;
+            },
+
+            .add,
+            .add_safe,
+            .add_optimized,
+            .add_wrap,
+            .add_sat,
+            .sub,
+            .sub_safe,
+            .sub_optimized,
+            .sub_wrap,
+            .sub_sat,
+            .mul,
+            .mul_safe,
+            .mul_optimized,
+            .mul_wrap,
+            .mul_sat,
+            .div_float,
+            .div_float_optimized,
+            .div_trunc,
+            .div_trunc_optimized,
+            .div_floor,
+            .div_floor_optimized,
+            .div_exact,
+            .div_exact_optimized,
+            .rem,
+            .rem_optimized,
+            .mod,
+            .mod_optimized,
+            .max,
+            .min,
+            .bit_and,
+            .bit_or,
+            .shr,
+            .shr_exact,
+            .shl,
+            .shl_exact,
+            .shl_sat,
+            .xor,
+            .cmp_lt,
+            .cmp_lt_optimized,
+            .cmp_lte,
+            .cmp_lte_optimized,
+            .cmp_eq,
+            .cmp_eq_optimized,
+            .cmp_gte,
+            .cmp_gte_optimized,
+            .cmp_gt,
+            .cmp_gt_optimized,
+            .cmp_neq,
+            .cmp_neq_optimized,
+            .bool_and,
+            .bool_or,
+            .store,
+            .store_safe,
+            .set_union_tag,
+            .array_elem_val,
+            .slice_elem_val,
+            .ptr_elem_val,
+            .memset,
+            .memset_safe,
+            .memcpy,
+            .atomic_store_unordered,
+            .atomic_store_monotonic,
+            .atomic_store_release,
+            .atomic_store_seq_cst,
+            => {
+                if (!checkRef(data.bin_op.lhs, zcu)) return false;
+                if (!checkRef(data.bin_op.rhs, zcu)) return false;
+            },
+
+            .not,
+            .bitcast,
+            .clz,
+            .ctz,
+            .popcount,
+            .byte_swap,
+            .bit_reverse,
+            .abs,
+            .load,
+            .fptrunc,
+            .fpext,
+            .intcast,
+            .trunc,
+            .optional_payload,
+            .optional_payload_ptr,
+            .optional_payload_ptr_set,
+            .wrap_optional,
+            .unwrap_errunion_payload,
+            .unwrap_errunion_err,
+            .unwrap_errunion_payload_ptr,
+            .unwrap_errunion_err_ptr,
+            .errunion_payload_ptr_set,
+            .wrap_errunion_payload,
+            .wrap_errunion_err,
+            .struct_field_ptr_index_0,
+            .struct_field_ptr_index_1,
+            .struct_field_ptr_index_2,
+            .struct_field_ptr_index_3,
+            .get_union_tag,
+            .slice_len,
+            .slice_ptr,
+            .ptr_slice_len_ptr,
+            .ptr_slice_ptr_ptr,
+            .array_to_slice,
+            .int_from_float,
+            .int_from_float_optimized,
+            .float_from_int,
+            .splat,
+            .error_set_has_value,
+            .addrspace_cast,
+            .c_va_arg,
+            .c_va_copy,
+            => {
+                if (!checkType(data.ty_op.ty.toType(), zcu)) return false;
+                if (!checkRef(data.ty_op.operand, zcu)) return false;
+            },
+
+            .alloc,
+            .ret_ptr,
+            .c_va_start,
+            => {
+                if (!checkType(data.ty, zcu)) return false;
+            },
+
+            .ptr_add,
+            .ptr_sub,
+            .add_with_overflow,
+            .sub_with_overflow,
+            .mul_with_overflow,
+            .shl_with_overflow,
+            .slice,
+            .slice_elem_ptr,
+            .ptr_elem_ptr,
+            => {
+                const bin = air.extraData(Air.Bin, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .block,
+            .loop,
+            => {
+                const extra = air.extraData(Air.Block, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .dbg_inline_block => {
+                const extra = air.extraData(Air.DbgInlineBlock, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .sqrt,
+            .sin,
+            .cos,
+            .tan,
+            .exp,
+            .exp2,
+            .log,
+            .log2,
+            .log10,
+            .floor,
+            .ceil,
+            .round,
+            .trunc_float,
+            .neg,
+            .neg_optimized,
+            .is_null,
+            .is_non_null,
+            .is_null_ptr,
+            .is_non_null_ptr,
+            .is_err,
+            .is_non_err,
+            .is_err_ptr,
+            .is_non_err_ptr,
+            .int_from_ptr,
+            .int_from_bool,
+            .ret,
+            .ret_safe,
+            .ret_load,
+            .is_named_enum_value,
+            .tag_name,
+            .error_name,
+            .cmp_lt_errors_len,
+            .c_va_end,
+            .set_err_return_trace,
+            => {
+                if (!checkRef(data.un_op, zcu)) return false;
+            },
+
+            .br => {
+                if (!checkRef(data.br.operand, zcu)) return false;
+            },
+
+            .cmp_vector,
+            .cmp_vector_optimized,
+            => {
+                const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.lhs, zcu)) return false;
+                if (!checkRef(extra.rhs, zcu)) return false;
+            },
+
+            .reduce,
+            .reduce_optimized,
+            => {
+                if (!checkRef(data.reduce.operand, zcu)) return false;
+            },
+
+            .struct_field_ptr,
+            .struct_field_val,
+            => {
+                const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.struct_operand, zcu)) return false;
+            },
+
+            .shuffle => {
+                const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.a, zcu)) return false;
+                if (!checkRef(extra.b, zcu)) return false;
+                if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false;
+            },
+
+            .cmpxchg_weak,
+            .cmpxchg_strong,
+            => {
+                const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.ptr, zcu)) return false;
+                if (!checkRef(extra.expected_value, zcu)) return false;
+                if (!checkRef(extra.new_value, zcu)) return false;
+            },
+
+            .aggregate_init => {
+                const ty = data.ty_pl.ty.toType();
+                const elems_len: usize = @intCast(ty.arrayLen(zcu));
+                const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
+                if (!checkType(ty, zcu)) return false;
+                if (ty.zigTypeTag(zcu) == .Struct) {
+                    for (elems, 0..) |elem, elem_idx| {
+                        if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                } else {
+                    for (elems) |elem| {
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                }
+            },
+
+            .union_init => {
+                const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.init, zcu)) return false;
+            },
+
+            .field_parent_ptr => {
+                const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.field_ptr, zcu)) return false;
+            },
+
+            .atomic_load => {
+                if (!checkRef(data.atomic_load.ptr, zcu)) return false;
+            },
+
+            .prefetch => {
+                if (!checkRef(data.prefetch.ptr, zcu)) return false;
+            },
+
+            .vector_store_elem => {
+                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
+                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .select,
+            .mul_add,
+            => {
+                const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .atomic_rmw => {
+                const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(extra.operand, zcu)) return false;
+            },
+
+            .call,
+            .call_always_tail,
+            .call_never_tail,
+            .call_never_inline,
+            => {
+                const extra = air.extraData(Air.Call, data.pl_op.payload);
+                const args: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.args_len]);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                for (args) |arg| if (!checkRef(arg, zcu)) return false;
+            },
+
+            .dbg_var_ptr,
+            .dbg_var_val,
+            => {
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+            },
+
+            .@"try" => {
+                const extra = air.extraData(Air.Try, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .try_ptr => {
+                const extra = air.extraData(Air.TryPtr, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.data.ptr, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .cond_br => {
+                const extra = air.extraData(Air.CondBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.then_body_len]),
+                    zcu,
+                )) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .switch_br => {
+                const extra = air.extraData(Air.SwitchBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                var extra_index = extra.end;
+                for (0..extra.data.cases_len) |_| {
+                    const case = air.extraData(Air.SwitchBr.Case, extra_index);
+                    extra_index = case.end;
+                    const items: []const Air.Inst.Ref = @ptrCast(air.extra[extra_index..][0..case.data.items_len]);
+                    extra_index += case.data.items_len;
+                    for (items) |item| if (!checkRef(item, zcu)) return false;
+                    if (!checkBody(
+                        air,
+                        @ptrCast(air.extra[extra_index..][0..case.data.body_len]),
+                        zcu,
+                    )) return false;
+                    extra_index += case.data.body_len;
+                }
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra_index..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .assembly => {
+                const extra = air.extraData(Air.Asm, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                // Luckily, we only care about the inputs and outputs, so we don't have to do
+                // the whole null-terminated string dance.
+                const outputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.outputs_len]);
+                const inputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]);
+                for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
+                for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
+            },
+
+            .trap,
+            .breakpoint,
+            .ret_addr,
+            .frame_addr,
+            .unreach,
+            .wasm_memory_size,
+            .wasm_memory_grow,
+            .work_item_id,
+            .work_group_size,
+            .work_group_id,
+            .fence,
+            .dbg_stmt,
+            .err_return_trace,
+            .save_err_return_trace_index,
+            => {},
+        }
+    }
+    return true;
+}
+
+fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool {
+    const ip_index = ref.toInterned() orelse {
+        // This operand refers back to a previous instruction.
+        // We have already checked that instruction's type.
+        // So, there's no need to check this operand's type.
+        return true;
+    };
+    return checkVal(Value.fromInterned(ip_index), zcu);
+}
+
+fn checkVal(val: Value, zcu: *Zcu) bool {
+    if (!checkType(val.typeOf(zcu), zcu)) return false;
+    // Check for lazy values
+    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+        .int => |int| switch (int.storage) {
+            .u64, .i64, .big_int => return true,
+            .lazy_align, .lazy_size => |ty_index| {
+                return checkType(Type.fromInterned(ty_index), zcu);
+            },
+        },
+        else => return true,
+    }
+}
+
+fn checkType(ty: Type, zcu: *Zcu) bool {
+    const ip = &zcu.intern_pool;
+    return switch (ty.zigTypeTag(zcu)) {
+        .Type,
+        .Void,
+        .Bool,
+        .NoReturn,
+        .Int,
+        .Float,
+        .ErrorSet,
+        .Enum,
+        .Opaque,
+        .Vector,
+        // These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness.
+        // It's a little silly -- but fine, we'll return `true`.
+        .ComptimeFloat,
+        .ComptimeInt,
+        .Undefined,
+        .Null,
+        .EnumLiteral,
+        => true,
+
+        .Frame,
+        .AnyFrame,
+        => @panic("TODO Air.types_resolved.checkType async frames"),
+
+        .Optional => checkType(ty.childType(zcu), zcu),
+        .ErrorUnion => checkType(ty.errorUnionPayload(zcu), zcu),
+        .Pointer => checkType(ty.childType(zcu), zcu),
+        .Array => checkType(ty.childType(zcu), zcu),
+
+        .Fn => {
+            const info = zcu.typeToFunc(ty).?;
+            for (0..info.param_types.len) |i| {
+                const param_ty = info.param_types.get(ip)[i];
+                if (!checkType(Type.fromInterned(param_ty), zcu)) return false;
+            }
+            return checkType(Type.fromInterned(info.return_type), zcu);
+        },
+        .Struct => switch (ip.indexToKey(ty.toIntern())) {
+            .struct_type => {
+                const struct_obj = zcu.typeToStruct(ty).?;
+                return switch (struct_obj.layout) {
+                    .@"packed" => struct_obj.backingIntType(ip).* != .none,
+                    .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
+                };
+            },
+            .anon_struct_type => |tuple| {
+                for (0..tuple.types.len) |i| {
+                    const field_is_comptime = tuple.values.get(ip)[i] != .none;
+                    if (field_is_comptime) continue;
+                    const field_ty = tuple.types.get(ip)[i];
+                    if (!checkType(Type.fromInterned(field_ty), zcu)) return false;
+                }
+                return true;
+            },
+            else => unreachable,
+        },
+        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
+    };
+}
src/codegen/llvm.zig
@@ -2603,7 +2603,10 @@ pub const Object = struct {
                     if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                     const field_size = Type.fromInterned(field_ty).abiSize(mod);
-                    const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index));
+                    const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
+                        .@"packed" => .none,
+                        .auto, .@"extern" => mod.unionFieldNormalAlignment(union_type, @intCast(field_index)),
+                    };
 
                     const field_name = tag_type.names.get(ip)[field_index];
                     fields.appendAssumeCapacity(try o.builder.debugMemberType(
src/Sema/bitcast.zig
@@ -78,8 +78,8 @@ fn bitCastInner(
 
     const val_ty = val.typeOf(zcu);
 
-    try sema.resolveTypeLayout(val_ty);
-    try sema.resolveTypeLayout(dest_ty);
+    try val_ty.resolveLayout(zcu);
+    try dest_ty.resolveLayout(zcu);
 
     assert(val_ty.hasWellDefinedLayout(zcu));
 
@@ -136,8 +136,8 @@ fn bitCastSpliceInner(
     const val_ty = val.typeOf(zcu);
     const splice_val_ty = splice_val.typeOf(zcu);
 
-    try sema.resolveTypeLayout(val_ty);
-    try sema.resolveTypeLayout(splice_val_ty);
+    try val_ty.resolveLayout(zcu);
+    try splice_val_ty.resolveLayout(zcu);
 
     const splice_bits = splice_val_ty.bitSize(zcu);
 
src/Air.zig
@@ -1801,3 +1801,5 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
     };
 }
+
+pub const typesFullyResolved = @import("Air/types_resolved.zig").typesFullyResolved;
src/Compilation.zig
@@ -37,6 +37,7 @@ const Cache = std.Build.Cache;
 const c_codegen = @import("codegen/c.zig");
 const libtsan = @import("libtsan.zig");
 const Zir = std.zig.Zir;
+const Air = @import("Air.zig");
 const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
 
@@ -316,18 +317,29 @@ const Job = union(enum) {
     codegen_decl: InternPool.DeclIndex,
     /// Write the machine code for a function to the output file.
     /// This will either be a non-generic `func_decl` or a `func_instance`.
-    codegen_func: InternPool.Index,
+    codegen_func: struct {
+        func: InternPool.Index,
+        /// This `Air` is owned by the `Job` and allocated with `gpa`.
+        /// It must be deinited when the job is processed.
+        air: Air,
+    },
     /// Render the .h file snippet for the Decl.
     emit_h_decl: InternPool.DeclIndex,
     /// The Decl needs to be analyzed and possibly export itself.
     /// It may have already be analyzed, or it may have been determined
     /// to be outdated; in this case perform semantic analysis again.
     analyze_decl: InternPool.DeclIndex,
+    /// Analyze the body of a runtime function.
+    /// After analysis, a `codegen_func` job will be queued.
+    /// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
+    analyze_func: InternPool.Index,
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
     update_line_number: InternPool.DeclIndex,
     /// The main source file for the module needs to be analyzed.
     analyze_mod: *Package.Module,
+    /// Fully resolve the given `struct` or `union` type.
+    resolve_type_fully: InternPool.Index,
 
     /// one of the glibc static objects
     glibc_crt_file: glibc.CRTFile,
@@ -3389,7 +3401,7 @@ pub fn performAllTheWork(
             if (try zcu.findOutdatedToAnalyze()) |outdated| {
                 switch (outdated.unwrap()) {
                     .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }),
-                    .func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }),
+                    .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }),
                 }
                 continue;
             }
@@ -3439,6 +3451,14 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             const named_frame = tracy.namedFrame("codegen_func");
             defer named_frame.end();
 
+            const module = comp.module.?;
+            // This call takes ownership of `func.air`.
+            try module.linkerUpdateFunc(func.func, func.air);
+        },
+        .analyze_func => |func| {
+            const named_frame = tracy.namedFrame("analyze_func");
+            defer named_frame.end();
+
             const module = comp.module.?;
             module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
@@ -3518,6 +3538,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
                 try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
             }
         },
+        .resolve_type_fully => |ty| {
+            const named_frame = tracy.namedFrame("resolve_type_fully");
+            defer named_frame.end();
+
+            const zcu = comp.module.?;
+            Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                error.AnalysisFail => return,
+            };
+        },
         .update_line_number => |decl_index| {
             const named_frame = tracy.namedFrame("update_line_number");
             defer named_frame.end();
src/print_value.zig
@@ -81,12 +81,12 @@ pub fn print(
         }),
         .int => |int| switch (int.storage) {
             inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
-            .lazy_align => |ty| if (opt_sema) |sema| {
-                const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
+            .lazy_align => |ty| if (opt_sema != null) {
+                const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar;
                 try writer.print("{}", .{a.toByteUnits() orelse 0});
             } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
-            .lazy_size => |ty| if (opt_sema) |sema| {
-                const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
+            .lazy_size => |ty| if (opt_sema != null) {
+                const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar;
                 try writer.print("{}", .{s});
             } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}),
         },
src/Sema.zig
@@ -64,14 +64,6 @@ generic_owner: InternPool.Index = .none,
 /// instantiation can point back to the instantiation site in addition to the
 /// declaration site.
 generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
-/// The key is types that must be fully resolved prior to machine code
-/// generation pass. Types are added to this set when resolving them
-/// immediately could cause a dependency loop, but they do need to be resolved
-/// before machine code generation passes process the AIR.
-/// It would work fine if this were an array list instead of an array hash map.
-/// I chose array hash map with the intention to save time by omitting
-/// duplicates.
-types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// These are lazily created runtime blocks from block_inline instructions.
 /// They are created when an break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
@@ -872,7 +864,6 @@ pub fn deinit(sema: *Sema) void {
     sema.air_extra.deinit(gpa);
     sema.inst_map.deinit(gpa);
     sema.decl_val_table.deinit(gpa);
-    sema.types_to_resolve.deinit(gpa);
     {
         var it = sema.post_hoc_blocks.iterator();
         while (it.next()) |entry| {
@@ -2078,8 +2069,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
     const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));
 
     // var st: StackTrace = undefined;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));
 
     // st.instruction_addresses = &addrs;
@@ -2628,7 +2619,7 @@ fn analyzeAsInt(
     const mod = sema.mod;
     const coerced = try sema.coerce(block, dest_ty, air_ref, src);
     const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
-    return (try val.getUnsignedIntAdvanced(mod, sema)).?;
+    return (try val.getUnsignedIntAdvanced(mod, .sema)).?;
 }
 
 /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
@@ -2832,6 +2823,7 @@ fn zirStructDecl(
     }
 
     try mod.finalizeAnonDecl(new_decl_index);
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
 
@@ -3332,7 +3324,7 @@ fn zirUnionDecl(
     }
 
     try mod.finalizeAnonDecl(new_decl_index);
-
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
 
@@ -3457,12 +3449,12 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     defer tracy.end();
 
     if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
-        try sema.resolveTypeFields(sema.fn_ret_ty);
+        try sema.fn_ret_ty.resolveFields(sema.mod);
         return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
     }
 
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = sema.fn_ret_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
@@ -3471,7 +3463,6 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
         // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
         // TODO when functions gain result location support, the inlining struct in
         // Block should contain the return pointer, and we would pass that through here.
-        try sema.queueFullTypeResolution(sema.fn_ret_ty);
         return block.addTy(.alloc, ptr_type);
     }
 
@@ -3667,8 +3658,8 @@ fn zirAllocExtended(
             try sema.validateVarType(block, ty_src, var_ty, false);
         }
         const target = sema.mod.getTarget();
-        try sema.resolveTypeLayout(var_ty);
-        const ptr_type = try sema.ptrType(.{
+        try var_ty.resolveLayout(sema.mod);
+        const ptr_type = try sema.mod.ptrTypeSema(.{
             .child = var_ty.toIntern(),
             .flags = .{
                 .alignment = alignment,
@@ -3902,7 +3893,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                 const idx_val = (try sema.resolveValue(data.rhs)).?;
                 break :blk .{
                     data.lhs,
-                    .{ .elem = try idx_val.toUnsignedIntAdvanced(sema) },
+                    .{ .elem = try idx_val.toUnsignedIntSema(zcu) },
                 };
             },
             .bitcast => .{
@@ -3940,7 +3931,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     .val = payload_val.toIntern(),
                 } });
                 try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty);
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern();
             },
             .eu_payload => ptr: {
                 // Set the error union to non-error at comptime.
@@ -3953,7 +3944,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     .val = .{ .payload = payload_val.toIntern() },
                 } });
                 try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty);
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern();
             },
             .field => |idx| ptr: {
                 const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
@@ -3967,9 +3958,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val);
                     try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
                 }
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern();
             },
-            .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, sema)).toIntern(),
+            .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(),
         };
         try ptr_mapping.put(air_ptr, new_ptr);
     }
@@ -4060,7 +4051,7 @@ fn finishResolveComptimeKnownAllocPtr(
 fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type {
     var ptr_info = ptr_ty.ptrInfo(sema.mod);
     ptr_info.flags.is_const = true;
-    return sema.ptrType(ptr_info);
+    return sema.mod.ptrTypeSema(ptr_info);
 }
 
 fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
@@ -4103,11 +4094,10 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
         return sema.analyzeComptimeAlloc(block, var_ty, .none);
     }
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
-    try sema.queueFullTypeResolution(var_ty);
     const ptr = try block.addTy(.alloc, ptr_type);
     const ptr_inst = ptr.toIndex().?;
     try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index });
@@ -4127,11 +4117,10 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     try sema.validateVarType(block, ty_src, var_ty, false);
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
-    try sema.queueFullTypeResolution(var_ty);
     return block.addTy(.alloc, ptr_type);
 }
 
@@ -4227,7 +4216,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
             }
             const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none);
 
-            const final_ptr_ty = try sema.ptrType(.{
+            const final_ptr_ty = try mod.ptrTypeSema(.{
                 .child = final_elem_ty.toIntern(),
                 .flags = .{
                     .alignment = ia1.alignment,
@@ -4247,7 +4236,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                 // Unless the block is comptime, `alloc_inferred` always produces
                 // a runtime constant. The final inferred type needs to be
                 // fully resolved so it can be lowered in codegen.
-                try sema.resolveTypeFully(final_elem_ty);
+                try final_elem_ty.resolveFully(mod);
 
                 return;
             }
@@ -4259,8 +4248,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                 return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)});
             }
 
-            try sema.queueFullTypeResolution(final_elem_ty);
-
             // Change it to a normal alloc.
             sema.air_instructions.set(@intFromEnum(ptr_inst), .{
                 .tag = .alloc,
@@ -4633,7 +4620,7 @@ fn validateArrayInitTy(
             return;
         },
         .Struct => if (ty.isTuple(mod)) {
-            try sema.resolveTypeFields(ty);
+            try ty.resolveFields(mod);
             const array_len = ty.arrayLen(mod);
             if (init_count > array_len) {
                 return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
@@ -4911,7 +4898,7 @@ fn validateStructInit(
     if (block.is_comptime and
         (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
     {
-        try sema.resolveStructLayout(struct_ty);
+        try struct_ty.resolveLayout(mod);
         // In this case the only thing we need to do is evaluate the implicit
         // store instructions for default field values, and report any missing fields.
         // Avoid the cost of the extra machinery for detecting a comptime struct init value.
@@ -4919,7 +4906,7 @@ fn validateStructInit(
             const i: u32 = @intCast(i_usize);
             if (field_ptr != .none) continue;
 
-            try sema.resolveStructFieldInits(struct_ty);
+            try struct_ty.resolveStructFieldInits(mod);
             const default_val = struct_ty.structFieldDefaultValue(i, mod);
             if (default_val.toIntern() == .unreachable_value) {
                 const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
@@ -4968,7 +4955,7 @@ fn validateStructInit(
     const air_tags = sema.air_instructions.items(.tag);
     const air_datas = sema.air_instructions.items(.data);
 
-    try sema.resolveStructFieldInits(struct_ty);
+    try struct_ty.resolveStructFieldInits(mod);
 
     // We collect the comptime field values in case the struct initialization
     // ends up being comptime-known.
@@ -5127,7 +5114,7 @@ fn validateStructInit(
         try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
         return;
     }
-    try sema.resolveStructLayout(struct_ty);
+    try struct_ty.resolveLayout(mod);
 
     // Our task is to insert `store` instructions for all the default field values.
     for (found_fields, 0..) |field_ptr, i| {
@@ -5172,7 +5159,7 @@ fn zirValidatePtrArrayInit(
             var root_msg: ?*Module.ErrorMsg = null;
             errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
-            try sema.resolveStructFieldInits(array_ty);
+            try array_ty.resolveStructFieldInits(mod);
             var i = instrs.len;
             while (i < array_len) : (i += 1) {
                 const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
@@ -5241,7 +5228,7 @@ fn zirValidatePtrArrayInit(
 
         if (array_ty.isTuple(mod)) {
             if (array_ty.structFieldIsComptime(i, mod))
-                try sema.resolveStructFieldInits(array_ty);
+                try array_ty.resolveStructFieldInits(mod);
             if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
                 element_vals[i] = opv.toIntern();
                 continue;
@@ -5581,7 +5568,7 @@ fn storeToInferredAllocComptime(
             .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known",
         });
     };
-    const alloc_ty = try sema.ptrType(.{
+    const alloc_ty = try zcu.ptrTypeSema(.{
         .child = operand_ty.toIntern(),
         .flags = .{
             .alignment = iac.alignment,
@@ -5688,7 +5675,7 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref {
 
 fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index {
     const mod = sema.mod;
-    const ptr_ty = (try sema.ptrType(.{
+    const ptr_ty = (try mod.ptrTypeSema(.{
         .child = mod.intern_pool.typeOf(val),
         .flags = .{
             .alignment = .none,
@@ -6645,8 +6632,6 @@ fn addDbgVar(
     //   real `block` instruction.
     if (block.need_debug_scope) |ptr| ptr.* = true;
 
-    try sema.queueFullTypeResolution(operand_ty);
-
     // Add the name to the AIR.
     const name_extra_index: u32 = @intCast(sema.air_extra.items.len);
     const elements_used = name.len / 4 + 1;
@@ -6832,14 +6817,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
 
     if (!block.ownerModule().error_tracing) return .none;
 
-    const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) {
-        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
-        else => |e| return e,
-    };
-    sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) {
-        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
-        else => |e| return e,
-    };
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
     const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) {
         error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"),
@@ -6879,8 +6858,8 @@ fn popErrorReturnTrace(
         // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
         // the result is comptime-known to be a non-error. Either way, pop unconditionally.
 
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
         const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
@@ -6905,8 +6884,8 @@ fn popErrorReturnTrace(
         defer then_block.instructions.deinit(gpa);
 
         // If non-error, then pop the error return trace by restoring the index.
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
         const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
@@ -7032,8 +7011,8 @@ fn zirCall(
         // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
         // need to clean-up our own trace if we were passed to a non-error-handling expression.
         if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
-            const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-            try sema.resolveTypeFields(stack_trace_ty);
+            const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+            try stack_trace_ty.resolveFields(mod);
             const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls);
             const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);
 
@@ -7264,10 +7243,6 @@ const CallArgsInfo = union(enum) {
     ) CompileError!Air.Inst.Ref {
         const mod = sema.mod;
         const param_count = func_ty_info.param_types.len;
-        if (maybe_param_ty) |param_ty| switch (param_ty.toIntern()) {
-            .generic_poison_type => {},
-            else => try sema.queueFullTypeResolution(param_ty),
-        };
         const uncoerced_arg: Air.Inst.Ref = switch (cai) {
             inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
             .zir_call => |zir_call| arg_val: {
@@ -7494,24 +7469,19 @@ fn analyzeCall(
 
     const gpa = sema.gpa;
 
-    var is_generic_call = func_ty_info.is_generic;
+    const is_generic_call = func_ty_info.is_generic;
     var is_comptime_call = block.is_comptime or modifier == .compile_time;
     var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline;
     var comptime_reason: ?*const Block.ComptimeReason = null;
     if (!is_inline_call and !is_comptime_call) {
-        if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| {
-            is_comptime_call = ct;
-            is_inline_call = ct;
-            if (ct) {
-                comptime_reason = &.{ .comptime_ret_ty = .{
-                    .func = func,
-                    .func_src = func_src,
-                    .return_ty = Type.fromInterned(func_ty_info.return_type),
-                } };
-            }
-        } else |err| switch (err) {
-            error.GenericPoison => is_generic_call = true,
-            else => |e| return e,
+        if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
+            is_comptime_call = true;
+            is_inline_call = true;
+            comptime_reason = &.{ .comptime_ret_ty = .{
+                .func = func,
+                .func_src = func_src,
+                .return_ty = Type.fromInterned(func_ty_info.return_type),
+            } };
         }
     }
 
@@ -7871,7 +7841,6 @@ fn analyzeCall(
 
         if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
-        try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
         if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) {
             ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
         }
@@ -8281,7 +8250,6 @@ fn instantiateGenericCall(
             }
         } else {
             // The parameter is runtime-known.
-            try sema.queueFullTypeResolution(arg_ty);
             child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
                 .tag = .arg,
                 .data = .{ .arg = .{
@@ -8330,8 +8298,6 @@ fn instantiateGenericCall(
         return error.GenericPoison;
     }
 
-    try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
-
     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
     if (sema.owner_func_index != .none and
@@ -8423,7 +8389,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
         else => |e| return e,
     };
     const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod);
-    try sema.resolveTypeFields(indexable_ty);
+    try indexable_ty.resolveFields(mod);
     assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
     if (indexable_ty.zigTypeTag(mod) == .Struct) {
         const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod);
@@ -8687,7 +8653,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src);
 
     if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
-        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntAdvanced(sema));
+        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod));
         if (int > mod.global_error_set.count() or int == 0)
             return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
         return Air.internedToRef((try mod.intern(.{ .err = .{
@@ -8791,7 +8757,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
         .Enum => operand,
         .Union => blk: {
-            try sema.resolveTypeFields(operand_ty);
+            try operand_ty.resolveFields(mod);
             const tag_ty = operand_ty.unionTagType(mod) orelse {
                 return sema.fail(
                     block,
@@ -8933,7 +8899,7 @@ fn analyzeOptionalPayloadPtr(
     }
 
     const child_type = opt_type.optionalChild(zcu);
-    const child_pointer = try sema.ptrType(.{
+    const child_pointer = try zcu.ptrTypeSema(.{
         .child = child_type.toIntern(),
         .flags = .{
             .is_const = optional_ptr_ty.isConstPtr(zcu),
@@ -8957,13 +8923,13 @@ fn analyzeOptionalPayloadPtr(
                 const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
                 try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr);
             }
-            return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
         }
         if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
             if (val.isNull(zcu)) {
                 return sema.fail(block, src, "unable to unwrap null", .{});
             }
-            return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
         }
     }
 
@@ -9006,7 +8972,7 @@ fn zirOptionalPayload(
             // TODO https://github.com/ziglang/zig/issues/6597
             if (true) break :t operand_ty;
             const ptr_info = operand_ty.ptrInfo(mod);
-            break :t try sema.ptrType(.{
+            break :t try mod.ptrTypeSema(.{
                 .child = ptr_info.child,
                 .flags = .{
                     .alignment = ptr_info.flags.alignment,
@@ -9124,7 +9090,7 @@ fn analyzeErrUnionPayloadPtr(
 
     const err_union_ty = operand_ty.childType(zcu);
     const payload_ty = err_union_ty.errorUnionPayload(zcu);
-    const operand_pointer_ty = try sema.ptrType(.{
+    const operand_pointer_ty = try zcu.ptrTypeSema(.{
         .child = payload_ty.toIntern(),
         .flags = .{
             .is_const = operand_ty.isConstPtr(zcu),
@@ -9149,13 +9115,13 @@ fn analyzeErrUnionPayloadPtr(
                 const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
                 try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr);
             }
-            return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
         }
         if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
             if (val.getErrorName(zcu).unwrap()) |name| {
                 return sema.failWithComptimeErrorRetTrace(block, src, name);
             }
-            return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
         }
     }
 
@@ -9603,17 +9569,8 @@ fn funcCommon(
         }
     }
 
-    var ret_ty_requires_comptime = false;
-    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
-        ret_ty_requires_comptime = ret_comptime;
-        break :rp bare_return_type.isGenericPoison();
-    } else |err| switch (err) {
-        error.GenericPoison => rp: {
-            is_generic = true;
-            break :rp true;
-        },
-        else => |e| return e,
-    };
+    const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type);
+    const ret_poison = bare_return_type.isGenericPoison();
     const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
 
     const param_types = block.params.items(.ty);
@@ -9961,8 +9918,8 @@ fn finishFunc(
     if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
         // Make sure that StackTrace's fields are resolved so that the backend can
         // lower this fn type.
-        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(unresolved_stack_trace_ty);
+        const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try unresolved_stack_trace_ty.resolveFields(mod);
     }
 
     return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
@@ -10021,21 +9978,7 @@ fn zirParam(
         }
     };
 
-    const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
-        error.GenericPoison => {
-            // The type is not available until the generic instantiation.
-            // We result the param instruction with a poison value and
-            // insert an anytype parameter.
-            try block.params.append(sema.arena, .{
-                .ty = .generic_poison_type,
-                .is_comptime = comptime_syntax,
-                .name = param_name,
-            });
-            sema.inst_map.putAssumeCapacity(inst, .generic_poison);
-            return;
-        },
-        else => |e| return e,
-    } or comptime_syntax;
+    const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax;
 
     try block.params.append(sema.arena, .{
         .ty = param_ty.toIntern(),
@@ -10162,7 +10105,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             }
             return Air.internedToRef((try zcu.intValue(
                 Type.usize,
-                (try operand_val.getUnsignedIntAdvanced(zcu, sema)).?,
+                (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?,
             )).toIntern());
         }
         const len = operand_ty.vectorLen(zcu);
@@ -10174,7 +10117,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
                 new_elem.* = (try zcu.undefValue(Type.usize)).toIntern();
                 continue;
             }
-            const addr = try ptr_val.getUnsignedIntAdvanced(zcu, sema) orelse {
+            const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse {
                 // A vector element wasn't an integer pointer. This is a runtime operation.
                 break :ct;
             };
@@ -11047,7 +10990,7 @@ const SwitchProngAnalysis = struct {
                 const union_obj = zcu.typeToUnion(operand_ty).?;
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                 if (capture_byref) {
-                    const ptr_field_ty = try sema.ptrType(.{
+                    const ptr_field_ty = try zcu.ptrTypeSema(.{
                         .child = field_ty.toIntern(),
                         .flags = .{
                             .is_const = !operand_ptr_ty.ptrIsMutable(zcu),
@@ -11056,7 +10999,7 @@ const SwitchProngAnalysis = struct {
                         },
                     });
                     if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
-                        return Air.internedToRef((try union_ptr.ptrField(field_index, sema)).toIntern());
+                        return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern());
                     }
                     return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
                 } else {
@@ -11150,7 +11093,7 @@ const SwitchProngAnalysis = struct {
                         const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
                         for (field_indices, dummy_captures) |field_idx, *dummy| {
                             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
-                            const field_ptr_ty = try sema.ptrType(.{
+                            const field_ptr_ty = try zcu.ptrTypeSema(.{
                                 .child = field_ty.toIntern(),
                                 .flags = .{
                                     .is_const = operand_ptr_info.flags.is_const,
@@ -11186,7 +11129,7 @@ const SwitchProngAnalysis = struct {
 
                     if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
                         if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty);
-                        const field_ptr_val = try op_ptr_val.ptrField(first_field_index, sema);
+                        const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu);
                         return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
                     }
 
@@ -11399,7 +11342,7 @@ fn switchCond(
         },
 
         .Union => {
-            try sema.resolveTypeFields(operand_ty);
+            try operand_ty.resolveFields(mod);
             const enum_ty = operand_ty.unionTagType(mod) orelse {
                 const msg = msg: {
                     const msg = try sema.errMsg(src, "switch on union with no attached enum", .{});
@@ -13691,7 +13634,7 @@ fn maybeErrorUnwrap(
                     return true;
                 }
 
-                const panic_fn = try sema.getBuiltin("panicUnwrapError");
+                const panic_fn = try mod.getBuiltin("panicUnwrapError");
                 const err_return_trace = try sema.getErrorReturnTrace(block);
                 const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
                 try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13701,7 +13644,7 @@ fn maybeErrorUnwrap(
                 const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                 const msg_inst = try sema.resolveInst(inst_data.operand);
 
-                const panic_fn = try sema.getBuiltin("panic");
+                const panic_fn = try mod.getBuiltin("panic");
                 const err_return_trace = try sema.getErrorReturnTrace(block);
                 const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
                 try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13766,7 +13709,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{
         .needed_comptime_reason = "field name must be comptime-known",
     });
-    try sema.resolveTypeFields(ty);
+    try ty.resolveFields(mod);
     const ip = &mod.intern_pool;
 
     const has_field = hf: {
@@ -13946,7 +13889,7 @@ fn zirShl(
             return mod.undefRef(sema.typeOf(lhs));
         }
         // If rhs is 0, return lhs without doing any calculations.
-        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
             return lhs;
         }
         if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
@@ -14111,7 +14054,7 @@ fn zirShr(
             return mod.undefRef(lhs_ty);
         }
         // If rhs is 0, return lhs without doing any calculations.
-        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
             return lhs;
         }
         if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
@@ -14158,7 +14101,7 @@ fn zirShr(
             if (air_tag == .shr_exact) {
                 // Detect if any ones would be shifted out.
                 const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
-                if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
+                if (!(try truncated.compareAllWithZeroSema(.eq, mod))) {
                     return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                 }
             }
@@ -14582,12 +14525,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     try sema.requireRuntimeBlock(block, src, runtime_src);
 
     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
             .child = resolved_elem_ty.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
@@ -14670,7 +14613,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
                             .none => null,
                             else => Value.fromInterned(ptr_info.sentinel),
                         },
-                        .len = try val.sliceLen(sema),
+                        .len = try val.sliceLen(mod),
                     };
                 },
                 .One => {
@@ -14912,12 +14855,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
 
     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
             .child = lhs_info.elem_type.toIntern(),
             .flags = .{ .address_space = ptr_as },
         });
@@ -15105,7 +15048,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         .Int, .ComptimeInt, .ComptimeFloat => {
             if (maybe_lhs_val) |lhs_val| {
                 if (!lhs_val.isUndef(mod)) {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                         const scalar_zero = switch (scalar_tag) {
                             .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                             .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15120,7 +15063,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                 if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                     return sema.failWithDivideByZero(block, rhs_src);
                 }
                 // TODO: if the RHS is one, return the LHS directly
@@ -15241,7 +15184,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (lhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             } else {
-                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                     const scalar_zero = switch (scalar_tag) {
                         .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                         .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15256,7 +15199,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             // TODO: if the RHS is one, return the LHS directly
@@ -15408,7 +15351,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         // If the lhs is undefined, result is undefined.
         if (maybe_lhs_val) |lhs_val| {
             if (!lhs_val.isUndef(mod)) {
-                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                     const scalar_zero = switch (scalar_tag) {
                         .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                         .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15423,7 +15366,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             // TODO: if the RHS is one, return the LHS directly
@@ -15518,7 +15461,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         // If the lhs is undefined, result is undefined.
         if (maybe_lhs_val) |lhs_val| {
             if (!lhs_val.isUndef(mod)) {
-                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                     const scalar_zero = switch (scalar_tag) {
                         .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                         .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15533,7 +15476,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -15758,7 +15701,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 if (lhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, lhs_src);
                 }
-                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                     const scalar_zero = switch (scalar_tag) {
                         .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                         .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15777,18 +15720,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                     return sema.failWithDivideByZero(block, rhs_src);
                 }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
                     return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                 }
                 if (maybe_lhs_val) |lhs_val| {
                     const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
                     // If this answer could possibly be different by doing `intMod`,
                     // we must emit a compile error. Otherwise, it's OK.
-                    if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and
-                        !(try rem_result.compareAllWithZeroAdvanced(.eq, sema)))
+                    if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and
+                        !(try rem_result.compareAllWithZeroSema(.eq, mod)))
                     {
                         return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                     }
@@ -15806,14 +15749,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
                 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
             }
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) {
                     return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                 }
                 return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
@@ -15864,8 +15807,8 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
@@ -15941,7 +15884,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                 if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                     return sema.failWithDivideByZero(block, rhs_src);
                 }
                 if (maybe_lhs_val) |lhs_val| {
@@ -15957,7 +15900,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -16036,7 +15979,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                 if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                     return sema.failWithDivideByZero(block, rhs_src);
                 }
                 if (maybe_lhs_val) |lhs_val| {
@@ -16052,7 +15995,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -16139,12 +16082,12 @@ fn zirOverflowArithmetic(
                 // to the result, even if it is undefined..
                 // Otherwise, if either of the argument is undefined, undefined is returned.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
@@ -16165,7 +16108,7 @@ fn zirOverflowArithmetic(
                 if (maybe_rhs_val) |rhs_val| {
                     if (rhs_val.isUndef(mod)) {
                         break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
-                    } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     } else if (maybe_lhs_val) |lhs_val| {
                         if (lhs_val.isUndef(mod)) {
@@ -16184,7 +16127,7 @@ fn zirOverflowArithmetic(
                 const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
                 if (maybe_lhs_val) |lhs_val| {
                     if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                         } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
@@ -16194,7 +16137,7 @@ fn zirOverflowArithmetic(
 
                 if (maybe_rhs_val) |rhs_val| {
                     if (!rhs_val.isUndef(mod)) {
-                        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                         } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
@@ -16218,12 +16161,12 @@ fn zirOverflowArithmetic(
                 // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                 // Oterhwise if either of the arguments is undefined, both results are undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
@@ -16374,7 +16317,7 @@ fn analyzeArithmetic(
                 // overflow (max_int), causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         return casted_rhs;
                     }
                 }
@@ -16386,7 +16329,7 @@ fn analyzeArithmetic(
                             return mod.undefRef(resolved_type);
                         }
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         return casted_lhs;
                     }
                 }
@@ -16418,7 +16361,7 @@ fn analyzeArithmetic(
                 // If either of the operands are zero, the other operand is returned.
                 // If either of the operands are undefined, the result is undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         return casted_rhs;
                     }
                 }
@@ -16426,7 +16369,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isUndef(mod)) {
                         return mod.undefRef(resolved_type);
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         return casted_lhs;
                     }
                     if (maybe_lhs_val) |lhs_val| {
@@ -16439,7 +16382,7 @@ fn analyzeArithmetic(
                 // If either of the operands are zero, then the other operand is returned.
                 // If either of the operands are undefined, the result is undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         return casted_rhs;
                     }
                 }
@@ -16447,7 +16390,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isUndef(mod)) {
                         return mod.undefRef(resolved_type);
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         return casted_lhs;
                     }
                     if (maybe_lhs_val) |lhs_val| {
@@ -16488,7 +16431,7 @@ fn analyzeArithmetic(
                             return mod.undefRef(resolved_type);
                         }
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         return casted_lhs;
                     }
                 }
@@ -16523,7 +16466,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isUndef(mod)) {
                         return mod.undefRef(resolved_type);
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         return casted_lhs;
                     }
                 }
@@ -16544,7 +16487,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isUndef(mod)) {
                         return mod.undefRef(resolved_type);
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         return casted_lhs;
                     }
                 }
@@ -16591,7 +16534,7 @@ fn analyzeArithmetic(
                         if (lhs_val.isNan(mod)) {
                             return Air.internedToRef(lhs_val.toIntern());
                         }
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: {
                             if (maybe_rhs_val) |rhs_val| {
                                 if (rhs_val.isNan(mod)) {
                                     return Air.internedToRef(rhs_val.toIntern());
@@ -16622,7 +16565,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isNan(mod)) {
                         return Air.internedToRef(rhs_val.toIntern());
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: {
                         if (maybe_lhs_val) |lhs_val| {
                             if (lhs_val.isInf(mod)) {
                                 return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
@@ -16674,7 +16617,7 @@ fn analyzeArithmetic(
                 };
                 if (maybe_lhs_val) |lhs_val| {
                     if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                             const zero_val = try sema.splat(resolved_type, scalar_zero);
                             return Air.internedToRef(zero_val.toIntern());
                         }
@@ -16687,7 +16630,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isUndef(mod)) {
                         return mod.undefRef(resolved_type);
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         const zero_val = try sema.splat(resolved_type, scalar_zero);
                         return Air.internedToRef(zero_val.toIntern());
                     }
@@ -16719,7 +16662,7 @@ fn analyzeArithmetic(
                 };
                 if (maybe_lhs_val) |lhs_val| {
                     if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                             const zero_val = try sema.splat(resolved_type, scalar_zero);
                             return Air.internedToRef(zero_val.toIntern());
                         }
@@ -16732,7 +16675,7 @@ fn analyzeArithmetic(
                     if (rhs_val.isUndef(mod)) {
                         return mod.undefRef(resolved_type);
                     }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         const zero_val = try sema.splat(resolved_type, scalar_zero);
                         return Air.internedToRef(zero_val.toIntern());
                     }
@@ -16828,7 +16771,7 @@ fn analyzePtrArithmetic(
 
     const new_ptr_ty = t: {
         // Calculate the new pointer alignment.
-        // This code is duplicated in `elemPtrType`.
+        // This code is duplicated in `Type.elemPtrType`.
         if (ptr_info.flags.alignment == .none) {
             // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
             break :t ptr_ty;
@@ -16837,7 +16780,7 @@ fn analyzePtrArithmetic(
         // it being a multiple of the type size.
         const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
         const addend = if (opt_off_val) |off_val| a: {
-            const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntAdvanced(sema));
+            const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod));
             break :a elem_size * off_int;
         } else elem_size;
 
@@ -16850,7 +16793,7 @@ fn analyzePtrArithmetic(
         ));
         assert(new_align != .none);
 
-        break :t try sema.ptrType(.{
+        break :t try mod.ptrTypeSema(.{
             .child = ptr_info.child,
             .sentinel = ptr_info.sentinel,
             .flags = .{
@@ -16869,14 +16812,14 @@ fn analyzePtrArithmetic(
             if (opt_off_val) |offset_val| {
                 if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty);
 
-                const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema));
+                const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod));
                 if (offset_int == 0) return ptr;
                 if (air_tag == .ptr_sub) {
                     const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
                     const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty);
                     return Air.internedToRef(new_ptr_val.toIntern());
                 } else {
-                    const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, sema), new_ptr_ty);
+                    const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty);
                     return Air.internedToRef(new_ptr_val.toIntern());
                 }
             } else break :rs offset_src;
@@ -16975,7 +16918,6 @@ fn zirAsm(
             // Indicate the output is the asm instruction return value.
             arg.* = .none;
             const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand);
-            try sema.queueFullTypeResolution(out_ty);
             expr_ty = Air.internedToRef(out_ty.toIntern());
         } else {
             arg.* = try sema.resolveInst(output.data.operand);
@@ -17010,7 +16952,6 @@ fn zirAsm(
             .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
             else => {
                 arg.* = uncasted_arg;
-                try sema.queueFullTypeResolution(uncasted_arg_ty);
             },
         }
 
@@ -17169,7 +17110,7 @@ fn analyzeCmpUnionTag(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const union_ty = sema.typeOf(un);
-    try sema.resolveTypeFields(union_ty);
+    try union_ty.resolveFields(mod);
     const union_tag_ty = union_ty.unionTagType(mod) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
@@ -17385,9 +17326,6 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         => {},
     }
     const val = try ty.lazyAbiSize(mod);
-    if (val.isLazySize(mod)) {
-        try sema.queueFullTypeResolution(ty);
-    }
     return Air.internedToRef(val.toIntern());
 }
 
@@ -17427,7 +17365,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .AnyFrame,
         => {},
     }
-    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
+    const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema);
     return mod.intRef(Type.comptime_int, bit_size);
 }
 
@@ -17613,7 +17551,7 @@ fn zirBuiltinSrc(
         } });
     };
 
-    const src_loc_ty = try sema.getBuiltinType("SourceLocation");
+    const src_loc_ty = try mod.getBuiltinType("SourceLocation");
     const fields = .{
         // file: [:0]const u8,
         file_name_val,
@@ -17637,7 +17575,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
     const src = block.nodeOffset(inst_data.src_node);
     const ty = try sema.resolveType(block, src, inst_data.operand);
-    const type_info_ty = try sema.getBuiltinType("Type");
+    const type_info_ty = try mod.getBuiltinType("Type");
     const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
 
     if (ty.typeDeclInst(mod)) |type_decl_inst| {
@@ -17718,7 +17656,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     .ty = new_decl_ty.toIntern(),
                     .storage = .{ .elems = param_vals },
                 } });
-                const slice_ty = (try sema.ptrType(.{
+                const slice_ty = (try mod.ptrTypeSema(.{
                     .child = param_info_ty.toIntern(),
                     .flags = .{
                         .size = .Slice,
@@ -17748,7 +17686,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     func_ty_info.return_type,
             } });
 
-            const callconv_ty = try sema.getBuiltinType("CallingConvention");
+            const callconv_ty = try mod.getBuiltinType("CallingConvention");
 
             const field_values = .{
                 // calling_convention: CallingConvention,
@@ -17782,7 +17720,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const int_info_decl = mod.declPtr(int_info_decl_index);
             const int_info_ty = int_info_decl.val.toType();
 
-            const signedness_ty = try sema.getBuiltinType("Signedness");
+            const signedness_ty = try mod.getBuiltinType("Signedness");
             const info = ty.intInfo(mod);
             const field_values = .{
                 // signedness: Signedness,
@@ -17830,12 +17768,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             else
                 try Type.fromInterned(info.child).lazyAbiAlignment(mod);
 
-            const addrspace_ty = try sema.getBuiltinType("AddressSpace");
+            const addrspace_ty = try mod.getBuiltinType("AddressSpace");
             const pointer_ty = t: {
                 const decl_index = (try sema.namespaceLookup(
                     block,
                     src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                     try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls),
                 )).?;
                 try sema.ensureDeclAnalyzed(decl_index);
@@ -17984,8 +17922,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t set_field_ty_decl.val.toType();
             };
 
-            try sema.queueFullTypeResolution(error_field_ty);
-
             // Build our list of Error values
             // Optional value is only null if anyerror
             // Value can be zero-length slice otherwise
@@ -18036,7 +17972,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
 
             // Build our ?[]const Error value
-            const slice_errors_ty = try sema.ptrType(.{
+            const slice_errors_ty = try mod.ptrTypeSema(.{
                 .child = error_field_ty.toIntern(),
                 .flags = .{
                     .size = .Slice,
@@ -18182,7 +18118,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     .ty = fields_array_ty.toIntern(),
                     .storage = .{ .elems = enum_field_vals },
                 } });
-                const slice_ty = (try sema.ptrType(.{
+                const slice_ty = (try mod.ptrTypeSema(.{
                     .child = enum_field_ty.toIntern(),
                     .flags = .{
                         .size = .Slice,
@@ -18262,7 +18198,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t union_field_ty_decl.val.toType();
             };
 
-            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
+            try ty.resolveLayout(mod); // Getting alignment requires type layout
             const union_obj = mod.typeToUnion(ty).?;
             const tag_type = union_obj.loadTagType(ip);
             const layout = union_obj.getLayout(ip);
@@ -18298,7 +18234,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 };
 
                 const alignment = switch (layout) {
-                    .auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(field_index)),
+                    .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema),
                     .@"packed" => .none,
                 };
 
@@ -18326,7 +18262,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     .ty = array_fields_ty.toIntern(),
                     .storage = .{ .elems = union_field_vals },
                 } });
-                const slice_ty = (try sema.ptrType(.{
+                const slice_ty = (try mod.ptrTypeSema(.{
                     .child = union_field_ty.toIntern(),
                     .flags = .{
                         .size = .Slice,
@@ -18359,7 +18295,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const decl_index = (try sema.namespaceLookup(
                     block,
                     src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                     try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls),
                 )).?;
                 try sema.ensureDeclAnalyzed(decl_index);
@@ -18412,7 +18348,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t struct_field_ty_decl.val.toType();
             };
 
-            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
+            try ty.resolveLayout(mod); // Getting alignment requires type layout
 
             var struct_field_vals: []InternPool.Index = &.{};
             defer gpa.free(struct_field_vals);
@@ -18452,7 +18388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                                 } });
                             };
 
-                            try sema.resolveTypeLayout(Type.fromInterned(field_ty));
+                            try Type.fromInterned(field_ty).resolveLayout(mod);
 
                             const is_comptime = field_val != .none;
                             const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null;
@@ -18481,7 +18417,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 };
                 struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
 
-                try sema.resolveStructFieldInits(ty);
+                try ty.resolveStructFieldInits(mod);
 
                 for (struct_field_vals, 0..) |*field_val, field_index| {
                     const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name|
@@ -18520,10 +18456,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     const default_val_ptr = try sema.optRefValue(opt_default_val);
                     const alignment = switch (struct_type.layout) {
                         .@"packed" => .none,
-                        else => try sema.structFieldAlignment(
+                        else => try mod.structFieldAlignmentAdvanced(
                             struct_type.fieldAlign(ip, field_index),
                             field_ty,
                             struct_type.layout,
+                            .sema,
                         ),
                     };
 
@@ -18555,7 +18492,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     .ty = array_fields_ty.toIntern(),
                     .storage = .{ .elems = struct_field_vals },
                 } });
-                const slice_ty = (try sema.ptrType(.{
+                const slice_ty = (try mod.ptrTypeSema(.{
                     .child = struct_field_ty.toIntern(),
                     .flags = .{
                         .size = .Slice,
@@ -18591,7 +18528,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const decl_index = (try sema.namespaceLookup(
                     block,
                     src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                     try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls),
                 )).?;
                 try sema.ensureDeclAnalyzed(decl_index);
@@ -18635,7 +18572,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t type_opaque_ty_decl.val.toType();
             };
 
-            try sema.resolveTypeFields(ty);
+            try ty.resolveFields(mod);
             const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));
 
             const field_values = .{
@@ -18677,7 +18614,6 @@ fn typeInfoDecls(
         const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
         break :t declaration_ty_decl.val.toType();
     };
-    try sema.queueFullTypeResolution(declaration_ty);
 
     var decl_vals = std.ArrayList(InternPool.Index).init(gpa);
     defer decl_vals.deinit();
@@ -18695,7 +18631,7 @@ fn typeInfoDecls(
         .ty = array_decl_ty.toIntern(),
         .storage = .{ .elems = decl_vals.items },
     } });
-    const slice_ty = (try sema.ptrType(.{
+    const slice_ty = (try mod.ptrTypeSema(.{
         .child = declaration_ty.toIntern(),
         .flags = .{
             .size = .Slice,
@@ -19295,7 +19231,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
 
     const operand_ty = sema.typeOf(operand);
     const ptr_info = operand_ty.ptrInfo(mod);
-    const res_ty = try sema.ptrType(.{
+    const res_ty = try mod.ptrTypeSema(.{
         .child = err_union_ty.errorUnionPayload(mod).toIntern(),
         .flags = .{
             .is_const = ptr_info.flags.is_const,
@@ -19528,11 +19464,11 @@ fn retWithErrTracing(
         else => true,
     };
     const gpa = sema.gpa;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
     const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
-    const return_err_fn = try sema.getBuiltin("returnError");
+    const return_err_fn = try mod.getBuiltin("returnError");
     const args: [1]Air.Inst.Ref = .{err_return_trace};
 
     if (!need_check) {
@@ -19735,7 +19671,7 @@ fn analyzeRet(
         return sema.failWithOwnedErrorMsg(block, msg);
     }
 
-    try sema.resolveTypeLayout(sema.fn_ret_ty);
+    try sema.fn_ret_ty.resolveLayout(mod);
 
     try sema.validateRuntimeValue(block, operand_src, operand);
 
@@ -19817,7 +19753,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             },
             else => {},
         }
-        const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?;
+        const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?;
         break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes);
     } else .none;
 
@@ -19851,7 +19787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
                 elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size,
             });
         }
-        const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, sema);
+        const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema);
         if (elem_bit_size > host_size * 8 - bit_offset) {
             return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{
                 elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size,
@@ -19892,7 +19828,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         });
     }
 
-    const ty = try sema.ptrType(.{
+    const ty = try mod.ptrTypeSema(.{
         .child = elem_ty.toIntern(),
         .sentinel = sentinel,
         .flags = .{
@@ -19983,7 +19919,7 @@ fn structInitEmpty(
     const mod = sema.mod;
     const gpa = sema.gpa;
     // This logic must be synchronized with that in `zirStructInit`.
-    try sema.resolveTypeFields(struct_ty);
+    try struct_ty.resolveFields(mod);
 
     // The init values to use for the struct instance.
     const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod));
@@ -20054,7 +19990,6 @@ fn unionInit(
 
     try sema.requireRuntimeBlock(block, init_src, null);
     _ = union_ty_src;
-    try sema.queueFullTypeResolution(union_ty);
     return block.addUnionInit(union_ty, field_index, init);
 }
 
@@ -20083,7 +20018,7 @@ fn zirStructInit(
         else => |e| return e,
     };
     const resolved_ty = result_ty.optEuBaseType(mod);
-    try sema.resolveTypeLayout(resolved_ty);
+    try resolved_ty.resolveLayout(mod);
 
     if (resolved_ty.zigTypeTag(mod) == .Struct) {
         // This logic must be synchronized with that in `zirStructInitEmpty`.
@@ -20124,7 +20059,7 @@ fn zirStructInit(
             const field_ty = resolved_ty.structFieldType(field_index, mod);
             field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src);
             if (!is_packed) {
-                try sema.resolveStructFieldInits(resolved_ty);
+                try resolved_ty.resolveStructFieldInits(mod);
                 if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
                     const init_val = (try sema.resolveValue(field_inits[field_index])) orelse {
                         return sema.failWithNeededComptime(block, field_src, .{
@@ -20197,7 +20132,7 @@ fn zirStructInit(
 
         if (is_ref) {
             const target = mod.getTarget();
-            const alloc_ty = try sema.ptrType(.{
+            const alloc_ty = try mod.ptrTypeSema(.{
                 .child = result_ty.toIntern(),
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
             });
@@ -20211,7 +20146,6 @@ fn zirStructInit(
         }
 
         try sema.requireRuntimeBlock(block, src, null);
-        try sema.queueFullTypeResolution(resolved_ty);
         const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst);
         return sema.coerce(block, result_ty, union_val, src);
     }
@@ -20288,7 +20222,7 @@ fn finishStructInit(
                     continue;
                 }
 
-                try sema.resolveStructFieldInits(struct_ty);
+                try struct_ty.resolveStructFieldInits(mod);
 
                 const field_init = struct_type.fieldInit(ip, i);
                 if (field_init == .none) {
@@ -20358,9 +20292,9 @@ fn finishStructInit(
     }
 
     if (is_ref) {
-        try sema.resolveStructLayout(struct_ty);
+        try struct_ty.resolveLayout(mod);
         const target = sema.mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20380,8 +20314,7 @@ fn finishStructInit(
         .init_node_offset = init_src.offset.node_offset.x,
         .elem_index = @intCast(runtime_index),
     } }));
-    try sema.resolveStructFieldInits(struct_ty);
-    try sema.queueFullTypeResolution(struct_ty);
+    try struct_ty.resolveStructFieldInits(mod);
     const struct_val = try block.addAggregateInit(struct_ty, field_inits);
     return sema.coerce(block, result_ty, struct_val, init_src);
 }
@@ -20490,7 +20423,7 @@ fn structInitAnon(
 
     if (is_ref) {
         const target = mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = tuple_ty,
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20504,7 +20437,7 @@ fn structInitAnon(
             };
             extra_index = item.end;
 
-            const field_ptr_ty = try sema.ptrType(.{
+            const field_ptr_ty = try mod.ptrTypeSema(.{
                 .child = field_ty,
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
             });
@@ -20597,7 +20530,7 @@ fn zirArrayInit(
         dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
         if (is_tuple) {
             if (array_ty.structFieldIsComptime(i, mod))
-                try sema.resolveStructFieldInits(array_ty);
+                try array_ty.resolveStructFieldInits(mod);
             if (try array_ty.structFieldValueComptime(mod, i)) |field_val| {
                 const init_val = try sema.resolveValue(dest.*) orelse {
                     return sema.failWithNeededComptime(block, elem_src, .{
@@ -20641,11 +20574,10 @@ fn zirArrayInit(
         .init_node_offset = src.offset.node_offset.x,
         .elem_index = runtime_index,
     } }));
-    try sema.queueFullTypeResolution(array_ty);
 
     if (is_ref) {
         const target = mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20654,7 +20586,7 @@ fn zirArrayInit(
 
         if (is_tuple) {
             for (resolved_args, 0..) |arg, i| {
-                const elem_ptr_ty = try sema.ptrType(.{
+                const elem_ptr_ty = try mod.ptrTypeSema(.{
                     .child = array_ty.structFieldType(i, mod).toIntern(),
                     .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
                 });
@@ -20667,7 +20599,7 @@ fn zirArrayInit(
             return sema.makePtrConst(block, alloc);
         }
 
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
             .child = array_ty.elemType2(mod).toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20755,14 +20687,14 @@ fn arrayInitAnon(
 
     if (is_ref) {
         const target = sema.mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = tuple_ty,
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
         for (operands, 0..) |operand, i_usize| {
             const i: u32 = @intCast(i_usize);
-            const field_ptr_ty = try sema.ptrType(.{
+            const field_ptr_ty = try mod.ptrTypeSema(.{
                 .child = types[i],
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
             });
@@ -20832,7 +20764,7 @@ fn fieldType(
     const ip = &mod.intern_pool;
     var cur_ty = aggregate_ty;
     while (true) {
-        try sema.resolveTypeFields(cur_ty);
+        try cur_ty.resolveFields(mod);
         switch (cur_ty.zigTypeTag(mod)) {
             .Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
                 .anon_struct_type => |anon_struct| {
@@ -20883,8 +20815,8 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
 fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
     const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
 
@@ -20918,9 +20850,6 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
     }
     const val = try ty.lazyAbiAlignment(mod);
-    if (val.isLazyAlign(mod)) {
-        try sema.queueFullTypeResolution(ty);
-    }
     return Air.internedToRef(val.toIntern());
 }
 
@@ -21095,7 +21024,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const mod = sema.mod;
     const ip = &mod.intern_pool;
 
-    try sema.resolveTypeLayout(operand_ty);
+    try operand_ty.resolveLayout(mod);
     const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
         .EnumLiteral => {
             const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined);
@@ -21171,7 +21100,7 @@ fn zirReify(
             },
         },
     };
-    const type_info_ty = try sema.getBuiltinType("Type");
+    const type_info_ty = try mod.getBuiltinType("Type");
     const uncasted_operand = try sema.resolveInst(extra.operand);
     const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
     const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{
@@ -21205,7 +21134,7 @@ fn zirReify(
             );
 
             const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
-            const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema));
+            const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod));
             const ty = try mod.intType(signedness, bits);
             return Air.internedToRef(ty.toIntern());
         },
@@ -21220,7 +21149,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
             ).?);
 
-            const len: u32 = @intCast(try len_val.toUnsignedIntAdvanced(sema));
+            const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod));
             const child_ty = child_val.toType();
 
             try sema.checkVectorElemType(block, src, child_ty);
@@ -21238,7 +21167,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "bits", .no_embedded_nulls),
             ).?);
 
-            const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema));
+            const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod));
             const ty = switch (bits) {
                 16 => Type.f16,
                 32 => Type.f32,
@@ -21288,7 +21217,7 @@ fn zirReify(
                 return sema.fail(block, src, "alignment must fit in 'u32'", .{});
             }
 
-            const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;
+            const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?;
             if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
                 return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int});
             }
@@ -21296,7 +21225,7 @@ fn zirReify(
 
             const elem_ty = child_val.toType();
             if (abi_align != .none) {
-                try sema.resolveTypeLayout(elem_ty);
+                try elem_ty.resolveLayout(mod);
             }
 
             const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
@@ -21340,7 +21269,7 @@ fn zirReify(
                 }
             }
 
-            const ty = try sema.ptrType(.{
+            const ty = try mod.ptrTypeSema(.{
                 .child = elem_ty.toIntern(),
                 .sentinel = actual_sentinel,
                 .flags = .{
@@ -21369,7 +21298,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls),
             ).?);
 
-            const len = try len_val.toUnsignedIntAdvanced(sema);
+            const len = try len_val.toUnsignedIntSema(mod);
             const child_ty = child_val.toType();
             const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
                 const ptr_ty = try mod.singleMutPtrType(child_ty);
@@ -21476,7 +21405,7 @@ fn zirReify(
             const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
 
             // Decls
-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified structs must have no decls", .{});
             }
 
@@ -21509,7 +21438,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls),
             ).?);
 
-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified enums must have no decls", .{});
             }
 
@@ -21527,7 +21456,7 @@ fn zirReify(
             ).?);
 
             // Decls
-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified opaque must have no decls", .{});
             }
 
@@ -21575,7 +21504,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
             ).?);
 
-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified unions must have no decls", .{});
             }
             const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
@@ -21934,7 +21863,7 @@ fn reifyUnion(
 
             field_ty.* = field_type_val.toIntern();
             if (any_aligns) {
-                const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema);
+                const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod);
                 if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
                     // TODO: better source location
                     return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
@@ -21979,7 +21908,7 @@ fn reifyUnion(
 
             field_ty.* = field_type_val.toIntern();
             if (any_aligns) {
-                const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema);
+                const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod);
                 if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
                     // TODO: better source location
                     return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
@@ -22036,6 +21965,7 @@ fn reifyUnion(
     loaded_union.flagsPtr(ip).status = .have_field_types;
 
     try mod.finalizeAnonDecl(new_decl_index);
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
 }
 
@@ -22109,7 +22039,7 @@ fn reifyStruct(
 
         if (field_is_comptime) any_comptime_fields = true;
         if (field_default_value != .none) any_default_inits = true;
-        switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, sema)) {
+        switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) {
             .eq => {},
             .gt => any_aligned_fields = true,
             .lt => unreachable,
@@ -22192,7 +22122,7 @@ fn reifyStruct(
                 return sema.fail(block, src, "alignment must fit in 'u32'", .{});
             }
 
-            const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema);
+            const byte_align = try field_alignment_val.toUnsignedIntSema(mod);
             if (byte_align == 0) {
                 if (layout != .@"packed") {
                     struct_type.field_aligns.get(ip)[field_idx] = .none;
@@ -22278,7 +22208,7 @@ fn reifyStruct(
         var fields_bit_sum: u64 = 0;
         for (0..struct_type.field_types.len) |field_idx| {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
-            sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
+            field_ty.resolveLayout(mod) catch |err| switch (err) {
                 error.AnalysisFail => {
                     const msg = sema.err orelse return err;
                     try sema.errNote(src, msg, "while checking a field of this struct", .{});
@@ -22300,11 +22230,12 @@ fn reifyStruct(
     }
 
     try mod.finalizeAnonDecl(new_decl_index);
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
 }
 
 fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
-    const va_list_ty = try sema.getBuiltinType("VaList");
+    const va_list_ty = try sema.mod.getBuiltinType("VaList");
     const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty);
 
     const inst = try sema.resolveInst(zir_ref);
@@ -22343,7 +22274,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
     const va_list_src = block.builtinCallArgSrc(extra.node, 0);
 
     const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
-    const va_list_ty = try sema.getBuiltinType("VaList");
+    const va_list_ty = try sema.mod.getBuiltinType("VaList");
 
     try sema.requireRuntimeBlock(block, src, null);
     return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
@@ -22363,7 +22294,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
 fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const src = block.nodeOffset(@bitCast(extended.operand));
 
-    const va_list_ty = try sema.getBuiltinType("VaList");
+    const va_list_ty = try sema.mod.getBuiltinType("VaList");
     try sema.requireRuntimeBlock(block, src, null);
     return block.addInst(.{
         .tag = .c_va_start,
@@ -22497,7 +22428,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     _ = try sema.checkIntType(block, operand_src, operand_scalar_ty);
 
     if (try sema.resolveValue(operand)) |operand_val| {
-        const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, sema);
+        const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema);
         return Air.internedToRef(result_val.toIntern());
     } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) {
         return sema.failWithNeededComptime(block, operand_src, .{
@@ -22545,7 +22476,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     try sema.checkPtrType(block, src, ptr_ty, true);
 
     const elem_ty = ptr_ty.elemType2(mod);
-    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
+    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema);
 
     if (ptr_ty.isSlice(mod)) {
         const msg = msg: {
@@ -22644,7 +22575,7 @@ fn ptrFromIntVal(
         }
         return sema.failWithUseOfUndef(block, operand_src);
     }
-    const addr = try operand_val.toUnsignedIntAdvanced(sema);
+    const addr = try operand_val.toUnsignedIntSema(zcu);
     if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0)
         return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)});
     if (addr != 0 and ptr_align != .none and !ptr_align.check(addr))
@@ -22842,8 +22773,8 @@ fn ptrCastFull(
     const src_info = operand_ty.ptrInfo(mod);
     const dest_info = dest_ty.ptrInfo(mod);
 
-    try sema.resolveTypeLayout(Type.fromInterned(src_info.child));
-    try sema.resolveTypeLayout(Type.fromInterned(dest_info.child));
+    try Type.fromInterned(src_info.child).resolveLayout(mod);
+    try Type.fromInterned(dest_info.child).resolveLayout(mod);
 
     const src_slice_like = src_info.flags.size == .Slice or
         (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array);
@@ -23091,7 +23022,7 @@ fn ptrCastFull(
         // Only convert to a many-pointer at first
         var info = dest_info;
         info.flags.size = .Many;
-        const ty = try sema.ptrType(info);
+        const ty = try mod.ptrTypeSema(info);
         if (dest_ty.zigTypeTag(mod) == .Optional) {
             break :blk try mod.optionalType(ty.toIntern());
         } else {
@@ -23109,7 +23040,7 @@ fn ptrCastFull(
                 return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
             }
             if (dest_align.compare(.gt, src_align)) {
-                if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| {
+                if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| {
                     if (!dest_align.check(addr)) {
                         return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
                             addr,
@@ -23176,7 +23107,7 @@ fn ptrCastFull(
         // We can't change address spaces with a bitcast, so this requires two instructions
         var intermediate_info = src_info;
         intermediate_info.flags.address_space = dest_info.flags.address_space;
-        const intermediate_ptr_ty = try sema.ptrType(intermediate_info);
+        const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info);
         const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
             break :blk try mod.optionalType(intermediate_ptr_ty.toIntern());
         } else intermediate_ptr_ty;
@@ -23233,7 +23164,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     if (flags.volatile_cast) ptr_info.flags.is_volatile = false;
 
     const dest_ty = blk: {
-        const dest_ty = try sema.ptrType(ptr_info);
+        const dest_ty = try mod.ptrTypeSema(ptr_info);
         if (operand_ty.zigTypeTag(mod) == .Optional) {
             break :blk try mod.optionalType(dest_ty.toIntern());
         }
@@ -23523,7 +23454,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
 
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    try sema.resolveTypeLayout(ty);
+    try ty.resolveLayout(mod);
     switch (ty.zigTypeTag(mod)) {
         .Struct => {},
         else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}),
@@ -23766,7 +23697,7 @@ fn checkAtomicPtrOperand(
     const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
         .Pointer => ptr_ty.ptrInfo(mod),
         else => {
-            const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
+            const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
             unreachable;
         },
@@ -23776,7 +23707,7 @@ fn checkAtomicPtrOperand(
     wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero;
     wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile;
 
-    const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
+    const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data);
     const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
 
     return casted_ptr;
@@ -23953,7 +23884,7 @@ fn resolveExportOptions(
     const mod = sema.mod;
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
-    const export_options_ty = try sema.getBuiltinType("ExportOptions");
+    const export_options_ty = try mod.getBuiltinType("ExportOptions");
     const air_ref = try sema.resolveInst(zir_ref);
     const options = try sema.coerce(block, export_options_ty, air_ref, src);
 
@@ -24017,7 +23948,7 @@ fn resolveBuiltinEnum(
     reason: NeededComptimeReason,
 ) CompileError!@field(std.builtin, name) {
     const mod = sema.mod;
-    const ty = try sema.getBuiltinType(name);
+    const ty = try mod.getBuiltinType(name);
     const air_ref = try sema.resolveInst(zir_ref);
     const coerced = try sema.coerce(block, ty, air_ref, src);
     const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
@@ -24777,7 +24708,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;
     const func = try sema.resolveInst(extra.callee);
 
-    const modifier_ty = try sema.getBuiltinType("CallModifier");
+    const modifier_ty = try mod.getBuiltinType("CallModifier");
     const air_ref = try sema.resolveInst(extra.modifier);
     const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
     const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{
@@ -24881,7 +24812,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
         .Struct, .Union => {},
         else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}),
     }
-    try sema.resolveTypeLayout(parent_ty);
+    try parent_ty.resolveLayout(zcu);
 
     const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{
         .needed_comptime_reason = "field name must be comptime-known",
@@ -24912,7 +24843,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     var actual_parent_ptr_info: InternPool.Key.PtrType = .{
         .child = parent_ty.toIntern(),
         .flags = .{
-            .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema),
+            .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema),
             .is_const = field_ptr_info.flags.is_const,
             .is_volatile = field_ptr_info.flags.is_volatile,
             .is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24924,7 +24855,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     var actual_field_ptr_info: InternPool.Key.PtrType = .{
         .child = field_ty.toIntern(),
         .flags = .{
-            .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, sema),
+            .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema),
             .is_const = field_ptr_info.flags.is_const,
             .is_volatile = field_ptr_info.flags.is_volatile,
             .is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24935,12 +24866,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     switch (parent_ty.containerLayout(zcu)) {
         .auto => {
             actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
-                if (zcu.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment(
+                if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced(
                     struct_obj.fieldAlign(ip, field_index),
                     field_ty,
                     struct_obj.layout,
+                    .sema,
                 ) else if (zcu.typeToUnion(parent_ty)) |union_obj|
-                    try sema.unionFieldAlignment(union_obj, field_index)
+                    try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema)
                 else
                     actual_field_ptr_info.flags.alignment,
             );
@@ -24970,9 +24902,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
         },
     }
 
-    const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info);
+    const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info);
     const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src);
-    const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info);
+    const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info);
 
     const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: {
         switch (parent_ty.zigTypeTag(zcu)) {
@@ -25032,7 +24964,6 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
         break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src);
     } else result: {
         try sema.requireRuntimeBlock(block, inst_src, field_ptr_src);
-        try sema.queueFullTypeResolution(parent_ty);
         break :result try block.addInst(.{
             .tag = .field_parent_ptr,
             .data = .{ .ty_pl = .{
@@ -25345,7 +25276,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A
         // Already an array pointer.
         return ptr;
     }
-    const new_ty = try sema.ptrType(.{
+    const new_ty = try mod.ptrTypeSema(.{
         .child = (try mod.arrayType(.{
             .len = len,
             .sentinel = info.sentinel,
@@ -25444,7 +25375,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
         if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src;
         if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
-            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
+            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?;
             const len = try sema.usizeCast(block, dest_src, len_u64);
             for (0..len) |i| {
                 const elem_index = try mod.intRef(Type.usize, i);
@@ -25503,7 +25434,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     var new_dest_ptr = dest_ptr;
     var new_src_ptr = src_ptr;
     if (len_val) |val| {
-        const len = try val.toUnsignedIntAdvanced(sema);
+        const len = try val.toUnsignedIntSema(mod);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
             return;
@@ -25550,7 +25481,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
             assert(dest_manyptr_ty_key.flags.size == .One);
             dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
             dest_manyptr_ty_key.flags.size = .Many;
-            break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
+            break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src);
         } else new_dest_ptr;
 
         const new_src_ptr_ty = sema.typeOf(new_src_ptr);
@@ -25561,7 +25492,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
             assert(src_manyptr_ty_key.flags.size == .One);
             src_manyptr_ty_key.child = src_elem_ty.toIntern();
             src_manyptr_ty_key.flags.size = .Many;
-            break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
+            break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src);
         } else new_src_ptr;
 
         // ok1: dest >= src + len
@@ -25628,7 +25559,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
         const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src);
         const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
-        const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
+        const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?;
         const len = try sema.usizeCast(block, dest_src, len_u64);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
@@ -25808,7 +25739,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.isGenericPoison()) {
             break :blk null;
         }
-        const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntAdvanced(sema));
+        const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod));
         const default = target_util.defaultFunctionAlignment(target);
         break :blk if (alignment == default) .none else alignment;
     } else if (extra.data.bits.has_align_ref) blk: {
@@ -25828,7 +25759,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             error.GenericPoison => break :blk null,
             else => |e| return e,
         };
-        const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema));
+        const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod));
         const default = target_util.defaultFunctionAlignment(target);
         break :blk if (alignment == default) .none else alignment;
     } else .none;
@@ -25904,7 +25835,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const body = sema.code.bodySlice(extra_index, body_len);
         extra_index += body.len;
 
-        const cc_ty = try sema.getBuiltinType("CallingConvention");
+        const cc_ty = try mod.getBuiltinType("CallingConvention");
         const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{
             .needed_comptime_reason = "calling convention must be comptime-known",
         });
@@ -26117,7 +26048,7 @@ fn resolvePrefetchOptions(
     const mod = sema.mod;
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
-    const options_ty = try sema.getBuiltinType("PrefetchOptions");
+    const options_ty = try mod.getBuiltinType("PrefetchOptions");
     const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
 
     const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node });
@@ -26141,7 +26072,7 @@ fn resolvePrefetchOptions(
 
     return std.builtin.PrefetchOptions{
         .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
-        .locality = @intCast(try locality_val.toUnsignedIntAdvanced(sema)),
+        .locality = @intCast(try locality_val.toUnsignedIntSema(mod)),
         .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
     };
 }
@@ -26189,7 +26120,7 @@ fn resolveExternOptions(
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
     const options_inst = try sema.resolveInst(zir_ref);
-    const extern_options_ty = try sema.getBuiltinType("ExternOptions");
+    const extern_options_ty = try mod.getBuiltinType("ExternOptions");
     const options = try sema.coerce(block, extern_options_ty, options_inst, src);
 
     const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node });
@@ -26440,7 +26371,7 @@ fn explainWhyTypeIsComptime(
     var type_set = TypeSet{};
     defer type_set.deinit(sema.gpa);
 
-    try sema.resolveTypeFully(ty);
+    try ty.resolveFully(sema.mod);
     return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set);
 }
 
@@ -26567,7 +26498,7 @@ const ExternPosition = enum {
 
 /// Returns true if `ty` is allowed in extern types.
 /// Does *NOT* require `ty` to be resolved in any way.
-/// Calls `resolveTypeLayout` for packed containers.
+/// Calls `resolveLayout` for packed containers.
 fn validateExternType(
     sema: *Sema,
     ty: Type,
@@ -26618,7 +26549,7 @@ fn validateExternType(
         .Struct, .Union => switch (ty.containerLayout(mod)) {
             .@"extern" => return true,
             .@"packed" => {
-                const bit_size = try ty.bitSizeAdvanced(mod, sema);
+                const bit_size = try ty.bitSizeAdvanced(mod, .sema);
                 switch (bit_size) {
                     0, 8, 16, 32, 64, 128 => return true,
                     else => return false,
@@ -26796,11 +26727,11 @@ fn explainWhyTypeIsNotPacked(
     }
 }
 
-fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
+fn prepareSimplePanic(sema: *Sema) !void {
     const mod = sema.mod;
 
     if (mod.panic_func_index == .none) {
-        const decl_index = (try sema.getBuiltinDecl(block, "panic"));
+        const decl_index = (try mod.getBuiltinDecl("panic"));
         // decl_index may be an alias; we must find the decl that actually
         // owns the function.
         try sema.ensureDeclAnalyzed(decl_index);
@@ -26813,10 +26744,10 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
     }
 
     if (mod.null_stack_trace == .none) {
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const target = mod.getTarget();
-        const ptr_stack_trace_ty = try sema.ptrType(.{
+        const ptr_stack_trace_ty = try mod.ptrTypeSema(.{
             .child = stack_trace_ty.toIntern(),
             .flags = .{
                 .address_space = target_util.defaultAddressSpace(target, .global_constant),
@@ -26838,9 +26769,9 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP
     const gpa = sema.gpa;
     if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;
 
-    try sema.prepareSimplePanic(block);
+    try sema.prepareSimplePanic();
 
-    const panic_messages_ty = try sema.getBuiltinType("panic_messages");
+    const panic_messages_ty = try mod.getBuiltinType("panic_messages");
     const msg_decl_index = (sema.namespaceLookup(
         block,
         LazySrcLoc.unneeded,
@@ -26946,7 +26877,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.
         return;
     }
 
-    try sema.prepareSimplePanic(block);
+    try sema.prepareSimplePanic();
 
     const panic_func = mod.funcInfo(mod.panic_func_index);
     const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl);
@@ -26992,7 +26923,7 @@ fn panicUnwrapError(
         if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) {
             _ = try fail_block.addNoOp(.trap);
         } else {
-            const panic_fn = try sema.getBuiltin("panicUnwrapError");
+            const panic_fn = try sema.mod.getBuiltin("panicUnwrapError");
             const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
             const err_return_trace = try sema.getErrorReturnTrace(&fail_block);
             const args: [2]Air.Inst.Ref = .{ err_return_trace, err };
@@ -27051,7 +26982,7 @@ fn panicSentinelMismatch(
     const actual_sentinel = if (ptr_ty.isSlice(mod))
         try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
     else blk: {
-        const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null);
+        const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod);
         const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
         break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
     };
@@ -27069,7 +27000,7 @@ fn panicSentinelMismatch(
     } else if (sentinel_ty.isSelfComparable(mod, true))
         try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
     else {
-        const panic_fn = try sema.getBuiltin("checkNonScalarSentinel");
+        const panic_fn = try mod.getBuiltin("checkNonScalarSentinel");
         const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel };
         try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check");
         return;
@@ -27108,7 +27039,7 @@ fn safetyCheckFormatted(
     if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) {
         _ = try fail_block.addNoOp(.trap);
     } else {
-        const panic_fn = try sema.getBuiltin(func);
+        const panic_fn = try sema.mod.getBuiltin(func);
         try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check");
     }
     try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
@@ -27170,7 +27101,7 @@ fn fieldVal(
                 return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
             } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
                 const ptr_info = object_ty.ptrInfo(mod);
-                const result_ty = try sema.ptrType(.{
+                const result_ty = try mod.ptrTypeSema(.{
                     .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
                     .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none,
                     .flags = .{
@@ -27267,7 +27198,7 @@ fn fieldVal(
                     if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
                         return inst;
                     }
-                    try sema.resolveTypeFields(child_type);
+                    try child_type.resolveFields(mod);
                     if (child_type.unionTagType(mod)) |enum_ty| {
                         if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
                             const field_index: u32 = @intCast(field_index_usize);
@@ -27361,7 +27292,7 @@ fn fieldPtr(
                 return anonDeclRef(sema, int_val.toIntern());
             } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
                 const ptr_info = object_ty.ptrInfo(mod);
-                const new_ptr_ty = try sema.ptrType(.{
+                const new_ptr_ty = try mod.ptrTypeSema(.{
                     .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
                     .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none,
                     .flags = .{
@@ -27376,7 +27307,7 @@ fn fieldPtr(
                     .packed_offset = ptr_info.packed_offset,
                 });
                 const ptr_ptr_info = object_ptr_ty.ptrInfo(mod);
-                const result_ty = try sema.ptrType(.{
+                const result_ty = try mod.ptrTypeSema(.{
                     .child = new_ptr_ty.toIntern(),
                     .sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none,
                     .flags = .{
@@ -27410,7 +27341,7 @@ fn fieldPtr(
             if (field_name.eqlSlice("ptr", ip)) {
                 const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);
 
-                const result_ty = try sema.ptrType(.{
+                const result_ty = try mod.ptrTypeSema(.{
                     .child = slice_ptr_ty.toIntern(),
                     .flags = .{
                         .is_const = !attr_ptr_ty.ptrIsMutable(mod),
@@ -27420,7 +27351,7 @@ fn fieldPtr(
                 });
 
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
-                    return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, sema)).toIntern());
+                    return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern());
                 }
                 try sema.requireRuntimeBlock(block, src, null);
 
@@ -27428,7 +27359,7 @@ fn fieldPtr(
                 try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr);
                 return field_ptr;
             } else if (field_name.eqlSlice("len", ip)) {
-                const result_ty = try sema.ptrType(.{
+                const result_ty = try mod.ptrTypeSema(.{
                     .child = .usize_type,
                     .flags = .{
                         .is_const = !attr_ptr_ty.ptrIsMutable(mod),
@@ -27438,7 +27369,7 @@ fn fieldPtr(
                 });
 
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
-                    return Air.internedToRef((try val.ptrField(Value.slice_len_index, sema)).toIntern());
+                    return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern());
                 }
                 try sema.requireRuntimeBlock(block, src, null);
 
@@ -27506,7 +27437,7 @@ fn fieldPtr(
                     if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| {
                         return inst;
                     }
-                    try sema.resolveTypeFields(child_type);
+                    try child_type.resolveFields(mod);
                     if (child_type.unionTagType(mod)) |enum_ty| {
                         if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
                             const field_index_u32: u32 = @intCast(field_index);
@@ -27601,7 +27532,7 @@ fn fieldCallBind(
     find_field: {
         switch (concrete_ty.zigTypeTag(mod)) {
             .Struct => {
-                try sema.resolveTypeFields(concrete_ty);
+                try concrete_ty.resolveFields(mod);
                 if (mod.typeToStruct(concrete_ty)) |struct_type| {
                     const field_index = struct_type.nameIndex(ip, field_name) orelse
                         break :find_field;
@@ -27627,7 +27558,7 @@ fn fieldCallBind(
                 }
             },
             .Union => {
-                try sema.resolveTypeFields(concrete_ty);
+                try concrete_ty.resolveFields(mod);
                 const union_obj = mod.typeToUnion(concrete_ty).?;
                 _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field;
                 const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false);
@@ -27737,7 +27668,7 @@ fn finishFieldCallBind(
     object_ptr: Air.Inst.Ref,
 ) CompileError!ResolvedFieldCallee {
     const mod = sema.mod;
-    const ptr_field_ty = try sema.ptrType(.{
+    const ptr_field_ty = try mod.ptrTypeSema(.{
         .child = field_ty.toIntern(),
         .flags = .{
             .is_const = !ptr_ty.ptrIsMutable(mod),
@@ -27748,14 +27679,14 @@ fn finishFieldCallBind(
     const container_ty = ptr_ty.childType(mod);
     if (container_ty.zigTypeTag(mod) == .Struct) {
         if (container_ty.structFieldIsComptime(field_index, mod)) {
-            try sema.resolveStructFieldInits(container_ty);
+            try container_ty.resolveStructFieldInits(mod);
             const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?;
             return .{ .direct = Air.internedToRef(default_val.toIntern()) };
         }
     }
 
     if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
-        const ptr_val = try struct_ptr_val.ptrField(field_index, sema);
+        const ptr_val = try struct_ptr_val.ptrField(field_index, mod);
         const pointer = Air.internedToRef(ptr_val.toIntern());
         return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
     }
@@ -27831,8 +27762,8 @@ fn structFieldPtr(
     const ip = &mod.intern_pool;
     assert(struct_ty.zigTypeTag(mod) == .Struct);
 
-    try sema.resolveTypeFields(struct_ty);
-    try sema.resolveStructLayout(struct_ty);
+    try struct_ty.resolveFields(mod);
+    try struct_ty.resolveLayout(mod);
 
     if (struct_ty.isTuple(mod)) {
         if (field_name.eqlSlice("len", ip)) {
@@ -27871,7 +27802,7 @@ fn structFieldPtrByIndex(
     }
 
     if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
-        const val = try struct_ptr_val.ptrField(field_index, sema);
+        const val = try struct_ptr_val.ptrField(field_index, mod);
         return Air.internedToRef(val.toIntern());
     }
 
@@ -27915,10 +27846,11 @@ fn structFieldPtrByIndex(
             @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
     } else {
         // Our alignment is capped at the field alignment.
-        const field_align = try sema.structFieldAlignment(
+        const field_align = try mod.structFieldAlignmentAdvanced(
             struct_type.fieldAlign(ip, field_index),
             Type.fromInterned(field_ty),
             struct_type.layout,
+            .sema,
         );
         ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none)
             field_align
@@ -27926,10 +27858,10 @@ fn structFieldPtrByIndex(
             field_align.min(parent_align);
     }
 
-    const ptr_field_ty = try sema.ptrType(ptr_ty_data);
+    const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data);
 
     if (struct_type.fieldIsComptime(ip, field_index)) {
-        try sema.resolveStructFieldInits(struct_ty);
+        try struct_ty.resolveStructFieldInits(mod);
         const val = try mod.intern(.{ .ptr = .{
             .ty = ptr_field_ty.toIntern(),
             .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
@@ -27955,7 +27887,7 @@ fn structFieldVal(
     const ip = &mod.intern_pool;
     assert(struct_ty.zigTypeTag(mod) == .Struct);
 
-    try sema.resolveTypeFields(struct_ty);
+    try struct_ty.resolveFields(mod);
 
     switch (ip.indexToKey(struct_ty.toIntern())) {
         .struct_type => {
@@ -27966,7 +27898,7 @@ fn structFieldVal(
             const field_index = struct_type.nameIndex(ip, field_name) orelse
                 return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name);
             if (struct_type.fieldIsComptime(ip, field_index)) {
-                try sema.resolveStructFieldInits(struct_ty);
+                try struct_ty.resolveStructFieldInits(mod);
                 return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
             }
 
@@ -27983,7 +27915,7 @@ fn structFieldVal(
             }
 
             try sema.requireRuntimeBlock(block, src, null);
-            try sema.resolveTypeLayout(field_ty);
+            try field_ty.resolveLayout(mod);
             return block.addStructFieldVal(struct_byval, field_index, field_ty);
         },
         .anon_struct_type => |anon_struct| {
@@ -28050,7 +27982,7 @@ fn tupleFieldValByIndex(
     const field_ty = tuple_ty.structFieldType(field_index, mod);
 
     if (tuple_ty.structFieldIsComptime(field_index, mod))
-        try sema.resolveStructFieldInits(tuple_ty);
+        try tuple_ty.resolveStructFieldInits(mod);
     if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
         return Air.internedToRef(default_value.toIntern());
     }
@@ -28071,7 +28003,7 @@ fn tupleFieldValByIndex(
     }
 
     try sema.requireRuntimeBlock(block, src, null);
-    try sema.resolveTypeLayout(field_ty);
+    try field_ty.resolveLayout(mod);
     return block.addStructFieldVal(tuple_byval, field_index, field_ty);
 }
 
@@ -28092,11 +28024,11 @@ fn unionFieldPtr(
 
     const union_ptr_ty = sema.typeOf(union_ptr);
     const union_ptr_info = union_ptr_ty.ptrInfo(mod);
-    try sema.resolveTypeFields(union_ty);
+    try union_ty.resolveFields(mod);
     const union_obj = mod.typeToUnion(union_ty).?;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
     const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-    const ptr_field_ty = try sema.ptrType(.{
+    const ptr_field_ty = try mod.ptrTypeSema(.{
         .child = field_ty.toIntern(),
         .flags = .{
             .is_const = union_ptr_info.flags.is_const,
@@ -28107,7 +28039,7 @@ fn unionFieldPtr(
                     union_ptr_info.flags.alignment
                 else
                     try sema.typeAbiAlignment(union_ty);
-                const field_align = try sema.unionFieldAlignment(union_obj, field_index);
+                const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema);
                 break :blk union_align.min(field_align);
             } else union_ptr_info.flags.alignment,
         },
@@ -28163,7 +28095,7 @@ fn unionFieldPtr(
             },
             .@"packed", .@"extern" => {},
         }
-        const field_ptr_val = try union_ptr_val.ptrField(field_index, sema);
+        const field_ptr_val = try union_ptr_val.ptrField(field_index, mod);
         return Air.internedToRef(field_ptr_val.toIntern());
     }
 
@@ -28198,7 +28130,7 @@ fn unionFieldVal(
     const ip = &zcu.intern_pool;
     assert(union_ty.zigTypeTag(zcu) == .Union);
 
-    try sema.resolveTypeFields(union_ty);
+    try union_ty.resolveFields(zcu);
     const union_obj = zcu.typeToUnion(union_ty).?;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
     const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
@@ -28237,7 +28169,7 @@ fn unionFieldVal(
             .@"packed" => if (tag_matches) {
                 // Fast path - no need to use bitcast logic.
                 return Air.internedToRef(un.val);
-            } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, sema), 0)) |field_val| {
+            } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| {
                 return Air.internedToRef(field_val.toIntern());
             },
         }
@@ -28256,7 +28188,7 @@ fn unionFieldVal(
         _ = try block.addNoOp(.unreach);
         return .unreachable_value;
     }
-    try sema.resolveTypeLayout(field_ty);
+    try field_ty.resolveLayout(zcu);
     return block.addStructFieldVal(union_byval, field_index, field_ty);
 }
 
@@ -28287,7 +28219,7 @@ fn elemPtr(
             const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                 .needed_comptime_reason = "tuple field access index must be comptime-known",
             });
-            const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+            const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod));
             break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
         },
         else => {
@@ -28325,11 +28257,11 @@ fn elemPtrOneLayerOnly(
             const runtime_src = rs: {
                 const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
                 const index_val = maybe_index_val orelse break :rs elem_index_src;
-                const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
-                const elem_ptr = try ptr_val.ptrElem(index, sema);
+                const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
+                const elem_ptr = try ptr_val.ptrElem(index, mod);
                 return Air.internedToRef(elem_ptr.toIntern());
             };
-            const result_ty = try sema.elemPtrType(indexable_ty, null);
+            const result_ty = try indexable_ty.elemPtrType(null, mod);
 
             try sema.requireRuntimeBlock(block, src, runtime_src);
             return block.addPtrElemPtr(indexable, elem_index, result_ty);
@@ -28343,7 +28275,7 @@ fn elemPtrOneLayerOnly(
                     const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                         .needed_comptime_reason = "tuple field access index must be comptime-known",
                     });
-                    const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+                    const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod));
                     break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false);
                 },
                 else => unreachable, // Guaranteed by checkIndexable
@@ -28383,12 +28315,12 @@ fn elemVal(
                 const runtime_src = rs: {
                     const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
                     const index_val = maybe_index_val orelse break :rs elem_index_src;
-                    const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+                    const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
                     const elem_ty = indexable_ty.elemType2(mod);
                     const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
                     const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty);
                     const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
-                    const elem_ptr_val = try many_ptr_val.ptrElem(index, sema);
+                    const elem_ptr_val = try many_ptr_val.ptrElem(index, mod);
                     if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                         return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern());
                     }
@@ -28404,7 +28336,7 @@ fn elemVal(
                     if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent;
                     const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent;
                     const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent;
-                    const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntAdvanced(sema));
+                    const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(mod));
                     if (index != inner_ty.arrayLen(mod)) break :arr_sent;
                     return Air.internedToRef(sentinel.toIntern());
                 }
@@ -28422,7 +28354,7 @@ fn elemVal(
             const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
                 .needed_comptime_reason = "tuple field access index must be comptime-known",
             });
-            const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+            const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod));
             return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
         },
         else => unreachable,
@@ -28467,7 +28399,7 @@ fn tupleFieldPtr(
     const mod = sema.mod;
     const tuple_ptr_ty = sema.typeOf(tuple_ptr);
     const tuple_ty = tuple_ptr_ty.childType(mod);
-    try sema.resolveTypeFields(tuple_ty);
+    try tuple_ty.resolveFields(mod);
     const field_count = tuple_ty.structFieldCount(mod);
 
     if (field_count == 0) {
@@ -28481,7 +28413,7 @@ fn tupleFieldPtr(
     }
 
     const field_ty = tuple_ty.structFieldType(field_index, mod);
-    const ptr_field_ty = try sema.ptrType(.{
+    const ptr_field_ty = try mod.ptrTypeSema(.{
         .child = field_ty.toIntern(),
         .flags = .{
             .is_const = !tuple_ptr_ty.ptrIsMutable(mod),
@@ -28491,7 +28423,7 @@ fn tupleFieldPtr(
     });
 
     if (tuple_ty.structFieldIsComptime(field_index, mod))
-        try sema.resolveStructFieldInits(tuple_ty);
+        try tuple_ty.resolveStructFieldInits(mod);
 
     if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
         return Air.internedToRef((try mod.intern(.{ .ptr = .{
@@ -28502,7 +28434,7 @@ fn tupleFieldPtr(
     }
 
     if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| {
-        const field_ptr_val = try tuple_ptr_val.ptrField(field_index, sema);
+        const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod);
         return Air.internedToRef(field_ptr_val.toIntern());
     }
 
@@ -28524,7 +28456,7 @@ fn tupleField(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const tuple_ty = sema.typeOf(tuple);
-    try sema.resolveTypeFields(tuple_ty);
+    try tuple_ty.resolveFields(mod);
     const field_count = tuple_ty.structFieldCount(mod);
 
     if (field_count == 0) {
@@ -28540,7 +28472,7 @@ fn tupleField(
     const field_ty = tuple_ty.structFieldType(field_index, mod);
 
     if (tuple_ty.structFieldIsComptime(field_index, mod))
-        try sema.resolveStructFieldInits(tuple_ty);
+        try tuple_ty.resolveStructFieldInits(mod);
     if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
         return Air.internedToRef(default_value.toIntern()); // comptime field
     }
@@ -28553,7 +28485,7 @@ fn tupleField(
     try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
 
     try sema.requireRuntimeBlock(block, tuple_src, null);
-    try sema.resolveTypeLayout(field_ty);
+    try field_ty.resolveLayout(mod);
     return block.addStructFieldVal(tuple, field_index, field_ty);
 }
 
@@ -28583,7 +28515,7 @@ fn elemValArray(
     const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
 
     if (maybe_index_val) |index_val| {
-        const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+        const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
         if (array_sent) |s| {
             if (index == array_len) {
                 return Air.internedToRef(s.toIntern());
@@ -28599,7 +28531,7 @@ fn elemValArray(
             return mod.undefRef(elem_ty);
         }
         if (maybe_index_val) |index_val| {
-            const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+            const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
             const elem_val = try array_val.elemValue(mod, index);
             return Air.internedToRef(elem_val.toIntern());
         }
@@ -28621,7 +28553,6 @@ fn elemValArray(
         return Air.internedToRef(elem_val.toIntern());
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
-    try sema.queueFullTypeResolution(array_ty);
     return block.addBinOp(.array_elem_val, array, elem_index);
 }
 
@@ -28650,7 +28581,7 @@ fn elemPtrArray(
     const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr);
     // The index must not be undefined since it can be out of bounds.
     const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
-        const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema));
+        const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod));
         if (index >= array_len_s) {
             const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
             return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
@@ -28658,14 +28589,14 @@ fn elemPtrArray(
         break :o index;
     } else null;
 
-    const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);
+    const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod);
 
     if (maybe_undef_array_ptr_val) |array_ptr_val| {
         if (array_ptr_val.isUndef(mod)) {
             return mod.undefRef(elem_ptr_ty);
         }
         if (offset) |index| {
-            const elem_ptr = try array_ptr_val.ptrElem(index, sema);
+            const elem_ptr = try array_ptr_val.ptrElem(index, mod);
             return Air.internedToRef(elem_ptr.toIntern());
         }
     }
@@ -28710,19 +28641,19 @@ fn elemValSlice(
 
     if (maybe_slice_val) |slice_val| {
         runtime_src = elem_index_src;
-        const slice_len = try slice_val.sliceLen(sema);
+        const slice_len = try slice_val.sliceLen(mod);
         const slice_len_s = slice_len + @intFromBool(slice_sent);
         if (slice_len_s == 0) {
             return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
         }
         if (maybe_index_val) |index_val| {
-            const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema));
+            const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));
             if (index >= slice_len_s) {
                 const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                 return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
             }
-            const elem_ptr_ty = try sema.elemPtrType(slice_ty, index);
-            const elem_ptr_val = try slice_val.ptrElem(index, sema);
+            const elem_ptr_ty = try slice_ty.elemPtrType(index, mod);
+            const elem_ptr_val = try slice_val.ptrElem(index, mod);
             if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                 return Air.internedToRef(elem_val.toIntern());
             }
@@ -28735,13 +28666,12 @@ fn elemValSlice(
     try sema.requireRuntimeBlock(block, src, runtime_src);
     if (oob_safety and block.wantSafety()) {
         const len_inst = if (maybe_slice_val) |slice_val|
-            try mod.intRef(Type.usize, try slice_val.sliceLen(sema))
+            try mod.intRef(Type.usize, try slice_val.sliceLen(mod))
         else
             try block.addTyOp(.slice_len, Type.usize, slice);
         const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
         try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
     }
-    try sema.queueFullTypeResolution(sema.typeOf(slice));
     return block.addBinOp(.slice_elem_val, slice, elem_index);
 }
 
@@ -28762,17 +28692,17 @@ fn elemPtrSlice(
     const maybe_undef_slice_val = try sema.resolveValue(slice);
     // The index must not be undefined since it can be out of bounds.
     const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
-        const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema));
+        const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod));
         break :o index;
     } else null;
 
-    const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);
+    const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod);
 
     if (maybe_undef_slice_val) |slice_val| {
         if (slice_val.isUndef(mod)) {
             return mod.undefRef(elem_ptr_ty);
         }
-        const slice_len = try slice_val.sliceLen(sema);
+        const slice_len = try slice_val.sliceLen(mod);
         const slice_len_s = slice_len + @intFromBool(slice_sent);
         if (slice_len_s == 0) {
             return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
@@ -28782,7 +28712,7 @@ fn elemPtrSlice(
                 const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                 return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
             }
-            const elem_ptr_val = try slice_val.ptrElem(index, sema);
+            const elem_ptr_val = try slice_val.ptrElem(index, mod);
             return Air.internedToRef(elem_ptr_val.toIntern());
         }
     }
@@ -28795,7 +28725,7 @@ fn elemPtrSlice(
         const len_inst = len: {
             if (maybe_undef_slice_val) |slice_val|
                 if (!slice_val.isUndef(mod))
-                    break :len try mod.intRef(Type.usize, try slice_val.sliceLen(sema));
+                    break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod));
             break :len try block.addTyOp(.slice_len, Type.usize, slice);
         };
         const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -28860,9 +28790,9 @@ fn coerceExtra(
     if (dest_ty.isGenericPoison()) return inst;
     const zcu = sema.mod;
     const dest_ty_src = inst_src; // TODO better source location
-    try sema.resolveTypeFields(dest_ty);
+    try dest_ty.resolveFields(zcu);
     const inst_ty = sema.typeOf(inst);
-    try sema.resolveTypeFields(inst_ty);
+    try inst_ty.resolveFields(zcu);
     const target = zcu.getTarget();
     // If the types are the same, we can return the operand.
     if (dest_ty.eql(inst_ty, zcu))
@@ -28876,7 +28806,6 @@ fn coerceExtra(
             return sema.coerceInMemory(val, dest_ty);
         }
         try sema.requireRuntimeBlock(block, inst_src, null);
-        try sema.queueFullTypeResolution(dest_ty);
         const new_val = try block.addBitCast(dest_ty, inst);
         try sema.checkKnownAllocPtr(block, inst, new_val);
         return new_val;
@@ -29172,7 +29101,7 @@ fn coerceExtra(
                     // empty tuple to zero-length slice
                     // note that this allows coercing to a mutable slice.
                     if (inst_child_ty.structFieldCount(zcu) == 0) {
-                        const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, sema);
+                        const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema);
                         return Air.internedToRef(try zcu.intern(.{ .slice = .{
                             .ty = dest_ty.toIntern(),
                             .ptr = try zcu.intern(.{ .ptr = .{
@@ -29317,7 +29246,7 @@ fn coerceExtra(
                     }
                     break :int;
                 };
-                const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, sema);
+                const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema);
                 // TODO implement this compile error
                 //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty);
                 //if (!int_again_val.eql(val, inst_ty, zcu)) {
@@ -30649,7 +30578,6 @@ fn storePtr2(
     }
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
-    try sema.queueFullTypeResolution(elem_ty);
 
     if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
         const ptr_inst = ptr.toIndex().?;
@@ -30871,10 +30799,10 @@ fn bitCast(
     operand_src: ?LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const zcu = sema.mod;
-    try sema.resolveTypeLayout(dest_ty);
+    try dest_ty.resolveLayout(zcu);
 
     const old_ty = sema.typeOf(inst);
-    try sema.resolveTypeLayout(old_ty);
+    try old_ty.resolveLayout(zcu);
 
     const dest_bits = dest_ty.bitSize(zcu);
     const old_bits = old_ty.bitSize(zcu);
@@ -31056,7 +30984,7 @@ fn coerceEnumToUnion(
 
         const union_obj = mod.typeToUnion(union_ty).?;
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-        try sema.resolveTypeFields(field_ty);
+        try field_ty.resolveFields(mod);
         if (field_ty.zigTypeTag(mod) == .NoReturn) {
             const msg = msg: {
                 const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{});
@@ -31469,8 +31397,8 @@ fn coerceTupleToStruct(
 ) !Air.Inst.Ref {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    try sema.resolveTypeFields(struct_ty);
-    try sema.resolveStructFieldInits(struct_ty);
+    try struct_ty.resolveFields(mod);
+    try struct_ty.resolveStructFieldInits(mod);
 
     if (struct_ty.isTupleOrAnonStruct(mod)) {
         return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
@@ -31817,7 +31745,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl
     });
     // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type
     try sema.declareDependency(.{ .decl_val = decl_index });
-    const ptr_ty = try sema.ptrType(.{
+    const ptr_ty = try mod.ptrTypeSema(.{
         .child = decl_val.typeOf(mod).toIntern(),
         .flags = .{
             .alignment = owner_decl.alignment,
@@ -31864,14 +31792,14 @@ fn analyzeRef(
 
     try sema.requireRuntimeBlock(block, src, null);
     const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local);
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try mod.ptrTypeSema(.{
         .child = operand_ty.toIntern(),
         .flags = .{
             .is_const = true,
             .address_space = address_space,
         },
     });
-    const mut_ptr_type = try sema.ptrType(.{
+    const mut_ptr_type = try mod.ptrTypeSema(.{
         .child = operand_ty.toIntern(),
         .flags = .{ .address_space = address_space },
     });
@@ -31979,7 +31907,7 @@ fn analyzeSliceLen(
         if (slice_val.isUndef(mod)) {
             return mod.undefRef(Type.usize);
         }
-        return mod.intRef(Type.usize, try slice_val.sliceLen(sema));
+        return mod.intRef(Type.usize, try slice_val.sliceLen(mod));
     }
     try sema.requireRuntimeBlock(block, src, null);
     return block.addTyOp(.slice_len, Type.usize, slice_inst);
@@ -32347,7 +32275,7 @@ fn analyzeSlice(
         assert(manyptr_ty_key.flags.size == .One);
         manyptr_ty_key.child = elem_ty.toIntern();
         manyptr_ty_key.flags.size = .Many;
-        break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src);
+        break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src);
     } else ptr_or_slice;
 
     const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
@@ -32416,7 +32344,7 @@ fn analyzeSlice(
                             return sema.fail(block, src, "slice of undefined", .{});
                         }
                         const has_sentinel = slice_ty.sentinel(mod) != null;
-                        const slice_len = try slice_val.sliceLen(sema);
+                        const slice_len = try slice_val.sliceLen(mod);
                         const len_plus_sent = slice_len + @intFromBool(has_sentinel);
                         const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
                         if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
@@ -32431,7 +32359,7 @@ fn analyzeSlice(
                                 "end index {} out of bounds for slice of length {d}{s}",
                                 .{
                                     end_val.fmtValue(mod, sema),
-                                    try slice_val.sliceLen(sema),
+                                    try slice_val.sliceLen(mod),
                                     sentinel_label,
                                 },
                             );
@@ -32504,7 +32432,7 @@ fn analyzeSlice(
 
                 const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
                 const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty);
-                const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, sema);
+                const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod);
                 const res = try sema.pointerDerefExtra(block, src, elem_ptr);
                 const actual_sentinel = switch (res) {
                     .runtime_load => break :sentinel_check,
@@ -32567,9 +32495,9 @@ fn analyzeSlice(
     const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C;
 
     if (opt_new_len_val) |new_len_val| {
-        const new_len_int = try new_len_val.toUnsignedIntAdvanced(sema);
+        const new_len_int = try new_len_val.toUnsignedIntSema(mod);
 
-        const return_ty = try sema.ptrType(.{
+        const return_ty = try mod.ptrTypeSema(.{
             .child = (try mod.arrayType(.{
                 .len = new_len_int,
                 .sentinel = if (sentinel) |s| s.toIntern() else .none,
@@ -32631,7 +32559,7 @@ fn analyzeSlice(
         return sema.fail(block, src, "non-zero length slice of undefined pointer", .{});
     }
 
-    const return_ty = try sema.ptrType(.{
+    const return_ty = try mod.ptrTypeSema(.{
         .child = elem_ty.toIntern(),
         .sentinel = if (sentinel) |s| s.toIntern() else .none,
         .flags = .{
@@ -32659,7 +32587,7 @@ fn analyzeSlice(
             if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
                 // we don't need to add one for sentinels because the
                 // underlying value data includes the sentinel
-                break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(sema));
+                break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod));
             }
 
             const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
@@ -32751,7 +32679,7 @@ fn cmpNumeric(
                 if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
                     return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false;
                 }
-                return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema))
+                return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema))
                     .bool_true
                 else
                     .bool_false;
@@ -32820,11 +32748,11 @@ fn cmpNumeric(
     // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
     // add/subtract 1.
     const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
-        !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))
+        !(try lhs_val.compareAllWithZeroSema(.gte, mod))
     else
         (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
     const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
-        !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))
+        !(try rhs_val.compareAllWithZeroSema(.gte, mod))
     else
         (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
     const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
@@ -32972,7 +32900,7 @@ fn compareIntsOnlyPossibleResult(
 ) Allocator.Error!?bool {
     const mod = sema.mod;
     const rhs_info = rhs_ty.intInfo(mod);
-    const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable;
+    const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable;
     const is_zero = vs_zero == .eq;
     const is_negative = vs_zero == .lt;
     const is_positive = vs_zero == .gt;
@@ -33136,7 +33064,6 @@ fn wrapErrorUnionPayload(
         } })));
     }
     try sema.requireRuntimeBlock(block, inst_src, null);
-    try sema.queueFullTypeResolution(dest_payload_ty);
     return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced);
 }
 
@@ -33939,7 +33866,7 @@ fn resolvePeerTypesInner(
 
                 opt_ptr_info = ptr_info;
             }
-            return .{ .success = try sema.ptrType(opt_ptr_info.?) };
+            return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) };
         },
 
         .ptr => {
@@ -34249,7 +34176,7 @@ fn resolvePeerTypesInner(
                 },
             }
 
-            return .{ .success = try sema.ptrType(opt_ptr_info.?) };
+            return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) };
         },
 
         .func => {
@@ -34606,7 +34533,7 @@ fn resolvePeerTypesInner(
                 var comptime_val: ?Value = null;
                 for (peer_tys) |opt_ty| {
                     const struct_ty = opt_ty orelse continue;
-                    try sema.resolveStructFieldInits(struct_ty);
+                    try struct_ty.resolveStructFieldInits(mod);
 
                     const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse {
                         comptime_val = null;
@@ -34742,181 +34669,22 @@ pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
     const ip = &mod.intern_pool;
     const fn_ty_info = mod.typeToFunc(fn_ty).?;
 
-    try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.return_type));
+    try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod);
 
     if (mod.comp.config.any_error_tracing and
         Type.fromInterned(fn_ty_info.return_type).isError(mod))
     {
         // Ensure the type exists so that backends can assume that.
-        _ = try sema.getBuiltinType("StackTrace");
+        _ = try mod.getBuiltinType("StackTrace");
     }
 
     for (0..fn_ty_info.param_types.len) |i| {
-        try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.param_types.get(ip)[i]));
+        try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod);
     }
 }
 
-/// Make it so that calling hash() and eql() on `val` will not assert due
-/// to a type not having its layout resolved.
 fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
-    const mod = sema.mod;
-    switch (mod.intern_pool.indexToKey(val.toIntern())) {
-        .int => |int| switch (int.storage) {
-            .u64, .i64, .big_int => return val,
-            .lazy_align, .lazy_size => return mod.intValue(
-                Type.fromInterned(int.ty),
-                (try val.getUnsignedIntAdvanced(mod, sema)).?,
-            ),
-        },
-        .slice => |slice| {
-            const ptr = try sema.resolveLazyValue(Value.fromInterned(slice.ptr));
-            const len = try sema.resolveLazyValue(Value.fromInterned(slice.len));
-            if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val;
-            return Value.fromInterned(try mod.intern(.{ .slice = .{
-                .ty = slice.ty,
-                .ptr = ptr.toIntern(),
-                .len = len.toIntern(),
-            } }));
-        },
-        .ptr => |ptr| {
-            switch (ptr.base_addr) {
-                .decl, .comptime_alloc, .anon_decl, .int => return val,
-                .comptime_field => |field_val| {
-                    const resolved_field_val =
-                        (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern();
-                    return if (resolved_field_val == field_val)
-                        val
-                    else
-                        Value.fromInterned((try mod.intern(.{ .ptr = .{
-                            .ty = ptr.ty,
-                            .base_addr = .{ .comptime_field = resolved_field_val },
-                            .byte_offset = ptr.byte_offset,
-                        } })));
-                },
-                .eu_payload, .opt_payload => |base| {
-                    const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern();
-                    return if (resolved_base == base)
-                        val
-                    else
-                        Value.fromInterned((try mod.intern(.{ .ptr = .{
-                            .ty = ptr.ty,
-                            .base_addr = switch (ptr.base_addr) {
-                                .eu_payload => .{ .eu_payload = resolved_base },
-                                .opt_payload => .{ .opt_payload = resolved_base },
-                                else => unreachable,
-                            },
-                            .byte_offset = ptr.byte_offset,
-                        } })));
-                },
-                .arr_elem, .field => |base_index| {
-                    const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern();
-                    return if (resolved_base == base_index.base)
-                        val
-                    else
-                        Value.fromInterned((try mod.intern(.{ .ptr = .{
-                            .ty = ptr.ty,
-                            .base_addr = switch (ptr.base_addr) {
-                                .arr_elem => .{ .arr_elem = .{
-                                    .base = resolved_base,
-                                    .index = base_index.index,
-                                } },
-                                .field => .{ .field = .{
-                                    .base = resolved_base,
-                                    .index = base_index.index,
-                                } },
-                                else => unreachable,
-                            },
-                            .byte_offset = ptr.byte_offset,
-                        } })));
-                },
-            }
-        },
-        .aggregate => |aggregate| switch (aggregate.storage) {
-            .bytes => return val,
-            .elems => |elems| {
-                var resolved_elems: []InternPool.Index = &.{};
-                for (elems, 0..) |elem, i| {
-                    const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern();
-                    if (resolved_elems.len == 0 and resolved_elem != elem) {
-                        resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len);
-                        @memcpy(resolved_elems[0..i], elems[0..i]);
-                    }
-                    if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
-                }
-                return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                    .ty = aggregate.ty,
-                    .storage = .{ .elems = resolved_elems },
-                } })));
-            },
-            .repeated_elem => |elem| {
-                const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern();
-                return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                    .ty = aggregate.ty,
-                    .storage = .{ .repeated_elem = resolved_elem },
-                } })));
-            },
-        },
-        .un => |un| {
-            const resolved_tag = if (un.tag == .none)
-                .none
-            else
-                (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern();
-            const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern();
-            return if (resolved_tag == un.tag and resolved_val == un.val)
-                val
-            else
-                Value.fromInterned((try mod.intern(.{ .un = .{
-                    .ty = un.ty,
-                    .tag = resolved_tag,
-                    .val = resolved_val,
-                } })));
-        },
-        else => return val,
-    }
-}
-
-pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
-    const mod = sema.mod;
-    switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-        .simple_type => |simple_type| return sema.resolveSimpleType(simple_type),
-        else => {},
-    }
-    switch (ty.zigTypeTag(mod)) {
-        .Struct => return sema.resolveStructLayout(ty),
-        .Union => return sema.resolveUnionLayout(ty),
-        .Array => {
-            if (ty.arrayLenIncludingSentinel(mod) == 0) return;
-            const elem_ty = ty.childType(mod);
-            return sema.resolveTypeLayout(elem_ty);
-        },
-        .Optional => {
-            const payload_ty = ty.optionalChild(mod);
-            // In case of querying the ABI alignment of this optional, we will ask
-            // for hasRuntimeBits() of the payload type, so we need "requires comptime"
-            // to be known already before this function returns.
-            _ = try sema.typeRequiresComptime(payload_ty);
-            return sema.resolveTypeLayout(payload_ty);
-        },
-        .ErrorUnion => {
-            const payload_ty = ty.errorUnionPayload(mod);
-            return sema.resolveTypeLayout(payload_ty);
-        },
-        .Fn => {
-            const info = mod.typeToFunc(ty).?;
-            if (info.is_generic) {
-                // Resolving of generic function types is deferred to when
-                // the function is instantiated.
-                return;
-            }
-            const ip = &mod.intern_pool;
-            for (0..info.param_types.len) |i| {
-                const param_ty = info.param_types.get(ip)[i];
-                try sema.resolveTypeLayout(Type.fromInterned(param_ty));
-            }
-            try sema.resolveTypeLayout(Type.fromInterned(info.return_type));
-        },
-        else => {},
-    }
+    return val.resolveLazy(sema.arena, sema.mod);
 }
 
 /// Resolve a struct's alignment only without triggering resolution of its layout.
@@ -34925,11 +34693,13 @@ pub fn resolveStructAlignment(
     sema: *Sema,
     ty: InternPool.Index,
     struct_type: InternPool.LoadedStructType,
-) CompileError!Alignment {
+) SemaError!void {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
     const target = mod.getTarget();
 
+    assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?);
+
     assert(struct_type.flagsPtr(ip).alignment == .none);
     assert(struct_type.layout != .@"packed");
 
@@ -34940,7 +34710,7 @@ pub fn resolveStructAlignment(
         struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
         const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
         struct_type.flagsPtr(ip).alignment = result;
-        return result;
+        return;
     }
 
     try sema.resolveTypeFieldsStruct(ty, struct_type);
@@ -34952,7 +34722,7 @@ pub fn resolveStructAlignment(
         struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
         const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
         struct_type.flagsPtr(ip).alignment = result;
-        return result;
+        return;
     }
     defer struct_type.clearAlignmentWip(ip);
 
@@ -34962,30 +34732,35 @@ pub fn resolveStructAlignment(
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
         if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty))
             continue;
-        const field_align = try sema.structFieldAlignment(
+        const field_align = try mod.structFieldAlignmentAdvanced(
             struct_type.fieldAlign(ip, i),
             field_ty,
             struct_type.layout,
+            .sema,
         );
         result = result.maxStrict(field_align);
     }
 
     struct_type.flagsPtr(ip).alignment = result;
-    return result;
 }
 
-fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
+pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
     const zcu = sema.mod;
     const ip = &zcu.intern_pool;
     const struct_type = zcu.typeToStruct(ty) orelse return;
 
+    assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?);
+
     if (struct_type.haveLayout(ip))
         return;
 
-    try sema.resolveTypeFields(ty);
+    try ty.resolveFields(zcu);
 
     if (struct_type.layout == .@"packed") {
-        try semaBackingIntType(zcu, struct_type);
+        semaBackingIntType(zcu, struct_type) catch |err| switch (err) {
+            error.OutOfMemory, error.AnalysisFail => |e| return e,
+            error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
+        };
         return;
     }
 
@@ -35021,10 +34796,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
             },
             else => return err,
         };
-        field_align.* = try sema.structFieldAlignment(
+        field_align.* = try zcu.structFieldAlignmentAdvanced(
             struct_type.fieldAlign(ip, i),
             field_ty,
             struct_type.layout,
+            .sema,
         );
         big_align = big_align.maxStrict(field_align.*);
     }
@@ -35160,7 +34936,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co
         var accumulator: u64 = 0;
         for (0..struct_type.field_types.len) |i| {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-            accumulator += try field_ty.bitSizeAdvanced(mod, &sema);
+            accumulator += try field_ty.bitSizeAdvanced(mod, .sema);
         }
         break :blk accumulator;
     };
@@ -35270,11 +35046,13 @@ pub fn resolveUnionAlignment(
     sema: *Sema,
     ty: Type,
     union_type: InternPool.LoadedUnionType,
-) CompileError!Alignment {
+) SemaError!void {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
     const target = mod.getTarget();
 
+    assert(sema.ownerUnit().unwrap().decl == union_type.decl);
+
     assert(!union_type.haveLayout(ip));
 
     if (union_type.flagsPtr(ip).status == .field_types_wip) {
@@ -35284,7 +35062,7 @@ pub fn resolveUnionAlignment(
         union_type.flagsPtr(ip).assumed_pointer_aligned = true;
         const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
         union_type.flagsPtr(ip).alignment = result;
-        return result;
+        return;
     }
 
     try sema.resolveTypeFieldsUnion(ty, union_type);
@@ -35304,11 +35082,10 @@ pub fn resolveUnionAlignment(
     }
 
     union_type.flagsPtr(ip).alignment = max_align;
-    return max_align;
 }
 
 /// This logic must be kept in sync with `Module.getUnionLayout`.
-fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
+pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
     const zcu = sema.mod;
     const ip = &zcu.intern_pool;
 
@@ -35317,6 +35094,8 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
     // Load again, since the tag type might have changed due to resolution.
     const union_type = ip.loadUnionType(ty.ip_index);
 
+    assert(sema.ownerUnit().unwrap().decl == union_type.decl);
+
     switch (union_type.flagsPtr(ip).status) {
         .none, .have_field_types => {},
         .field_types_wip, .layout_wip => {
@@ -35425,53 +35204,15 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
 
 /// Returns `error.AnalysisFail` if any of the types (recursively) failed to
 /// be resolved.
-pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
-    const mod = sema.mod;
-    const ip = &mod.intern_pool;
-    switch (ty.zigTypeTag(mod)) {
-        .Pointer => {
-            return sema.resolveTypeFully(ty.childType(mod));
-        },
-        .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => try sema.resolveStructFully(ty),
-            .anon_struct_type => |tuple| {
-                for (tuple.types.get(ip)) |field_ty| {
-                    try sema.resolveTypeFully(Type.fromInterned(field_ty));
-                }
-            },
-            .simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
-            else => {},
-        },
-        .Union => return sema.resolveUnionFully(ty),
-        .Array => return sema.resolveTypeFully(ty.childType(mod)),
-        .Optional => {
-            return sema.resolveTypeFully(ty.optionalChild(mod));
-        },
-        .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)),
-        .Fn => {
-            const info = mod.typeToFunc(ty).?;
-            if (info.is_generic) {
-                // Resolving of generic function types is deferred to when
-                // the function is instantiated.
-                return;
-            }
-            for (0..info.param_types.len) |i| {
-                const param_ty = info.param_types.get(ip)[i];
-                try sema.resolveTypeFully(Type.fromInterned(param_ty));
-            }
-            try sema.resolveTypeFully(Type.fromInterned(info.return_type));
-        },
-        else => {},
-    }
-}
-
-fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
+pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
     try sema.resolveStructLayout(ty);
 
     const mod = sema.mod;
     const ip = &mod.intern_pool;
     const struct_type = mod.typeToStruct(ty).?;
 
+    assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?);
+
     if (struct_type.setFullyResolved(ip)) return;
     errdefer struct_type.clearFullyResolved(ip);
 
@@ -35481,16 +35222,19 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
 
     for (0..struct_type.field_types.len) |i| {
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-        try sema.resolveTypeFully(field_ty);
+        try field_ty.resolveFully(mod);
     }
 }
 
-fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
+pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
     try sema.resolveUnionLayout(ty);
 
     const mod = sema.mod;
     const ip = &mod.intern_pool;
     const union_obj = mod.typeToUnion(ty).?;
+
+    assert(sema.ownerUnit().unwrap().decl == union_obj.decl);
+
     switch (union_obj.flagsPtr(ip).status) {
         .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
         .fully_resolved_wip, .fully_resolved => return,
@@ -35506,7 +35250,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
         union_obj.flagsPtr(ip).status = .fully_resolved_wip;
         for (0..union_obj.field_types.len) |field_index| {
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-            try sema.resolveTypeFully(field_ty);
+            try field_ty.resolveFully(mod);
         }
         union_obj.flagsPtr(ip).status = .fully_resolved;
     }
@@ -35515,135 +35259,18 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
     _ = try sema.typeRequiresComptime(ty);
 }
 
-pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
-    const mod = sema.mod;
-    const ip = &mod.intern_pool;
-    const ty_ip = ty.toIntern();
-
-    switch (ty_ip) {
-        .none => unreachable,
-
-        .u0_type,
-        .i0_type,
-        .u1_type,
-        .u8_type,
-        .i8_type,
-        .u16_type,
-        .i16_type,
-        .u29_type,
-        .u32_type,
-        .i32_type,
-        .u64_type,
-        .i64_type,
-        .u80_type,
-        .u128_type,
-        .i128_type,
-        .usize_type,
-        .isize_type,
-        .c_char_type,
-        .c_short_type,
-        .c_ushort_type,
-        .c_int_type,
-        .c_uint_type,
-        .c_long_type,
-        .c_ulong_type,
-        .c_longlong_type,
-        .c_ulonglong_type,
-        .c_longdouble_type,
-        .f16_type,
-        .f32_type,
-        .f64_type,
-        .f80_type,
-        .f128_type,
-        .anyopaque_type,
-        .bool_type,
-        .void_type,
-        .type_type,
-        .anyerror_type,
-        .adhoc_inferred_error_set_type,
-        .comptime_int_type,
-        .comptime_float_type,
-        .noreturn_type,
-        .anyframe_type,
-        .null_type,
-        .undefined_type,
-        .enum_literal_type,
-        .manyptr_u8_type,
-        .manyptr_const_u8_type,
-        .manyptr_const_u8_sentinel_0_type,
-        .single_const_pointer_to_comptime_int_type,
-        .slice_const_u8_type,
-        .slice_const_u8_sentinel_0_type,
-        .optional_noreturn_type,
-        .anyerror_void_error_union_type,
-        .generic_poison_type,
-        .empty_struct_type,
-        => {},
-
-        .undef => unreachable,
-        .zero => unreachable,
-        .zero_usize => unreachable,
-        .zero_u8 => unreachable,
-        .one => unreachable,
-        .one_usize => unreachable,
-        .one_u8 => unreachable,
-        .four_u8 => unreachable,
-        .negative_one => unreachable,
-        .calling_convention_c => unreachable,
-        .calling_convention_inline => unreachable,
-        .void_value => unreachable,
-        .unreachable_value => unreachable,
-        .null_value => unreachable,
-        .bool_true => unreachable,
-        .bool_false => unreachable,
-        .empty_struct => unreachable,
-        .generic_poison => unreachable,
-
-        else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
-            .type_struct,
-            .type_struct_packed,
-            .type_struct_packed_inits,
-            => try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)),
-
-            .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)),
-            .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
-            else => {},
-        },
-    }
-}
-
-/// Fully resolves a simple type. This is usually a nop, but for builtin types with
-/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
-/// resolve the container type.
-fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void {
-    const builtin_type_name: []const u8 = switch (simple_type) {
-        .atomic_order => "AtomicOrder",
-        .atomic_rmw_op => "AtomicRmwOp",
-        .calling_convention => "CallingConvention",
-        .address_space => "AddressSpace",
-        .float_mode => "FloatMode",
-        .reduce_op => "ReduceOp",
-        .call_modifier => "CallModifer",
-        .prefetch_options => "PrefetchOptions",
-        .export_options => "ExportOptions",
-        .extern_options => "ExternOptions",
-        .type_info => "Type",
-        else => return,
-    };
-    // This will fully resolve the type.
-    _ = try sema.getBuiltinType(builtin_type_name);
-}
-
 pub fn resolveTypeFieldsStruct(
     sema: *Sema,
     ty: InternPool.Index,
     struct_type: InternPool.LoadedStructType,
-) CompileError!void {
+) SemaError!void {
     const zcu = sema.mod;
     const ip = &zcu.intern_pool;
     // If there is no owner decl it means the struct has no fields.
     const owner_decl = struct_type.decl.unwrap() orelse return;
 
+    assert(sema.ownerUnit().unwrap().decl == owner_decl);
+
     switch (zcu.declPtr(owner_decl).analysis) {
         .file_failure,
         .dependency_failure,
@@ -35674,16 +35301,19 @@ pub fn resolveTypeFieldsStruct(
             }
             return error.AnalysisFail;
         },
-        else => |e| return e,
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
     };
 }
 
-pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
+pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
     const zcu = sema.mod;
     const ip = &zcu.intern_pool;
     const struct_type = zcu.typeToStruct(ty) orelse return;
     const owner_decl = struct_type.decl.unwrap() orelse return;
 
+    assert(sema.ownerUnit().unwrap().decl == owner_decl);
+
     // Inits can start as resolved
     if (struct_type.haveFieldInits(ip)) return;
 
@@ -35706,15 +35336,19 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
             }
             return error.AnalysisFail;
         },
-        else => |e| return e,
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
     };
     struct_type.setHaveFieldInits(ip);
 }
 
-pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void {
+pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void {
     const zcu = sema.mod;
     const ip = &zcu.intern_pool;
     const owner_decl = zcu.declPtr(union_type.decl);
+
+    assert(sema.ownerUnit().unwrap().decl == union_type.decl);
+
     switch (owner_decl.analysis) {
         .file_failure,
         .dependency_failure,
@@ -35752,7 +35386,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
             }
             return error.AnalysisFail;
         },
-        else => |e| return e,
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
     };
     union_type.flagsPtr(ip).status = .have_field_types;
 }
@@ -36801,106 +36436,6 @@ fn generateUnionTagTypeSimple(
     return enum_ty;
 }
 
-fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
-    const zcu = sema.mod;
-
-    var block: Block = .{
-        .parent = null,
-        .sema = sema,
-        .namespace = sema.owner_decl.src_namespace,
-        .instructions = .{},
-        .inlining = null,
-        .is_comptime = true,
-        .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: {
-            assert(sema.owner_decl.has_tv);
-            assert(sema.owner_decl.owns_tv);
-            switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) {
-                .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?,
-                .Fn => {
-                    const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner;
-                    const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl);
-                    break :owner generic_owner_decl.zir_decl_index.unwrap().?;
-                },
-                else => unreachable,
-            }
-        },
-        .type_name_ctx = sema.owner_decl.name,
-    };
-    defer block.instructions.deinit(sema.gpa);
-
-    const src = block.nodeOffset(0);
-
-    const decl_index = try getBuiltinDecl(sema, &block, name);
-    return sema.analyzeDeclVal(&block, src, decl_index);
-}
-
-fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!InternPool.DeclIndex {
-    const gpa = sema.gpa;
-
-    const src = block.nodeOffset(0);
-
-    const mod = sema.mod;
-    const ip = &mod.intern_pool;
-    const std_mod = mod.std_mod;
-    const std_file = (mod.importPkg(std_mod) catch unreachable).file;
-    const opt_builtin_inst = (try sema.namespaceLookupRef(
-        block,
-        src,
-        mod.declPtr(std_file.root_decl.unwrap().?).src_namespace.toOptional(),
-        try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls),
-    )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
-    const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src);
-    const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) {
-        error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}),
-        else => |e| return e,
-    };
-    const decl_index = (try sema.namespaceLookup(
-        block,
-        src,
-        builtin_ty.getNamespaceIndex(mod),
-        try ip.getOrPutString(gpa, name, .no_embedded_nulls),
-    )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name});
-    return decl_index;
-}
-
-fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
-    const zcu = sema.mod;
-    const ty_inst = try sema.getBuiltin(name);
-
-    var block: Block = .{
-        .parent = null,
-        .sema = sema,
-        .namespace = sema.owner_decl.src_namespace,
-        .instructions = .{},
-        .inlining = null,
-        .is_comptime = true,
-        .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: {
-            assert(sema.owner_decl.has_tv);
-            assert(sema.owner_decl.owns_tv);
-            switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) {
-                .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?,
-                .Fn => {
-                    const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner;
-                    const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl);
-                    break :owner generic_owner_decl.zir_decl_index.unwrap().?;
-                },
-                else => unreachable,
-            }
-        },
-        .type_name_ctx = sema.owner_decl.name,
-    };
-    defer block.instructions.deinit(sema.gpa);
-
-    const src = block.nodeOffset(0);
-
-    const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| switch (err) {
-        error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}),
-        else => |e| return e,
-    };
-    try sema.resolveTypeFully(result_ty); // Should not fail
-    return result_ty;
-}
-
 /// There is another implementation of this in `Type.onePossibleValue`. This one
 /// in `Sema` is for calling during semantic analysis, and performs field resolution
 /// to get the answer. The one in `Type` is for calling during codegen and asserts
@@ -37104,8 +36639,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 },
 
                 .struct_type => {
+                    // Resolving the layout first helps to avoid loops.
+                    // If the type has a coherent layout, we can recurse through fields safely.
+                    try ty.resolveLayout(zcu);
+
                     const struct_type = ip.loadStructType(ty.toIntern());
-                    try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
 
                     if (struct_type.field_types.len == 0) {
                         // In this case the struct has no fields at all and
@@ -37122,20 +36660,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                     );
                     for (field_vals, 0..) |*field_val, i| {
                         if (struct_type.fieldIsComptime(ip, i)) {
-                            try sema.resolveStructFieldInits(ty);
+                            try ty.resolveStructFieldInits(zcu);
                             field_val.* = struct_type.field_inits.get(ip)[i];
                             continue;
                         }
                         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-                        if (field_ty.eql(ty, zcu)) {
-                            const msg = try sema.errMsg(
-                                ty.srcLoc(zcu),
-                                "struct '{}' depends on itself",
-                                .{ty.fmt(zcu)},
-                            );
-                            try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
-                            return sema.failWithOwnedErrorMsg(null, msg);
-                        }
                         if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
                             field_val.* = field_opv.toIntern();
                         } else return null;
@@ -37163,8 +36692,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 },
 
                 .union_type => {
+                    // Resolving the layout first helps to avoid loops.
+                    // If the type has a coherent layout, we can recurse through fields safely.
+                    try ty.resolveLayout(zcu);
+
                     const union_obj = ip.loadUnionType(ty.toIntern());
-                    try sema.resolveTypeFieldsUnion(ty, union_obj);
                     const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse
                         return null;
                     if (union_obj.field_types.len == 0) {
@@ -37172,15 +36704,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                         return Value.fromInterned(only);
                     }
                     const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
-                    if (only_field_ty.eql(ty, zcu)) {
-                        const msg = try sema.errMsg(
-                            ty.srcLoc(zcu),
-                            "union '{}' depends on itself",
-                            .{ty.fmt(zcu)},
-                        );
-                        try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{});
-                        return sema.failWithOwnedErrorMsg(null, msg);
-                    }
                     const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse
                         return null;
                     const only = try zcu.intern(.{ .un = .{
@@ -37298,7 +36821,7 @@ fn analyzeComptimeAlloc(
     // Needed to make an anon decl with type `var_type` (the `finish()` call below).
     _ = try sema.typeHasOnePossibleValue(var_type);
 
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try mod.ptrTypeSema(.{
         .child = var_type.toIntern(),
         .flags = .{
             .alignment = alignment,
@@ -37485,64 +37008,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
 
 /// `generic_poison` will return false.
 /// May return false negatives when structs and unions are having their field types resolved.
-pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
-    return ty.comptimeOnlyAdvanced(sema.mod, sema);
+pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool {
+    return ty.comptimeOnlyAdvanced(sema.mod, .sema);
 }
 
-pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
-    const mod = sema.mod;
-    return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) {
+pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool {
+    return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) {
         error.NeedLazy => unreachable,
         else => |e| return e,
     };
 }
 
-pub fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
-    try sema.resolveTypeLayout(ty);
+pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 {
+    try ty.resolveLayout(sema.mod);
     return ty.abiSize(sema.mod);
 }
 
-pub fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
-    return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
-}
-
-/// Not valid to call for packed unions.
-/// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
-pub fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment {
-    const mod = sema.mod;
-    const ip = &mod.intern_pool;
-    const field_align = u.fieldAlign(ip, field_index);
-    if (field_align != .none) return field_align;
-    const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]);
-    if (field_ty.isNoReturn(sema.mod)) return .none;
-    return sema.typeAbiAlignment(field_ty);
-}
-
-/// Keep implementation in sync with `Module.structFieldAlignment`.
-pub fn structFieldAlignment(
-    sema: *Sema,
-    explicit_alignment: InternPool.Alignment,
-    field_ty: Type,
-    layout: std.builtin.Type.ContainerLayout,
-) !Alignment {
-    if (explicit_alignment != .none)
-        return explicit_alignment;
-    const mod = sema.mod;
-    switch (layout) {
-        .@"packed" => return .none,
-        .auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
-        .@"extern" => {},
-    }
-    // extern
-    const ty_abi_align = try sema.typeAbiAlignment(field_ty);
-    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
-        return ty_abi_align.maxStrict(.@"16");
-    }
-    return ty_abi_align;
+pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment {
+    return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar;
 }
 
 pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
-    return ty.fnHasRuntimeBitsAdvanced(sema.mod, sema);
+    return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema);
 }
 
 fn unionFieldIndex(
@@ -37554,7 +37041,7 @@ fn unionFieldIndex(
 ) !u32 {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    try sema.resolveTypeFields(union_ty);
+    try union_ty.resolveFields(mod);
     const union_obj = mod.typeToUnion(union_ty).?;
     const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
         return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name);
@@ -37570,7 +37057,7 @@ fn structFieldIndex(
 ) !u32 {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    try sema.resolveTypeFields(struct_ty);
+    try struct_ty.resolveFields(mod);
     if (struct_ty.isAnonStruct(mod)) {
         return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
     } else {
@@ -37601,10 +37088,6 @@ fn anonStructFieldIndex(
     });
 }
 
-fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
-    try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {});
-}
-
 /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
 /// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
 fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
@@ -37662,8 +37145,8 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
@@ -37752,8 +37235,8 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
@@ -37836,8 +37319,8 @@ fn intSubWithOverflowScalar(
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -38024,7 +37507,7 @@ fn intFitsInType(
 
 fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
     const mod = sema.mod;
-    if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false;
+    if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false;
     const end_val = try mod.intValue(tag_ty, end);
     if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false;
     return true;
@@ -38094,8 +37577,8 @@ fn intAddWithOverflowScalar(
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -38149,7 +37632,7 @@ fn compareScalar(
     switch (op) {
         .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty),
         .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)),
-        else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema),
+        else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema),
     }
 }
 
@@ -38185,80 +37668,6 @@ fn compareVector(
     } })));
 }
 
-/// Returns the type of a pointer to an element.
-/// Asserts that the type is a pointer, and that the element type is indexable.
-/// If the element index is comptime-known, it must be passed in `offset`.
-/// For *@Vector(n, T), return *align(a:b:h:v) T
-/// For *[N]T, return *T
-/// For [*]T, returns *T
-/// For []T, returns *T
-/// Handles const-ness and address spaces in particular.
-/// This code is duplicated in `analyzePtrArithmetic`.
-pub fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
-    const mod = sema.mod;
-    const ptr_info = ptr_ty.ptrInfo(mod);
-    const elem_ty = ptr_ty.elemType2(mod);
-    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
-    const parent_ty = ptr_ty.childType(mod);
-
-    const VI = InternPool.Key.PtrType.VectorIndex;
-
-    const vector_info: struct {
-        host_size: u16 = 0,
-        alignment: Alignment = .none,
-        vector_index: VI = .none,
-    } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: {
-        const elem_bits = elem_ty.bitSize(mod);
-        if (elem_bits == 0) break :blk .{};
-        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
-        if (!is_packed) break :blk .{};
-
-        break :blk .{
-            .host_size = @intCast(parent_ty.arrayLen(mod)),
-            .alignment = parent_ty.abiAlignment(mod),
-            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
-        };
-    } else .{};
-
-    const alignment: Alignment = a: {
-        // Calculate the new pointer alignment.
-        if (ptr_info.flags.alignment == .none) {
-            // In case of an ABI-aligned pointer, any pointer arithmetic
-            // maintains the same ABI-alignedness.
-            break :a vector_info.alignment;
-        }
-        // If the addend is not a comptime-known value we can still count on
-        // it being a multiple of the type size.
-        const elem_size = try sema.typeAbiSize(elem_ty);
-        const addend = if (offset) |off| elem_size * off else elem_size;
-
-        // The resulting pointer is aligned to the lcd between the offset (an
-        // arbitrary number) and the alignment factor (always a power of two,
-        // non zero).
-        const new_align: Alignment = @enumFromInt(@min(
-            @ctz(addend),
-            ptr_info.flags.alignment.toLog2Units(),
-        ));
-        assert(new_align != .none);
-        break :a new_align;
-    };
-    return sema.ptrType(.{
-        .child = elem_ty.toIntern(),
-        .flags = .{
-            .alignment = alignment,
-            .is_const = ptr_info.flags.is_const,
-            .is_volatile = ptr_info.flags.is_volatile,
-            .is_allowzero = is_allowzero,
-            .address_space = ptr_info.flags.address_space,
-            .vector_index = vector_info.vector_index,
-        },
-        .packed_offset = .{
-            .host_size = vector_info.host_size,
-            .bit_offset = 0,
-        },
-    });
-}
-
 /// Merge lhs with rhs.
 /// Asserts that lhs and rhs are both error sets and are resolved.
 fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
@@ -38299,13 +37708,6 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
     return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
 }
 
-pub fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
-    if (info.flags.alignment != .none) {
-        _ = try sema.typeAbiAlignment(Type.fromInterned(info.child));
-    }
-    return sema.mod.ptrType(info);
-}
-
 pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
     if (!sema.mod.comp.debug_incremental) return;
 
@@ -38425,12 +37827,12 @@ fn maybeDerefSliceAsArray(
         else => unreachable,
     };
     const elem_ty = Type.fromInterned(slice.ty).childType(zcu);
-    const len = try Value.fromInterned(slice.len).toUnsignedIntAdvanced(sema);
+    const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu);
     const array_ty = try zcu.arrayType(.{
         .child = elem_ty.toIntern(),
         .len = len,
     });
-    const ptr_ty = try sema.ptrType(p: {
+    const ptr_ty = try zcu.ptrTypeSema(p: {
         var p = Type.fromInterned(slice.ty).ptrInfo(zcu);
         p.flags.size = .One;
         p.child = array_ty.toIntern();
src/Type.zig
@@ -5,6 +5,7 @@
 
 const std = @import("std");
 const builtin = @import("builtin");
+const Allocator = std.mem.Allocator;
 const Value = @import("Value.zig");
 const assert = std.debug.assert;
 const Target = std.Target;
@@ -18,6 +19,7 @@ const InternPool = @import("InternPool.zig");
 const Alignment = InternPool.Alignment;
 const Zir = std.zig.Zir;
 const Type = @This();
+const SemaError = Zcu.SemaError;
 
 ip_index: InternPool.Index,
 
@@ -458,7 +460,7 @@ pub fn toValue(self: Type) Value {
     return Value.fromInterned(self.toIntern());
 }
 
-const RuntimeBitsError = Module.CompileError || error{NeedLazy};
+const RuntimeBitsError = SemaError || error{NeedLazy};
 
 /// true if and only if the type takes up space in memory at runtime.
 /// There are two reasons a type will return false:
@@ -475,7 +477,7 @@ pub fn hasRuntimeBitsAdvanced(
     ty: Type,
     mod: *Module,
     ignore_comptime_only: bool,
-    strat: AbiAlignmentAdvancedStrat,
+    strat: ResolveStratLazy,
 ) RuntimeBitsError!bool {
     const ip = &mod.intern_pool;
     return switch (ty.toIntern()) {
@@ -488,8 +490,8 @@ pub fn hasRuntimeBitsAdvanced(
                 // to comptime-only types do not, with the exception of function pointers.
                 if (ignore_comptime_only) return true;
                 return switch (strat) {
-                    .sema => |sema| !(try sema.typeRequiresComptime(ty)),
-                    .eager => !comptimeOnly(ty, mod),
+                    .sema => !try ty.comptimeOnlyAdvanced(mod, .sema),
+                    .eager => !ty.comptimeOnly(mod),
                     .lazy => error.NeedLazy,
                 };
             },
@@ -506,8 +508,8 @@ pub fn hasRuntimeBitsAdvanced(
                 }
                 if (ignore_comptime_only) return true;
                 return switch (strat) {
-                    .sema => |sema| !(try sema.typeRequiresComptime(child_ty)),
-                    .eager => !comptimeOnly(child_ty, mod),
+                    .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema),
+                    .eager => !child_ty.comptimeOnly(mod),
                     .lazy => error.NeedLazy,
                 };
             },
@@ -578,7 +580,7 @@ pub fn hasRuntimeBitsAdvanced(
                     return true;
                 }
                 switch (strat) {
-                    .sema => |sema| _ = try sema.resolveTypeFields(ty),
+                    .sema => try ty.resolveFields(mod),
                     .eager => assert(struct_type.haveFieldTypes(ip)),
                     .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
                 }
@@ -622,7 +624,7 @@ pub fn hasRuntimeBitsAdvanced(
                     },
                 }
                 switch (strat) {
-                    .sema => |sema| _ = try sema.resolveTypeFields(ty),
+                    .sema => try ty.resolveFields(mod),
                     .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
                     .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
                         return error.NeedLazy,
@@ -784,19 +786,18 @@ pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
 }
 
 pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool {
-    return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable;
+    return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable;
 }
 
 /// Determines whether a function type has runtime bits, i.e. whether a
 /// function with this type can exist at runtime.
 /// Asserts that `ty` is a function type.
-/// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved.
-pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool {
+pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
     const fn_info = mod.typeToFunc(ty).?;
     if (fn_info.is_generic) return false;
     if (fn_info.is_var_args) return true;
     if (fn_info.cc == .Inline) return false;
-    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema);
+    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat);
 }
 
 pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
@@ -820,23 +821,23 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool {
 
 /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
 pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
-    return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
+    return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable;
 }
 
-pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
+pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment {
     return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| {
             if (ptr_type.flags.alignment != .none)
                 return ptr_type.flags.alignment;
 
-            if (opt_sema) |sema| {
-                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema });
+            if (strat == .sema) {
+                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema);
                 return res.scalar;
             }
 
             return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
         },
-        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema),
+        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat),
         else => unreachable,
     };
 }
@@ -868,10 +869,34 @@ pub const AbiAlignmentAdvanced = union(enum) {
     val: Value,
 };
 
-pub const AbiAlignmentAdvancedStrat = union(enum) {
-    eager,
+pub const ResolveStratLazy = enum {
+    /// Return a `lazy_size` or `lazy_align` value if necessary.
+    /// This value can be resolved later using `Value.resolveLazy`.
     lazy,
-    sema: *Sema,
+    /// Return a scalar result, expecting all necessary type resolution to be completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    eager,
+    /// Return a scalar result, performing type resolution as necessary.
+    /// This should typically be used from semantic analysis.
+    sema,
+};
+
+/// The chosen strategy can be easily optimized away in release builds.
+/// However, in debug builds, it helps to avoid acceidentally resolving types in backends.
+pub const ResolveStrat = enum {
+    /// Assert that all necessary resolution is completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    normal,
+    /// Perform type resolution as necessary using `Zcu`.
+    /// This should typically be used from semantic analysis.
+    sema,
+
+    pub fn toLazy(strat: ResolveStrat) ResolveStratLazy {
+        return switch (strat) {
+            .normal => .eager,
+            .sema => .sema,
+        };
+    }
 };
 
 /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
@@ -883,17 +908,12 @@ pub const AbiAlignmentAdvancedStrat = union(enum) {
 pub fn abiAlignmentAdvanced(
     ty: Type,
     mod: *Module,
-    strat: AbiAlignmentAdvancedStrat,
-) Module.CompileError!AbiAlignmentAdvanced {
+    strat: ResolveStratLazy,
+) SemaError!AbiAlignmentAdvanced {
     const target = mod.getTarget();
     const use_llvm = mod.comp.config.use_llvm;
     const ip = &mod.intern_pool;
 
-    const opt_sema = switch (strat) {
-        .sema => |sema| sema,
-        else => null,
-    };
-
     switch (ty.toIntern()) {
         .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" },
         else => switch (ip.indexToKey(ty.toIntern())) {
@@ -911,7 +931,7 @@ pub fn abiAlignmentAdvanced(
                 if (vector_type.len == 0) return .{ .scalar = .@"1" };
                 switch (mod.comp.getZigBackend()) {
                     else => {
-                        const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema));
+                        const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema));
                         if (elem_bits == 0) return .{ .scalar = .@"1" };
                         const bytes = ((elem_bits * vector_type.len) + 7) / 8;
                         const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
@@ -1024,7 +1044,7 @@ pub fn abiAlignmentAdvanced(
                 const struct_type = ip.loadStructType(ty.toIntern());
                 if (struct_type.layout == .@"packed") {
                     switch (strat) {
-                        .sema => |sema| try sema.resolveTypeLayout(ty),
+                        .sema => try ty.resolveLayout(mod),
                         .lazy => if (struct_type.backingIntType(ip).* == .none) return .{
                             .val = Value.fromInterned((try mod.intern(.{ .int = .{
                                 .ty = .comptime_int_type,
@@ -1036,19 +1056,16 @@ pub fn abiAlignmentAdvanced(
                     return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) };
                 }
 
-                const flags = struct_type.flagsPtr(ip).*;
-                if (flags.alignment != .none) return .{ .scalar = flags.alignment };
-
-                return switch (strat) {
+                if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) {
                     .eager => unreachable, // struct alignment not resolved
-                    .sema => |sema| .{
-                        .scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type),
-                    },
-                    .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+                    .sema => try ty.resolveStructAlignment(mod),
+                    .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{
                         .ty = .comptime_int_type,
                         .storage = .{ .lazy_align = ty.toIntern() },
-                    } }))) },
+                    } })) },
                 };
+
+                return .{ .scalar = struct_type.flagsPtr(ip).alignment };
             },
             .anon_struct_type => |tuple| {
                 var big_align: Alignment = .@"1";
@@ -1070,12 +1087,10 @@ pub fn abiAlignmentAdvanced(
             },
             .union_type => {
                 const union_type = ip.loadUnionType(ty.toIntern());
-                const flags = union_type.flagsPtr(ip).*;
-                if (flags.alignment != .none) return .{ .scalar = flags.alignment };
 
-                if (!union_type.haveLayout(ip)) switch (strat) {
+                if (union_type.flagsPtr(ip).alignment == .none) switch (strat) {
                     .eager => unreachable, // union layout not resolved
-                    .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) },
+                    .sema => try ty.resolveUnionAlignment(mod),
                     .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
                         .ty = .comptime_int_type,
                         .storage = .{ .lazy_align = ty.toIntern() },
@@ -1117,9 +1132,9 @@ pub fn abiAlignmentAdvanced(
 fn abiAlignmentAdvancedErrorUnion(
     ty: Type,
     mod: *Module,
-    strat: AbiAlignmentAdvancedStrat,
+    strat: ResolveStratLazy,
     payload_ty: Type,
-) Module.CompileError!AbiAlignmentAdvanced {
+) SemaError!AbiAlignmentAdvanced {
     // This code needs to be kept in sync with the equivalent switch prong
     // in abiSizeAdvanced.
     const code_align = abiAlignment(Type.anyerror, mod);
@@ -1154,8 +1169,8 @@ fn abiAlignmentAdvancedErrorUnion(
 fn abiAlignmentAdvancedOptional(
     ty: Type,
     mod: *Module,
-    strat: AbiAlignmentAdvancedStrat,
-) Module.CompileError!AbiAlignmentAdvanced {
+    strat: ResolveStratLazy,
+) SemaError!AbiAlignmentAdvanced {
     const target = mod.getTarget();
     const child_type = ty.optionalChild(mod);
 
@@ -1217,8 +1232,8 @@ const AbiSizeAdvanced = union(enum) {
 pub fn abiSizeAdvanced(
     ty: Type,
     mod: *Module,
-    strat: AbiAlignmentAdvancedStrat,
-) Module.CompileError!AbiSizeAdvanced {
+    strat: ResolveStratLazy,
+) SemaError!AbiSizeAdvanced {
     const target = mod.getTarget();
     const use_llvm = mod.comp.config.use_llvm;
     const ip = &mod.intern_pool;
@@ -1252,9 +1267,9 @@ pub fn abiSizeAdvanced(
                 }
             },
             .vector_type => |vector_type| {
-                const opt_sema = switch (strat) {
-                    .sema => |sema| sema,
-                    .eager => null,
+                const sub_strat: ResolveStrat = switch (strat) {
+                    .sema => .sema,
+                    .eager => .normal,
                     .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
                         .ty = .comptime_int_type,
                         .storage = .{ .lazy_size = ty.toIntern() },
@@ -1269,7 +1284,7 @@ pub fn abiSizeAdvanced(
                 };
                 const total_bytes = switch (mod.comp.getZigBackend()) {
                     else => total_bytes: {
-                        const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema);
+                        const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat);
                         const total_bits = elem_bits * vector_type.len;
                         break :total_bytes (total_bits + 7) / 8;
                     },
@@ -1403,7 +1418,7 @@ pub fn abiSizeAdvanced(
             .struct_type => {
                 const struct_type = ip.loadStructType(ty.toIntern());
                 switch (strat) {
-                    .sema => |sema| try sema.resolveTypeLayout(ty),
+                    .sema => try ty.resolveLayout(mod),
                     .lazy => switch (struct_type.layout) {
                         .@"packed" => {
                             if (struct_type.backingIntType(ip).* == .none) return .{
@@ -1436,7 +1451,7 @@ pub fn abiSizeAdvanced(
             },
             .anon_struct_type => |tuple| {
                 switch (strat) {
-                    .sema => |sema| try sema.resolveTypeLayout(ty),
+                    .sema => try ty.resolveLayout(mod),
                     .lazy, .eager => {},
                 }
                 const field_count = tuple.types.len;
@@ -1449,7 +1464,7 @@ pub fn abiSizeAdvanced(
             .union_type => {
                 const union_type = ip.loadUnionType(ty.toIntern());
                 switch (strat) {
-                    .sema => |sema| try sema.resolveTypeLayout(ty),
+                    .sema => try ty.resolveLayout(mod),
                     .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
                         .val = Value.fromInterned((try mod.intern(.{ .int = .{
                             .ty = .comptime_int_type,
@@ -1493,8 +1508,8 @@ pub fn abiSizeAdvanced(
 fn abiSizeAdvancedOptional(
     ty: Type,
     mod: *Module,
-    strat: AbiAlignmentAdvancedStrat,
-) Module.CompileError!AbiSizeAdvanced {
+    strat: ResolveStratLazy,
+) SemaError!AbiSizeAdvanced {
     const child_ty = ty.optionalChild(mod);
 
     if (child_ty.isNoReturn(mod)) {
@@ -1661,21 +1676,18 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
 }
 
 pub fn bitSize(ty: Type, mod: *Module) u64 {
-    return bitSizeAdvanced(ty, mod, null) catch unreachable;
+    return bitSizeAdvanced(ty, mod, .normal) catch unreachable;
 }
 
-/// If you pass `opt_sema`, any recursive type resolutions will happen if
-/// necessary, possibly returning a CompileError. Passing `null` instead asserts
-/// the type is fully resolved, and there will be no error, guaranteed.
 pub fn bitSizeAdvanced(
     ty: Type,
     mod: *Module,
-    opt_sema: ?*Sema,
-) Module.CompileError!u64 {
+    strat: ResolveStrat,
+) SemaError!u64 {
     const target = mod.getTarget();
     const ip = &mod.intern_pool;
 
-    const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
+    const strat_lazy: ResolveStratLazy = strat.toLazy();
 
     switch (ip.indexToKey(ty.toIntern())) {
         .int_type => |int_type| return int_type.bits,
@@ -1690,22 +1702,22 @@ pub fn bitSizeAdvanced(
             if (len == 0) return 0;
             const elem_ty = Type.fromInterned(array_type.child);
             const elem_size = @max(
-                (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0,
-                (try elem_ty.abiSizeAdvanced(mod, strat)).scalar,
+                (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0,
+                (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar,
             );
             if (elem_size == 0) return 0;
-            const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
+            const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat);
             return (len - 1) * 8 * elem_size + elem_bit_size;
         },
         .vector_type => |vector_type| {
             const child_ty = Type.fromInterned(vector_type.child);
-            const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema);
+            const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat);
             return elem_bit_size * vector_type.len;
         },
         .opt_type => {
             // Optionals and error unions are not packed so their bitsize
             // includes padding bits.
-            return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8;
+            return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
         },
 
         .error_set_type, .inferred_error_set_type => return mod.errorSetBits(),
@@ -1713,7 +1725,7 @@ pub fn bitSizeAdvanced(
         .error_union_type => {
             // Optionals and error unions are not packed so their bitsize
             // includes padding bits.
-            return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8;
+            return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
         },
         .func_type => unreachable, // represents machine code; not a pointer
         .simple_type => |t| switch (t) {
@@ -1770,43 +1782,43 @@ pub fn bitSizeAdvanced(
         .struct_type => {
             const struct_type = ip.loadStructType(ty.toIntern());
             const is_packed = struct_type.layout == .@"packed";
-            if (opt_sema) |sema| {
-                try sema.resolveTypeFields(ty);
-                if (is_packed) try sema.resolveTypeLayout(ty);
+            if (strat == .sema) {
+                try ty.resolveFields(mod);
+                if (is_packed) try ty.resolveLayout(mod);
             }
             if (is_packed) {
-                return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema);
+                return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat);
             }
-            return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+            return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
         },
 
         .anon_struct_type => {
-            if (opt_sema) |sema| try sema.resolveTypeFields(ty);
-            return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+            if (strat == .sema) try ty.resolveFields(mod);
+            return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
         },
 
         .union_type => {
             const union_type = ip.loadUnionType(ty.toIntern());
             const is_packed = ty.containerLayout(mod) == .@"packed";
-            if (opt_sema) |sema| {
-                try sema.resolveTypeFields(ty);
-                if (is_packed) try sema.resolveTypeLayout(ty);
+            if (strat == .sema) {
+                try ty.resolveFields(mod);
+                if (is_packed) try ty.resolveLayout(mod);
             }
             if (!is_packed) {
-                return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
             }
             assert(union_type.flagsPtr(ip).status.haveFieldTypes());
 
             var size: u64 = 0;
             for (0..union_type.field_types.len) |field_index| {
                 const field_ty = union_type.field_types.get(ip)[field_index];
-                size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema));
+                size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat));
             }
 
             return size;
         },
         .opaque_type => unreachable,
-        .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema),
+        .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat),
 
         // values, not types
         .undef,
@@ -2722,13 +2734,12 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
 /// During semantic analysis, instead call `Sema.typeRequiresComptime` which
 /// resolves field types rather than asserting they are already resolved.
 pub fn comptimeOnly(ty: Type, mod: *Module) bool {
-    return ty.comptimeOnlyAdvanced(mod, null) catch unreachable;
+    return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable;
 }
 
 /// `generic_poison` will return false.
 /// May return false negatives when structs and unions are having their field types resolved.
-/// If `opt_sema` is not provided, asserts that the type is sufficiently resolved.
-pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool {
+pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
     const ip = &mod.intern_pool;
     return switch (ty.toIntern()) {
         .empty_struct_type => false,
@@ -2738,19 +2749,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
             .ptr_type => |ptr_type| {
                 const child_ty = Type.fromInterned(ptr_type.child);
                 switch (child_ty.zigTypeTag(mod)) {
-                    .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema),
+                    .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat),
                     .Opaque => return false,
-                    else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema),
+                    else => return child_ty.comptimeOnlyAdvanced(mod, strat),
                 }
             },
             .anyframe_type => |child| {
                 if (child == .none) return false;
-                return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema);
+                return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat);
             },
-            .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema),
-            .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema),
-            .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema),
-            .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema),
+            .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat),
+            .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat),
+            .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat),
+            .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat),
 
             .error_set_type,
             .inferred_error_set_type,
@@ -2817,8 +2828,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
                     .no, .wip => false,
                     .yes => true,
                     .unknown => {
-                        // The type is not resolved; assert that we have a Sema.
-                        const sema = opt_sema.?;
+                        assert(strat == .sema);
 
                         if (struct_type.flagsPtr(ip).field_types_wip)
                             return false;
@@ -2826,13 +2836,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
                         struct_type.flagsPtr(ip).requires_comptime = .wip;
                         errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
 
-                        try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
+                        try ty.resolveFields(mod);
 
                         for (0..struct_type.field_types.len) |i_usize| {
                             const i: u32 = @intCast(i_usize);
                             if (struct_type.fieldIsComptime(ip, i)) continue;
                             const field_ty = struct_type.field_types.get(ip)[i];
-                            if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
+                            if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
                                 // Note that this does not cause the layout to
                                 // be considered resolved. Comptime-only types
                                 // still maintain a layout of their
@@ -2851,7 +2861,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
             .anon_struct_type => |tuple| {
                 for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                     const have_comptime_val = val != .none;
-                    if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true;
+                    if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true;
                 }
                 return false;
             },
@@ -2862,8 +2872,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
                     .no, .wip => return false,
                     .yes => return true,
                     .unknown => {
-                        // The type is not resolved; assert that we have a Sema.
-                        const sema = opt_sema.?;
+                        assert(strat == .sema);
 
                         if (union_type.flagsPtr(ip).status == .field_types_wip)
                             return false;
@@ -2871,11 +2880,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
                         union_type.flagsPtr(ip).requires_comptime = .wip;
                         errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
 
-                        try sema.resolveTypeFieldsUnion(ty, union_type);
+                        try ty.resolveFields(mod);
 
                         for (0..union_type.field_types.len) |field_idx| {
                             const field_ty = union_type.field_types.get(ip)[field_idx];
-                            if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
+                            if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
                                 union_type.flagsPtr(ip).requires_comptime = .yes;
                                 return true;
                             }
@@ -2889,7 +2898,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com
 
             .opaque_type => false,
 
-            .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
+            .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat),
 
             // values, not types
             .undef,
@@ -3180,10 +3189,10 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
 }
 
 pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
-    return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable;
+    return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable;
 }
 
-pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment {
+pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment {
     const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
@@ -3191,22 +3200,14 @@ pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*S
             assert(struct_type.layout != .@"packed");
             const explicit_align = struct_type.fieldAlign(ip, index);
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
-            if (opt_sema) |sema| {
-                return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
-            } else {
-                return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
-            }
+            return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
         },
         .anon_struct_type => |anon_struct| {
-            return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar;
+            return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
         },
         .union_type => {
             const union_obj = ip.loadUnionType(ty.toIntern());
-            if (opt_sema) |sema| {
-                return sema.unionFieldAlignment(union_obj, @intCast(index));
-            } else {
-                return zcu.unionFieldNormalAlignment(union_obj, @intCast(index));
-            }
+            return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
         },
         else => unreachable,
     }
@@ -3546,6 +3547,397 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
     } };
 }
 
+pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void {
+    const ip = &zcu.intern_pool;
+    switch (ip.indexToKey(ty.toIntern())) {
+        .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
+        else => {},
+    }
+    switch (ty.zigTypeTag(zcu)) {
+        .Struct => switch (ip.indexToKey(ty.toIntern())) {
+            .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
+                const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
+                try field_ty.resolveLayout(zcu);
+            },
+            .struct_type => return ty.resolveStructInner(zcu, .layout),
+            else => unreachable,
+        },
+        .Union => return ty.resolveUnionInner(zcu, .layout),
+        .Array => {
+            if (ty.arrayLenIncludingSentinel(zcu) == 0) return;
+            const elem_ty = ty.childType(zcu);
+            return elem_ty.resolveLayout(zcu);
+        },
+        .Optional => {
+            const payload_ty = ty.optionalChild(zcu);
+            return payload_ty.resolveLayout(zcu);
+        },
+        .ErrorUnion => {
+            const payload_ty = ty.errorUnionPayload(zcu);
+            return payload_ty.resolveLayout(zcu);
+        },
+        .Fn => {
+            const info = zcu.typeToFunc(ty).?;
+            if (info.is_generic) {
+                // Resolving of generic function types is deferred to when
+                // the function is instantiated.
+                return;
+            }
+            for (0..info.param_types.len) |i| {
+                const param_ty = info.param_types.get(ip)[i];
+                try Type.fromInterned(param_ty).resolveLayout(zcu);
+            }
+            try Type.fromInterned(info.return_type).resolveLayout(zcu);
+        },
+        else => {},
+    }
+}
+
+pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void {
+    const ip = &zcu.intern_pool;
+    const ty_ip = ty.toIntern();
+
+    switch (ty_ip) {
+        .none => unreachable,
+
+        .u0_type,
+        .i0_type,
+        .u1_type,
+        .u8_type,
+        .i8_type,
+        .u16_type,
+        .i16_type,
+        .u29_type,
+        .u32_type,
+        .i32_type,
+        .u64_type,
+        .i64_type,
+        .u80_type,
+        .u128_type,
+        .i128_type,
+        .usize_type,
+        .isize_type,
+        .c_char_type,
+        .c_short_type,
+        .c_ushort_type,
+        .c_int_type,
+        .c_uint_type,
+        .c_long_type,
+        .c_ulong_type,
+        .c_longlong_type,
+        .c_ulonglong_type,
+        .c_longdouble_type,
+        .f16_type,
+        .f32_type,
+        .f64_type,
+        .f80_type,
+        .f128_type,
+        .anyopaque_type,
+        .bool_type,
+        .void_type,
+        .type_type,
+        .anyerror_type,
+        .adhoc_inferred_error_set_type,
+        .comptime_int_type,
+        .comptime_float_type,
+        .noreturn_type,
+        .anyframe_type,
+        .null_type,
+        .undefined_type,
+        .enum_literal_type,
+        .manyptr_u8_type,
+        .manyptr_const_u8_type,
+        .manyptr_const_u8_sentinel_0_type,
+        .single_const_pointer_to_comptime_int_type,
+        .slice_const_u8_type,
+        .slice_const_u8_sentinel_0_type,
+        .optional_noreturn_type,
+        .anyerror_void_error_union_type,
+        .generic_poison_type,
+        .empty_struct_type,
+        => {},
+
+        .undef => unreachable,
+        .zero => unreachable,
+        .zero_usize => unreachable,
+        .zero_u8 => unreachable,
+        .one => unreachable,
+        .one_usize => unreachable,
+        .one_u8 => unreachable,
+        .four_u8 => unreachable,
+        .negative_one => unreachable,
+        .calling_convention_c => unreachable,
+        .calling_convention_inline => unreachable,
+        .void_value => unreachable,
+        .unreachable_value => unreachable,
+        .null_value => unreachable,
+        .bool_true => unreachable,
+        .bool_false => unreachable,
+        .empty_struct => unreachable,
+        .generic_poison => unreachable,
+
+        else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
+            .type_struct,
+            .type_struct_packed,
+            .type_struct_packed_inits,
+            => return ty.resolveStructInner(zcu, .fields),
+
+            .type_union => return ty.resolveUnionInner(zcu, .fields),
+
+            .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu),
+
+            else => {},
+        },
+    }
+}
+
+pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void {
+    const ip = &zcu.intern_pool;
+
+    switch (ip.indexToKey(ty.toIntern())) {
+        .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
+        else => {},
+    }
+
+    switch (ty.zigTypeTag(zcu)) {
+        .Type,
+        .Void,
+        .Bool,
+        .NoReturn,
+        .Int,
+        .Float,
+        .ComptimeFloat,
+        .ComptimeInt,
+        .Undefined,
+        .Null,
+        .ErrorSet,
+        .Enum,
+        .Opaque,
+        .Frame,
+        .AnyFrame,
+        .Vector,
+        .EnumLiteral,
+        => {},
+
+        .Pointer => return ty.childType(zcu).resolveFully(zcu),
+        .Array => return ty.childType(zcu).resolveFully(zcu),
+        .Optional => return ty.optionalChild(zcu).resolveFully(zcu),
+        .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu),
+        .Fn => {
+            const info = zcu.typeToFunc(ty).?;
+            if (info.is_generic) return;
+            for (0..info.param_types.len) |i| {
+                const param_ty = info.param_types.get(ip)[i];
+                try Type.fromInterned(param_ty).resolveFully(zcu);
+            }
+            try Type.fromInterned(info.return_type).resolveFully(zcu);
+        },
+
+        .Struct => switch (ip.indexToKey(ty.toIntern())) {
+            .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
+                const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
+                try field_ty.resolveFully(zcu);
+            },
+            .struct_type => return ty.resolveStructInner(zcu, .full),
+            else => unreachable,
+        },
+        .Union => return ty.resolveUnionInner(zcu, .full),
+    }
+}
+
+pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void {
+    // TODO: stop calling this for tuples!
+    _ = zcu.typeToStruct(ty) orelse return;
+    return ty.resolveStructInner(zcu, .inits);
+}
+
+pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void {
+    return ty.resolveStructInner(zcu, .alignment);
+}
+
+pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void {
+    return ty.resolveUnionInner(zcu, .alignment);
+}
+
+/// `ty` must be a struct.
+fn resolveStructInner(
+    ty: Type,
+    zcu: *Zcu,
+    resolution: enum { fields, inits, alignment, layout, full },
+) SemaError!void {
+    const gpa = zcu.gpa;
+
+    const struct_obj = zcu.typeToStruct(ty).?;
+    const owner_decl_index = struct_obj.decl.unwrap() orelse return;
+
+    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+    defer analysis_arena.deinit();
+
+    var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+    defer comptime_err_ret_trace.deinit();
+
+    var sema: Sema = .{
+        .mod = zcu,
+        .gpa = gpa,
+        .arena = analysis_arena.allocator(),
+        .code = undefined, // This ZIR will not be used.
+        .owner_decl = zcu.declPtr(owner_decl_index),
+        .owner_decl_index = owner_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_err_ret_trace = &comptime_err_ret_trace,
+    };
+    defer sema.deinit();
+
+    switch (resolution) {
+        .fields => return sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj),
+        .inits => return sema.resolveStructFieldInits(ty),
+        .alignment => return sema.resolveStructAlignment(ty.toIntern(), struct_obj),
+        .layout => return sema.resolveStructLayout(ty),
+        .full => return sema.resolveStructFully(ty),
+    }
+}
+
+/// `ty` must be a union.
+fn resolveUnionInner(
+    ty: Type,
+    zcu: *Zcu,
+    resolution: enum { fields, alignment, layout, full },
+) SemaError!void {
+    const gpa = zcu.gpa;
+
+    const union_obj = zcu.typeToUnion(ty).?;
+    const owner_decl_index = union_obj.decl;
+
+    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+    defer analysis_arena.deinit();
+
+    var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+    defer comptime_err_ret_trace.deinit();
+
+    var sema: Sema = .{
+        .mod = zcu,
+        .gpa = gpa,
+        .arena = analysis_arena.allocator(),
+        .code = undefined, // This ZIR will not be used.
+        .owner_decl = zcu.declPtr(owner_decl_index),
+        .owner_decl_index = owner_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_err_ret_trace = &comptime_err_ret_trace,
+    };
+    defer sema.deinit();
+
+    switch (resolution) {
+        .fields => return sema.resolveTypeFieldsUnion(ty, union_obj),
+        .alignment => return sema.resolveUnionAlignment(ty, union_obj),
+        .layout => return sema.resolveUnionLayout(ty),
+        .full => return sema.resolveUnionFully(ty),
+    }
+}
+
+/// Fully resolves a simple type. This is usually a nop, but for builtin types with
+/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
+/// resolve the type.
+fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
+    const builtin_type_name: []const u8 = switch (simple_type) {
+        .atomic_order => "AtomicOrder",
+        .atomic_rmw_op => "AtomicRmwOp",
+        .calling_convention => "CallingConvention",
+        .address_space => "AddressSpace",
+        .float_mode => "FloatMode",
+        .reduce_op => "ReduceOp",
+        .call_modifier => "CallModifer",
+        .prefetch_options => "PrefetchOptions",
+        .export_options => "ExportOptions",
+        .extern_options => "ExternOptions",
+        .type_info => "Type",
+        else => return,
+    };
+    // This will fully resolve the type.
+    _ = try zcu.getBuiltinType(builtin_type_name);
+}
+
+/// Returns the type of a pointer to an element.
+/// Asserts that the type is a pointer, and that the element type is indexable.
+/// If the element index is comptime-known, it must be passed in `offset`.
+/// For *@Vector(n, T), return *align(a:b:h:v) T
+/// For *[N]T, return *T
+/// For [*]T, returns *T
+/// For []T, returns *T
+/// Handles const-ness and address spaces in particular.
+/// This code is duplicated in `Sema.analyzePtrArithmetic`.
+/// May perform type resolution and return a transitive `error.AnalysisFail`.
+pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
+    const ptr_info = ptr_ty.ptrInfo(zcu);
+    const elem_ty = ptr_ty.elemType2(zcu);
+    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
+    const parent_ty = ptr_ty.childType(zcu);
+
+    const VI = InternPool.Key.PtrType.VectorIndex;
+
+    const vector_info: struct {
+        host_size: u16 = 0,
+        alignment: Alignment = .none,
+        vector_index: VI = .none,
+    } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
+        const elem_bits = elem_ty.bitSize(zcu);
+        if (elem_bits == 0) break :blk .{};
+        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
+        if (!is_packed) break :blk .{};
+
+        break :blk .{
+            .host_size = @intCast(parent_ty.arrayLen(zcu)),
+            .alignment = parent_ty.abiAlignment(zcu),
+            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
+        };
+    } else .{};
+
+    const alignment: Alignment = a: {
+        // Calculate the new pointer alignment.
+        if (ptr_info.flags.alignment == .none) {
+            // In case of an ABI-aligned pointer, any pointer arithmetic
+            // maintains the same ABI-alignedness.
+            break :a vector_info.alignment;
+        }
+        // If the addend is not a comptime-known value we can still count on
+        // it being a multiple of the type size.
+        const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar;
+        const addend = if (offset) |off| elem_size * off else elem_size;
+
+        // The resulting pointer is aligned to the lcd between the offset (an
+        // arbitrary number) and the alignment factor (always a power of two,
+        // non zero).
+        const new_align: Alignment = @enumFromInt(@min(
+            @ctz(addend),
+            ptr_info.flags.alignment.toLog2Units(),
+        ));
+        assert(new_align != .none);
+        break :a new_align;
+    };
+    return zcu.ptrTypeSema(.{
+        .child = elem_ty.toIntern(),
+        .flags = .{
+            .alignment = alignment,
+            .is_const = ptr_info.flags.is_const,
+            .is_volatile = ptr_info.flags.is_volatile,
+            .is_allowzero = is_allowzero,
+            .address_space = ptr_info.flags.address_space,
+            .vector_index = vector_info.vector_index,
+        },
+        .packed_offset = .{
+            .host_size = vector_info.host_size,
+            .bit_offset = 0,
+        },
+    });
+}
+
 pub const @"u1": Type = .{ .ip_index = .u1_type };
 pub const @"u8": Type = .{ .ip_index = .u8_type };
 pub const @"u16": Type = .{ .ip_index = .u16_type };
src/Value.zig
@@ -161,9 +161,11 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
     };
 }
 
+pub const ResolveStrat = Type.ResolveStrat;
+
 /// Asserts the value is an integer.
 pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst {
-    return val.toBigIntAdvanced(space, mod, null) catch unreachable;
+    return val.toBigIntAdvanced(space, mod, .normal) catch unreachable;
 }
 
 /// Asserts the value is an integer.
@@ -171,7 +173,7 @@ pub fn toBigIntAdvanced(
     val: Value,
     space: *BigIntSpace,
     mod: *Module,
-    opt_sema: ?*Sema,
+    strat: ResolveStrat,
 ) Module.CompileError!BigIntConst {
     return switch (val.toIntern()) {
         .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
@@ -181,7 +183,7 @@ pub fn toBigIntAdvanced(
             .int => |int| switch (int.storage) {
                 .u64, .i64, .big_int => int.storage.toBigInt(space),
                 .lazy_align, .lazy_size => |ty| {
-                    if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty));
+                    if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod);
                     const x = switch (int.storage) {
                         else => unreachable,
                         .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
@@ -190,10 +192,10 @@ pub fn toBigIntAdvanced(
                     return BigIntMutable.init(&space.limbs, x).toConst();
                 },
             },
-            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema),
+            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat),
             .opt, .ptr => BigIntMutable.init(
                 &space.limbs,
-                (try val.getUnsignedIntAdvanced(mod, opt_sema)).?,
+                (try val.getUnsignedIntAdvanced(mod, strat)).?,
             ).toConst(),
             else => unreachable,
         },
@@ -228,12 +230,12 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
 pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 {
-    return getUnsignedIntAdvanced(val, mod, null) catch unreachable;
+    return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable;
 }
 
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
-pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 {
+pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 {
     return switch (val.toIntern()) {
         .undef => unreachable,
         .bool_false => 0,
@@ -244,28 +246,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
                 .big_int => |big_int| big_int.to(u64) catch null,
                 .u64 => |x| x,
                 .i64 => |x| std.math.cast(u64, x),
-                .lazy_align => |ty| if (opt_sema) |sema|
-                    (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0
-                else
-                    Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
-                .lazy_size => |ty| if (opt_sema) |sema|
-                    (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar
-                else
-                    Type.fromInterned(ty).abiSize(mod),
+                .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0,
+                .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar,
             },
             .ptr => |ptr| switch (ptr.base_addr) {
                 .int => ptr.byte_offset,
                 .field => |field| {
-                    const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
+                    const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null;
                     const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
-                    if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty);
+                    if (strat == .sema) try struct_ty.resolveLayout(mod);
                     return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset;
                 },
                 else => null,
             },
             .opt => |opt| switch (opt.val) {
                 .none => 0,
-                else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema),
+                else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat),
             },
             else => null,
         },
@@ -273,13 +269,13 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
 }
 
 /// Asserts the value is an integer and it fits in a u64
-pub fn toUnsignedInt(val: Value, mod: *Module) u64 {
-    return getUnsignedInt(val, mod).?;
+pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 {
+    return getUnsignedInt(val, zcu).?;
 }
 
 /// Asserts the value is an integer and it fits in a u64
-pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 {
-    return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?;
+pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 {
+    return (try getUnsignedIntAdvanced(val, zcu, .sema)).?;
 }
 
 /// Asserts the value is an integer and it fits in a i64
@@ -1028,13 +1024,13 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool {
 }
 
 pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order {
-    return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable;
+    return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable;
 }
 
 pub fn orderAgainstZeroAdvanced(
     lhs: Value,
     mod: *Module,
-    opt_sema: ?*Sema,
+    strat: ResolveStrat,
 ) Module.CompileError!std.math.Order {
     return switch (lhs.toIntern()) {
         .bool_false => .eq,
@@ -1052,13 +1048,13 @@ pub fn orderAgainstZeroAdvanced(
                 .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced(
                     mod,
                     false,
-                    if (opt_sema) |sema| .{ .sema = sema } else .eager,
+                    strat.toLazy(),
                 ) catch |err| switch (err) {
                     error.NeedLazy => unreachable,
                     else => |e| return e,
                 }) .gt else .eq,
             },
-            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema),
+            .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat),
             .float => |float| switch (float.storage) {
                 inline else => |x| std.math.order(x, 0),
             },
@@ -1069,14 +1065,13 @@ pub fn orderAgainstZeroAdvanced(
 
 /// Asserts the value is comparable.
 pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order {
-    return orderAdvanced(lhs, rhs, mod, null) catch unreachable;
+    return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable;
 }
 
 /// Asserts the value is comparable.
-/// If opt_sema is null then this function asserts things are resolved and cannot fail.
-pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order {
-    const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema);
-    const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
+pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order {
+    const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat);
+    const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat);
     switch (lhs_against_zero) {
         .lt => if (rhs_against_zero != .lt) return .lt,
         .eq => return rhs_against_zero.invert(),
@@ -1096,15 +1091,15 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !st
 
     var lhs_bigint_space: BigIntSpace = undefined;
     var rhs_bigint_space: BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat);
     return lhs_bigint.order(rhs_bigint);
 }
 
 /// Asserts the value is comparable. Does not take a type parameter because it supports
 /// comparisons between heterogeneous types.
 pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool {
-    return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable;
+    return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable;
 }
 
 pub fn compareHeteroAdvanced(
@@ -1112,7 +1107,7 @@ pub fn compareHeteroAdvanced(
     op: std.math.CompareOperator,
     rhs: Value,
     mod: *Module,
-    opt_sema: ?*Sema,
+    strat: ResolveStrat,
 ) !bool {
     if (lhs.pointerDecl(mod)) |lhs_decl| {
         if (rhs.pointerDecl(mod)) |rhs_decl| {
@@ -1135,7 +1130,7 @@ pub fn compareHeteroAdvanced(
             else => {},
         }
     }
-    return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op);
+    return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op);
 }
 
 /// Asserts the values are comparable. Both operands have type `ty`.
@@ -1176,22 +1171,22 @@ pub fn compareScalar(
 ///
 /// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
 pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool {
-    return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable;
+    return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable;
 }
 
-pub fn compareAllWithZeroAdvanced(
+pub fn compareAllWithZeroSema(
     lhs: Value,
     op: std.math.CompareOperator,
-    sema: *Sema,
+    zcu: *Zcu,
 ) Module.CompileError!bool {
-    return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema);
+    return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema);
 }
 
 pub fn compareAllWithZeroAdvancedExtra(
     lhs: Value,
     op: std.math.CompareOperator,
     mod: *Module,
-    opt_sema: ?*Sema,
+    strat: ResolveStrat,
 ) Module.CompileError!bool {
     if (lhs.isInf(mod)) {
         switch (op) {
@@ -1211,14 +1206,14 @@ pub fn compareAllWithZeroAdvancedExtra(
                 if (!std.math.order(byte, 0).compare(op)) break false;
             } else true,
             .elems => |elems| for (elems) |elem| {
-                if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false;
+                if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false;
             } else true,
-            .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
+            .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat),
         },
         .undef => return false,
         else => {},
     }
-    return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op);
+    return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op);
 }
 
 pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@@ -1279,9 +1274,9 @@ pub fn slicePtr(val: Value, mod: *Module) Value {
 }
 
 /// Gets the `len` field of a slice value as a `u64`.
-/// Resolves the length using the provided `Sema` if necessary.
-pub fn sliceLen(val: Value, sema: *Sema) !u64 {
-    return Value.fromInterned(sema.mod.intern_pool.sliceLen(val.toIntern())).toUnsignedIntAdvanced(sema);
+/// Resolves the length using `Sema` if necessary.
+pub fn sliceLen(val: Value, zcu: *Zcu) !u64 {
+    return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu);
 }
 
 /// Asserts the value is an aggregate, and returns the element value at the given index.
@@ -1482,29 +1477,29 @@ pub fn isFloat(self: Value, mod: *const Module) bool {
 }
 
 pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value {
-    return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) {
+    return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, .normal) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         else => unreachable,
     };
 }
 
-pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
+pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value {
     if (int_ty.zigTypeTag(mod) == .Vector) {
         const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod));
         const scalar_ty = float_ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(mod, i);
-            scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).toIntern();
+            scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern();
         }
         return Value.fromInterned((try mod.intern(.{ .aggregate = .{
             .ty = float_ty.toIntern(),
             .storage = .{ .elems = result_data },
         } })));
     }
-    return floatFromIntScalar(val, float_ty, mod, opt_sema);
+    return floatFromIntScalar(val, float_ty, mod, strat);
 }
 
-pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
+pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value {
     return switch (mod.intern_pool.indexToKey(val.toIntern())) {
         .undef => try mod.undefValue(float_ty),
         .int => |int| switch (int.storage) {
@@ -1513,16 +1508,8 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*
                 return mod.floatValue(float_ty, float);
             },
             inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
-            .lazy_align => |ty| if (opt_sema) |sema| {
-                return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod);
-            } else {
-                return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod);
-            },
-            .lazy_size => |ty| if (opt_sema) |sema| {
-                return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
-            } else {
-                return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod);
-            },
+            .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod),
+            .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod),
         },
         else => unreachable,
     };
@@ -3616,17 +3603,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex;
 
 /// `parent_ptr` must be a single-pointer to some optional.
 /// Returns a pointer to the payload of the optional.
-/// This takes a `Sema` because it may need to perform type resolution.
-pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value {
-    const zcu = sema.mod;
-
+/// May perform type resolution.
+pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value {
     const parent_ptr_ty = parent_ptr.typeOf(zcu);
     const opt_ty = parent_ptr_ty.childType(zcu);
 
     assert(parent_ptr_ty.ptrSize(zcu) == .One);
     assert(opt_ty.zigTypeTag(zcu) == .Optional);
 
-    const result_ty = try sema.ptrType(info: {
+    const result_ty = try zcu.ptrTypeSema(info: {
         var new = parent_ptr_ty.ptrInfo(zcu);
         // We can correctly preserve alignment `.none`, since an optional has the same
         // natural alignment as its child type.
@@ -3651,17 +3636,15 @@ pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value {
 
 /// `parent_ptr` must be a single-pointer to some error union.
 /// Returns a pointer to the payload of the error union.
-/// This takes a `Sema` because it may need to perform type resolution.
-pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value {
-    const zcu = sema.mod;
-
+/// May perform type resolution.
+pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value {
     const parent_ptr_ty = parent_ptr.typeOf(zcu);
     const eu_ty = parent_ptr_ty.childType(zcu);
 
     assert(parent_ptr_ty.ptrSize(zcu) == .One);
     assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion);
 
-    const result_ty = try sema.ptrType(info: {
+    const result_ty = try zcu.ptrTypeSema(info: {
         var new = parent_ptr_ty.ptrInfo(zcu);
         // We can correctly preserve alignment `.none`, since an error union has a
         // natural alignment greater than or equal to that of its payload type.
@@ -3682,10 +3665,8 @@ pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value {
 /// `parent_ptr` must be a single-pointer to a struct, union, or slice.
 /// Returns a pointer to the aggregate field at the specified index.
 /// For slices, uses `slice_ptr_index` and `slice_len_index`.
-/// This takes a `Sema` because it may need to perform type resolution.
-pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
-    const zcu = sema.mod;
-
+/// May perform type resolution.
+pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value {
     const parent_ptr_ty = parent_ptr.typeOf(zcu);
     const aggregate_ty = parent_ptr_ty.childType(zcu);
 
@@ -3698,17 +3679,17 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
         .Struct => field: {
             const field_ty = aggregate_ty.structFieldType(field_idx, zcu);
             switch (aggregate_ty.containerLayout(zcu)) {
-                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) },
+                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) },
                 .@"extern" => {
                     // Well-defined layout, so just offset the pointer appropriately.
                     const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
                     const field_align = a: {
                         const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: {
-                            break :pa try sema.typeAbiAlignment(aggregate_ty);
+                            break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
                         } else parent_ptr_info.flags.alignment;
                         break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off)));
                     };
-                    const result_ty = try sema.ptrType(info: {
+                    const result_ty = try zcu.ptrTypeSema(info: {
                         var new = parent_ptr_info;
                         new.child = field_ty.toIntern();
                         new.flags.alignment = field_align;
@@ -3723,14 +3704,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
                             new.packed_offset = packed_offset;
                             new.child = field_ty.toIntern();
                             if (new.flags.alignment == .none) {
-                                new.flags.alignment = try sema.typeAbiAlignment(aggregate_ty);
+                                new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
                             }
                             break :info new;
                         });
                         return zcu.getCoerced(parent_ptr, result_ty);
                     },
                     .byte_ptr => |ptr_info| {
-                        const result_ty = try sema.ptrType(info: {
+                        const result_ty = try zcu.ptrTypeSema(info: {
                             var new = parent_ptr_info;
                             new.child = field_ty.toIntern();
                             new.packed_offset = .{
@@ -3749,10 +3730,10 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
             const union_obj = zcu.typeToUnion(aggregate_ty).?;
             const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
             switch (aggregate_ty.containerLayout(zcu)) {
-                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) },
+                .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) },
                 .@"extern" => {
                     // Point to the same address.
-                    const result_ty = try sema.ptrType(info: {
+                    const result_ty = try zcu.ptrTypeSema(info: {
                         var new = parent_ptr_info;
                         new.child = field_ty.toIntern();
                         break :info new;
@@ -3762,28 +3743,28 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
                 .@"packed" => {
                     // If the field has an ABI size matching its bit size, then we can continue to use a
                     // non-bit pointer if the parent pointer is also a non-bit pointer.
-                    if (parent_ptr_info.packed_offset.host_size == 0 and try sema.typeAbiSize(field_ty) * 8 == try field_ty.bitSizeAdvanced(zcu, sema)) {
+                    if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) {
                         // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely.
                         const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) {
                             .little => 0,
-                            .big => try sema.typeAbiSize(aggregate_ty) - try sema.typeAbiSize(field_ty),
+                            .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar,
                         };
-                        const result_ty = try sema.ptrType(info: {
+                        const result_ty = try zcu.ptrTypeSema(info: {
                             var new = parent_ptr_info;
                             new.child = field_ty.toIntern();
                             new.flags.alignment = InternPool.Alignment.fromLog2Units(
-                                @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema)).toByteUnits().?),
+                                @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?),
                             );
                             break :info new;
                         });
                         return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu);
                     } else {
                         // The result must be a bit-pointer if it is not already.
-                        const result_ty = try sema.ptrType(info: {
+                        const result_ty = try zcu.ptrTypeSema(info: {
                             var new = parent_ptr_info;
                             new.child = field_ty.toIntern();
                             if (new.packed_offset.host_size == 0) {
-                                new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, sema)) + 7) / 8);
+                                new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8);
                                 assert(new.packed_offset.bit_offset == 0);
                             }
                             break :info new;
@@ -3805,14 +3786,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
     };
 
     const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: {
-        const ty_align = try sema.typeAbiAlignment(field_ty);
+        const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar;
         const true_field_align = if (field_align == .none) ty_align else field_align;
         const new_align = true_field_align.min(parent_ptr_info.flags.alignment);
         if (new_align == ty_align) break :a .none;
         break :a new_align;
     } else field_align;
 
-    const result_ty = try sema.ptrType(info: {
+    const result_ty = try zcu.ptrTypeSema(info: {
         var new = parent_ptr_info;
         new.child = field_ty.toIntern();
         new.flags.alignment = new_align;
@@ -3834,10 +3815,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value {
 
 /// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice.
 /// Returns a pointer to the element at the specified index.
-/// This takes a `Sema` because it may need to perform type resolution.
-pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value {
-    const zcu = sema.mod;
-
+/// May perform type resolution.
+pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value {
     const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) {
         .One, .Many, .C => orig_parent_ptr,
         .Slice => orig_parent_ptr.slicePtr(zcu),
@@ -3845,7 +3824,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value {
 
     const parent_ptr_ty = parent_ptr.typeOf(zcu);
     const elem_ty = parent_ptr_ty.childType(zcu);
-    const result_ty = try sema.elemPtrType(parent_ptr_ty, @intCast(field_idx));
+    const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu);
 
     if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty);
 
@@ -3862,21 +3841,21 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value {
 
     const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
         .One => switch (elem_ty.zigTypeTag(zcu)) {
-            .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, sema), 8) },
+            .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) },
             .Array => strat: {
                 const arr_elem_ty = elem_ty.childType(zcu);
-                if (try sema.typeRequiresComptime(arr_elem_ty)) {
+                if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) {
                     break :strat .{ .elem_ptr = arr_elem_ty };
                 }
-                break :strat .{ .offset = field_idx * try sema.typeAbiSize(arr_elem_ty) };
+                break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar };
             },
             else => unreachable,
         },
 
-        .Many, .C => if (try sema.typeRequiresComptime(elem_ty))
+        .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema))
             .{ .elem_ptr = elem_ty }
         else
-            .{ .offset = field_idx * try sema.typeAbiSize(elem_ty) },
+            .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar },
 
         .Slice => unreachable,
     };
@@ -4014,11 +3993,7 @@ pub const PointerDeriveStep = union(enum) {
 pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep {
     return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) {
         error.OutOfMemory => |e| return e,
-        error.AnalysisFail,
-        error.GenericPoison,
-        error.ComptimeReturn,
-        error.ComptimeBreak,
-        => unreachable,
+        error.AnalysisFail => unreachable,
     };
 }
 
@@ -4087,8 +4062,8 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
             const base_ptr_ty = base_ptr.typeOf(zcu);
             const agg_ty = base_ptr_ty.childType(zcu);
             const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
-                .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) },
-                .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) },
+                .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) },
+                .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) },
                 .Pointer => .{ switch (field.index) {
                     Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
                     Value.slice_len_index => Type.usize,
@@ -4269,3 +4244,118 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op
         .new_ptr_ty = Type.fromInterned(ptr.ty),
     } };
 }
+
+pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value {
+    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+        .int => |int| switch (int.storage) {
+            .u64, .i64, .big_int => return val,
+            .lazy_align, .lazy_size => return zcu.intValue(
+                Type.fromInterned(int.ty),
+                (try val.getUnsignedIntAdvanced(zcu, .sema)).?,
+            ),
+        },
+        .slice => |slice| {
+            const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu);
+            const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu);
+            if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val;
+            return Value.fromInterned(try zcu.intern(.{ .slice = .{
+                .ty = slice.ty,
+                .ptr = ptr.toIntern(),
+                .len = len.toIntern(),
+            } }));
+        },
+        .ptr => |ptr| {
+            switch (ptr.base_addr) {
+                .decl, .comptime_alloc, .anon_decl, .int => return val,
+                .comptime_field => |field_val| {
+                    const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern();
+                    return if (resolved_field_val == field_val)
+                        val
+                    else
+                        Value.fromInterned((try zcu.intern(.{ .ptr = .{
+                            .ty = ptr.ty,
+                            .base_addr = .{ .comptime_field = resolved_field_val },
+                            .byte_offset = ptr.byte_offset,
+                        } })));
+                },
+                .eu_payload, .opt_payload => |base| {
+                    const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern();
+                    return if (resolved_base == base)
+                        val
+                    else
+                        Value.fromInterned((try zcu.intern(.{ .ptr = .{
+                            .ty = ptr.ty,
+                            .base_addr = switch (ptr.base_addr) {
+                                .eu_payload => .{ .eu_payload = resolved_base },
+                                .opt_payload => .{ .opt_payload = resolved_base },
+                                else => unreachable,
+                            },
+                            .byte_offset = ptr.byte_offset,
+                        } })));
+                },
+                .arr_elem, .field => |base_index| {
+                    const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern();
+                    return if (resolved_base == base_index.base)
+                        val
+                    else
+                        Value.fromInterned((try zcu.intern(.{ .ptr = .{
+                            .ty = ptr.ty,
+                            .base_addr = switch (ptr.base_addr) {
+                                .arr_elem => .{ .arr_elem = .{
+                                    .base = resolved_base,
+                                    .index = base_index.index,
+                                } },
+                                .field => .{ .field = .{
+                                    .base = resolved_base,
+                                    .index = base_index.index,
+                                } },
+                                else => unreachable,
+                            },
+                            .byte_offset = ptr.byte_offset,
+                        } })));
+                },
+            }
+        },
+        .aggregate => |aggregate| switch (aggregate.storage) {
+            .bytes => return val,
+            .elems => |elems| {
+                var resolved_elems: []InternPool.Index = &.{};
+                for (elems, 0..) |elem, i| {
+                    const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern();
+                    if (resolved_elems.len == 0 and resolved_elem != elem) {
+                        resolved_elems = try arena.alloc(InternPool.Index, elems.len);
+                        @memcpy(resolved_elems[0..i], elems[0..i]);
+                    }
+                    if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
+                }
+                return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+                    .ty = aggregate.ty,
+                    .storage = .{ .elems = resolved_elems },
+                } })));
+            },
+            .repeated_elem => |elem| {
+                const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern();
+                return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{
+                    .ty = aggregate.ty,
+                    .storage = .{ .repeated_elem = resolved_elem },
+                } })));
+            },
+        },
+        .un => |un| {
+            const resolved_tag = if (un.tag == .none)
+                .none
+            else
+                (try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern();
+            const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern();
+            return if (resolved_tag == un.tag and resolved_val == un.val)
+                val
+            else
+                Value.fromInterned((try zcu.intern(.{ .un = .{
+                    .ty = un.ty,
+                    .tag = resolved_tag,
+                    .val = resolved_val,
+                } })));
+        },
+        else => return val,
+    }
+}
src/Zcu.zig
@@ -3593,7 +3593,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
         },
         error.OutOfMemory => return error.OutOfMemory,
     };
-    defer air.deinit(gpa);
+    errdefer air.deinit(gpa);
 
     const invalidate_ies_deps = i: {
         if (!was_outdated) break :i false;
@@ -3615,13 +3615,36 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
     const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null);
 
     if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
+        air.deinit(gpa);
         return;
     }
 
+    try comp.work_queue.writeItem(.{ .codegen_func = .{
+        .func = func_index,
+        .air = air,
+    } });
+}
+
+/// Takes ownership of `air`, even on error.
+/// If any types referenced by `air` are unresolved, marks the codegen as failed.
+pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void {
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const comp = zcu.comp;
+
+    defer {
+        var air_mut = air;
+        air_mut.deinit(gpa);
+    }
+
+    const func = zcu.funcInfo(func_index);
+    const decl_index = func.owner_decl;
+    const decl = zcu.declPtr(decl_index);
+
     var liveness = try Liveness.analyze(gpa, air, ip);
     defer liveness.deinit(gpa);
 
-    if (dump_air) {
+    if (build_options.enable_debug_extensions and comp.verbose_air) {
         const fqn = try decl.fullyQualifiedName(zcu);
         std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
         @import("print_air.zig").dump(zcu, air, liveness);
@@ -3629,7 +3652,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
     }
 
     if (std.debug.runtime_safety) {
-        var verify = Liveness.Verify{
+        var verify: Liveness.Verify = .{
             .gpa = gpa,
             .air = air,
             .liveness = liveness,
@@ -3642,7 +3665,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
             else => {
                 try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
                 zcu.failed_analysis.putAssumeCapacityNoClobber(
-                    AnalUnit.wrap(.{ .decl = decl_index }),
+                    AnalUnit.wrap(.{ .func = func_index }),
                     try Module.ErrorMsg.create(
                         gpa,
                         decl.navSrcLoc(zcu),
@@ -3659,7 +3682,13 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
     const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0);
     defer codegen_prog_node.end();
 
-    if (comp.bin_file) |lf| {
+    if (!air.typesFullyResolved(zcu)) {
+        // A type we depend on failed to resolve. This is a transitive failure.
+        // Correcting this failure will involve changing a type this function
+        // depends on, hence triggering re-analysis of this function, so this
+        // interacts correctly with incremental compilation.
+        func.analysis(ip).state = .codegen_failure;
+    } else if (comp.bin_file) |lf| {
         lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
             error.AnalysisFail => {
@@ -3667,7 +3696,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
             },
             else => {
                 try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
-                zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create(
+                zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create(
                     gpa,
                     decl.navSrcLoc(zcu),
                     "unable to codegen: {s}",
@@ -3735,7 +3764,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
 
     // Decl itself is safely analyzed, and body analysis is not yet queued
 
-    try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index });
+    try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index });
     if (mod.emit_h != null) {
         // TODO: we ideally only want to do this if the function's type changed
         // since the last update
@@ -3812,7 +3841,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
     decl.analysis = .complete;
 
     try zcu.scanNamespace(namespace_index, decls, decl);
-
+    try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
 }
 
@@ -4103,7 +4132,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
     // Note this resolves the type of the Decl, not the value; if this Decl
     // is a struct, for example, this resolves `type` (which needs no resolution),
     // not the struct itself.
-    try sema.resolveTypeLayout(decl_ty);
+    try decl_ty.resolveLayout(mod);
 
     if (decl.kind == .@"usingnamespace") {
         if (!decl_ty.eql(Type.type, mod)) {
@@ -4220,7 +4249,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
     if (has_runtime_bits) {
         // Needed for codegen_decl which will call updateDecl and then the
         // codegen backend wants full access to the Decl Type.
-        try sema.resolveTypeFully(decl_ty);
+        try decl_ty.resolveFully(mod);
 
         try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
 
@@ -5212,23 +5241,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
         else => |e| return e,
     };
 
-    // Similarly, resolve any queued up types that were requested to be resolved for
-    // the backends.
-    for (sema.types_to_resolve.keys()) |ty| {
-        sema.resolveTypeFully(Type.fromInterned(ty)) catch |err| switch (err) {
-            error.GenericPoison => unreachable,
-            error.ComptimeReturn => unreachable,
-            error.ComptimeBreak => unreachable,
-            error.AnalysisFail => {
-                // In this case our function depends on a type that had a compile error.
-                // We should not try to lower this function.
-                decl.analysis = .dependency_failure;
-                return error.AnalysisFail;
-            },
-            else => |e| return e,
-        };
-    }
-
     try sema.flushExports();
 
     return .{
@@ -5793,6 +5805,16 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
     return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info })));
 }
 
+/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer
+/// child type's alignment is resolved so that an invalid alignment is not used.
+/// In general, prefer this function during semantic analysis.
+pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type {
+    if (info.flags.alignment != .none) {
+        _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema);
+    }
+    return zcu.ptrType(info);
+}
+
 pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
     return ptrType(mod, .{ .child = child_type.toIntern() });
 }
@@ -6368,15 +6390,21 @@ pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType)
     return max_align;
 }
 
-/// Returns the field alignment, assuming the union is not packed.
-/// Keep implementation in sync with `Sema.unionFieldAlignment`.
-/// Prefer to call that function instead of this one during Sema.
-pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
-    const ip = &mod.intern_pool;
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
+    return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
+}
+
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment {
+    const ip = &zcu.intern_pool;
+    assert(loaded_union.flagsPtr(ip).layout != .@"packed");
     const field_align = loaded_union.fieldAlign(ip, field_index);
     if (field_align != .none) return field_align;
     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
-    return field_ty.abiAlignment(mod);
+    if (field_ty.isNoReturn(zcu)) return .none;
+    return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
 }
 
 /// Returns the index of the active field, given the current tag value
@@ -6387,41 +6415,37 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType
     return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
 }
 
-/// Returns the field alignment of a non-packed struct in byte units.
-/// Keep implementation in sync with `Sema.structFieldAlignment`.
-/// asserts the layout is not packed.
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
 pub fn structFieldAlignment(
-    mod: *Module,
+    zcu: *Zcu,
     explicit_alignment: InternPool.Alignment,
     field_ty: Type,
     layout: std.builtin.Type.ContainerLayout,
 ) Alignment {
+    return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
+}
+
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn structFieldAlignmentAdvanced(
+    zcu: *Zcu,
+    explicit_alignment: InternPool.Alignment,
+    field_ty: Type,
+    layout: std.builtin.Type.ContainerLayout,
+    strat: Type.ResolveStrat,
+) SemaError!Alignment {
     assert(layout != .@"packed");
     if (explicit_alignment != .none) return explicit_alignment;
+    const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
     switch (layout) {
         .@"packed" => unreachable,
-        .auto => {
-            if (mod.getTarget().ofmt == .c) {
-                return structFieldAlignmentExtern(mod, field_ty);
-            } else {
-                return field_ty.abiAlignment(mod);
-            }
-        },
-        .@"extern" => return structFieldAlignmentExtern(mod, field_ty),
+        .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align,
+        .@"extern" => {},
     }
-}
-
-/// Returns the field alignment of an extern struct in byte units.
-/// This logic is duplicated in Type.abiAlignmentAdvanced.
-pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
-    const ty_abi_align = field_ty.abiAlignment(mod);
-
-    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
-        // The C ABI requires 128 bit integer fields of structs
-        // to be 16-bytes aligned.
-        return ty_abi_align.max(.@"16");
+    // extern
+    if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) {
+        return ty_abi_align.maxStrict(.@"16");
     }
-
     return ty_abi_align;
 }
 
@@ -6480,3 +6504,29 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved
 
     return result;
 }
+
+pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref {
+    const decl_index = try zcu.getBuiltinDecl(name);
+    zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt");
+    return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern());
+}
+
+pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex {
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const std_file = (zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig")).file;
+    const std_namespace = zcu.declPtr(std_file.root_decl.unwrap().?).getOwnedInnerNamespace(zcu).?;
+    const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
+    const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
+    zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
+    const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
+    const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
+    return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
+}
+
+pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type {
+    const ty_inst = try zcu.getBuiltin(name);
+    const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt"));
+    ty.resolveFully(zcu) catch @panic("std.builtin is corrupt");
+    return ty;
+}
test/cases/compile_errors/direct_struct_loop.zig
@@ -10,4 +10,3 @@ export fn entry() usize {
 // target=native
 //
 // :1:11: error: struct 'tmp.A' depends on itself
-// :2:5: note: while checking this field
test/cases/compile_errors/indirect_struct_loop.zig
@@ -16,6 +16,3 @@ export fn entry() usize {
 // target=native
 //
 // :1:11: error: struct 'tmp.A' depends on itself
-// :8:5: note: while checking this field
-// :5:5: note: while checking this field
-// :2:5: note: while checking this field
test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig
@@ -13,4 +13,3 @@ export fn entry() usize {
 // target=native
 //
 // :1:13: error: struct 'tmp.Foo' depends on itself
-// :2:5: note: while checking this field
test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig
@@ -13,4 +13,3 @@ export fn entry() usize {
 // target=native
 //
 // :1:13: error: union 'tmp.Foo' depends on itself
-// :2:5: note: while checking this field
test/cases/compile_errors/invalid_dependency_on_struct_size.zig
@@ -16,4 +16,3 @@ comptime {
 // target=native
 //
 // :6:21: error: struct layout depends on it having runtime bits
-// :4:13: note: while checking this field
test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig
@@ -15,5 +15,3 @@ export fn entry() void {
 // target=native
 //
 // :1:17: error: struct 'tmp.LhsExpr' depends on itself
-// :5:5: note: while checking this field
-// :2:5: note: while checking this field
test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig
@@ -1,5 +1,5 @@
 pub export fn entry(param: usize) usize {
-    return struct { param };
+    return struct { @TypeOf(param) };
 }
 
 // error
test/src/Cases.zig
@@ -395,10 +395,7 @@ fn addFromDirInner(
         if (entry.kind != .file) continue;
 
         // Ignore stuff such as .swp files
-        switch (Compilation.classifyFileExt(entry.basename)) {
-            .unknown => continue,
-            else => {},
-        }
+        if (!knownFileExtension(entry.basename)) continue;
         try filenames.append(try ctx.arena.dupe(u8, entry.path));
     }
 
@@ -623,8 +620,6 @@ pub fn lowerToBuildSteps(
     b: *std.Build,
     parent_step: *std.Build.Step,
     test_filters: []const []const u8,
-    cases_dir_path: []const u8,
-    incremental_exe: *std.Build.Step.Compile,
 ) void {
     const host = std.zig.system.resolveTargetQuery(.{}) catch |err|
         std.debug.panic("unable to detect native host: {s}\n", .{@errorName(err)});
@@ -637,20 +632,11 @@ pub fn lowerToBuildSteps(
             // compilation is in a happier state.
             continue;
         }
-        for (test_filters) |test_filter| {
-            if (std.mem.indexOf(u8, incr_case.base_path, test_filter)) |_| break;
-        } else if (test_filters.len > 0) continue;
-        const case_base_path_with_dir = std.fs.path.join(b.allocator, &.{
-            cases_dir_path, incr_case.base_path,
-        }) catch @panic("OOM");
-        const run = b.addRunArtifact(incremental_exe);
-        run.setName(incr_case.base_path);
-        run.addArgs(&.{
-            case_base_path_with_dir,
-            b.graph.zig_exe,
-        });
-        run.expectStdOutEqual("");
-        parent_step.dependOn(&run.step);
+        // TODO: the logic for running these was bad, so I've ripped it out. Rewrite this
+        // in a way that actually spawns the compiler, communicating with it over the
+        // compiler server protocol.
+        _ = incr_case;
+        @panic("TODO implement incremental test case executor");
     }
 
     for (self.cases.items) |case| {
@@ -1236,192 +1222,6 @@ const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
 const getExternalExecutor = std.zig.system.getExternalExecutor;
 
-const Compilation = @import("../../src/Compilation.zig");
-const zig_h = @import("../../src/link.zig").File.C.zig_h;
-const introspect = @import("../../src/introspect.zig");
-const ThreadPool = std.Thread.Pool;
-const WaitGroup = std.Thread.WaitGroup;
-const build_options = @import("build_options");
-const Package = @import("../../src/Package.zig");
-
-pub const std_options = .{
-    .log_level = .err,
-};
-
-var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
-    .stack_trace_frames = build_options.mem_leak_frames,
-}){};
-
-// TODO: instead of embedding the compiler in this process, spawn the compiler
-// as a sub-process and communicate the updates using the compiler protocol.
-pub fn main() !void {
-    const use_gpa = build_options.force_gpa or !builtin.link_libc;
-    const gpa = gpa: {
-        if (use_gpa) {
-            break :gpa general_purpose_allocator.allocator();
-        }
-        // We would prefer to use raw libc allocator here, but cannot
-        // use it if it won't support the alignment we need.
-        if (@alignOf(std.c.max_align_t) < @alignOf(i128)) {
-            break :gpa std.heap.c_allocator;
-        }
-        break :gpa std.heap.raw_c_allocator;
-    };
-
-    var single_threaded_arena = std.heap.ArenaAllocator.init(gpa);
-    defer single_threaded_arena.deinit();
-
-    var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
-        .child_allocator = single_threaded_arena.allocator(),
-    };
-    const arena = thread_safe_arena.allocator();
-
-    const args = try std.process.argsAlloc(arena);
-    const case_file_path = args[1];
-    const zig_exe_path = args[2];
-
-    var filenames = std.ArrayList([]const u8).init(arena);
-
-    const case_dirname = std.fs.path.dirname(case_file_path).?;
-    var iterable_dir = try std.fs.cwd().openDir(case_dirname, .{ .iterate = true });
-    defer iterable_dir.close();
-
-    if (std.mem.endsWith(u8, case_file_path, ".0.zig")) {
-        const stem = case_file_path[case_dirname.len + 1 .. case_file_path.len - "0.zig".len];
-        var it = iterable_dir.iterate();
-        while (try it.next()) |entry| {
-            if (entry.kind != .file) continue;
-            if (!std.mem.startsWith(u8, entry.name, stem)) continue;
-            try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name }));
-        }
-    } else {
-        try filenames.append(case_file_path);
-    }
-
-    if (filenames.items.len == 0) {
-        std.debug.print("failed to find the input source file(s) from '{s}'\n", .{
-            case_file_path,
-        });
-        std.process.exit(1);
-    }
-
-    // Sort filenames, so that incremental tests are contiguous and in-order
-    sortTestFilenames(filenames.items);
-
-    var ctx = Cases.init(gpa, arena);
-
-    var test_it = TestIterator{ .filenames = filenames.items };
-    while (try test_it.next()) |batch| {
-        const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent;
-        var cases = std.ArrayList(usize).init(arena);
-
-        for (batch) |filename| {
-            const max_file_size = 10 * 1024 * 1024;
-            const src = try iterable_dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0);
-
-            // Parse the manifest
-            var manifest = try TestManifest.parse(arena, src);
-
-            if (cases.items.len == 0) {
-                const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend);
-                const targets = try manifest.getConfigForKeyAlloc(arena, "target", std.Target.Query);
-                const c_frontends = try manifest.getConfigForKeyAlloc(ctx.arena, "c_frontend", CFrontend);
-                const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool);
-                const link_libc = try manifest.getConfigForKeyAssertSingle("link_libc", bool);
-                const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode);
-
-                if (manifest.type == .translate_c) {
-                    for (c_frontends) |c_frontend| {
-                        for (targets) |target_query| {
-                            const output = try manifest.trailingLinesSplit(ctx.arena);
-                            try ctx.translate.append(.{
-                                .name = std.fs.path.stem(filename),
-                                .c_frontend = c_frontend,
-                                .target = resolveTargetQuery(target_query),
-                                .is_test = is_test,
-                                .link_libc = link_libc,
-                                .input = src,
-                                .kind = .{ .translate = output },
-                            });
-                        }
-                    }
-                    continue;
-                }
-                if (manifest.type == .run_translated_c) {
-                    for (c_frontends) |c_frontend| {
-                        for (targets) |target_query| {
-                            const output = try manifest.trailingSplit(ctx.arena);
-                            try ctx.translate.append(.{
-                                .name = std.fs.path.stem(filename),
-                                .c_frontend = c_frontend,
-                                .target = resolveTargetQuery(target_query),
-                                .is_test = is_test,
-                                .link_libc = link_libc,
-                                .output = output,
-                                .input = src,
-                                .kind = .{ .run = output },
-                            });
-                        }
-                    }
-                    continue;
-                }
-
-                // Cross-product to get all possible test combinations
-                for (backends) |backend| {
-                    for (targets) |target| {
-                        const next = ctx.cases.items.len;
-                        try ctx.cases.append(.{
-                            .name = std.fs.path.stem(filename),
-                            .target = target,
-                            .backend = backend,
-                            .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator),
-                            .is_test = is_test,
-                            .output_mode = output_mode,
-                            .link_libc = backend == .llvm,
-                            .deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
-                        });
-                        try cases.append(next);
-                    }
-                }
-            }
-
-            for (cases.items) |case_index| {
-                const case = &ctx.cases.items[case_index];
-                if (strategy == .incremental and case.backend == .stage2 and case.target.getCpuArch() == .x86_64 and !case.link_libc and case.target.getOsTag() != .plan9) {
-                    // https://github.com/ziglang/zig/issues/15174
-                    continue;
-                }
-
-                switch (manifest.type) {
-                    .compile => {
-                        case.addCompile(src);
-                    },
-                    .@"error" => {
-                        const errors = try manifest.trailingLines(arena);
-                        switch (strategy) {
-                            .independent => {
-                                case.addError(src, errors);
-                            },
-                            .incremental => {
-                                case.addErrorNamed("update", src, errors);
-                            },
-                        }
-                    },
-                    .run => {
-                        const output = try manifest.trailingSplit(ctx.arena);
-                        case.addCompareOutput(src, output);
-                    },
-                    .translate_c => @panic("c_frontend specified for compile case"),
-                    .run_translated_c => @panic("c_frontend specified for compile case"),
-                    .cli => @panic("TODO cli tests"),
-                }
-            }
-        }
-    }
-
-    return runCases(&ctx, zig_exe_path);
-}
-
 fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget {
     return .{
         .query = query,
@@ -1430,470 +1230,33 @@ fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget {
     };
 }
 
-fn runCases(self: *Cases, zig_exe_path: []const u8) !void {
-    const host = try std.zig.system.resolveTargetQuery(.{});
-
-    var progress = std.Progress{};
-    const root_node = progress.start("compiler", self.cases.items.len);
-    progress.terminal = null;
-    defer root_node.end();
-
-    var zig_lib_directory = try introspect.findZigLibDirFromSelfExe(self.gpa, zig_exe_path);
-    defer zig_lib_directory.handle.close();
-    defer self.gpa.free(zig_lib_directory.path.?);
-
-    var aux_thread_pool: ThreadPool = undefined;
-    try aux_thread_pool.init(.{ .allocator = self.gpa });
-    defer aux_thread_pool.deinit();
-
-    // Use the same global cache dir for all the tests, such that we for example don't have to
-    // rebuild musl libc for every case (when LLVM backend is enabled).
-    var global_tmp = std.testing.tmpDir(.{});
-    defer global_tmp.cleanup();
-
-    var cache_dir = try global_tmp.dir.makeOpenPath(".zig-cache", .{});
-    defer cache_dir.close();
-    const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", ".zig-cache", "tmp", &global_tmp.sub_path });
-    defer self.gpa.free(tmp_dir_path);
-
-    const global_cache_directory: Compilation.Directory = .{
-        .handle = cache_dir,
-        .path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, ".zig-cache" }),
-    };
-    defer self.gpa.free(global_cache_directory.path.?);
-
-    {
-        for (self.cases.items) |*case| {
-            if (build_options.skip_non_native) {
-                if (case.target.getCpuArch() != builtin.cpu.arch)
-                    continue;
-                if (case.target.getObjectFormat() != builtin.object_format)
-                    continue;
-            }
-
-            // Skip tests that require LLVM backend when it is not available
-            if (!build_options.have_llvm and case.backend == .llvm)
-                continue;
-
-            assert(case.backend != .stage1);
-
-            for (build_options.test_filters) |test_filter| {
-                if (std.mem.indexOf(u8, case.name, test_filter)) |_| break;
-            } else if (build_options.test_filters.len > 0) continue;
-
-            var prg_node = root_node.start(case.name, case.updates.items.len);
-            prg_node.activate();
-            defer prg_node.end();
-
-            try runOneCase(
-                self.gpa,
-                &prg_node,
-                case.*,
-                zig_lib_directory,
-                zig_exe_path,
-                &aux_thread_pool,
-                global_cache_directory,
-                host,
-            );
-        }
-
-        for (self.translate.items) |*case| {
-            _ = case;
-            @panic("TODO is this even used?");
-        }
-    }
-}
-
-fn runOneCase(
-    allocator: Allocator,
-    root_node: *std.Progress.Node,
-    case: Case,
-    zig_lib_directory: Compilation.Directory,
-    zig_exe_path: []const u8,
-    thread_pool: *ThreadPool,
-    global_cache_directory: Compilation.Directory,
-    host: std.Target,
-) !void {
-    const tmp_src_path = "tmp.zig";
-    const enable_rosetta = build_options.enable_rosetta;
-    const enable_qemu = build_options.enable_qemu;
-    const enable_wine = build_options.enable_wine;
-    const enable_wasmtime = build_options.enable_wasmtime;
-    const enable_darling = build_options.enable_darling;
-    const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir;
-
-    const target = try std.zig.system.resolveTargetQuery(case.target);
-
-    var arena_allocator = std.heap.ArenaAllocator.init(allocator);
-    defer arena_allocator.deinit();
-    const arena = arena_allocator.allocator();
-
-    var tmp = std.testing.tmpDir(.{});
-    defer tmp.cleanup();
-
-    var cache_dir = try tmp.dir.makeOpenPath(".zig-cache", .{});
-    defer cache_dir.close();
-
-    const tmp_dir_path = try std.fs.path.join(
-        arena,
-        &[_][]const u8{ ".", ".zig-cache", "tmp", &tmp.sub_path },
-    );
-    const local_cache_path = try std.fs.path.join(
-        arena,
-        &[_][]const u8{ tmp_dir_path, ".zig-cache" },
-    );
-
-    const zig_cache_directory: Compilation.Directory = .{
-        .handle = cache_dir,
-        .path = local_cache_path,
-    };
-
-    var main_pkg: Package = .{
-        .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir },
-        .root_src_path = tmp_src_path,
-    };
-    defer {
-        var it = main_pkg.table.iterator();
-        while (it.next()) |kv| {
-            allocator.free(kv.key_ptr.*);
-            kv.value_ptr.*.destroy(allocator);
-        }
-        main_pkg.table.deinit(allocator);
-    }
-
-    for (case.deps.items) |dep| {
-        var pkg = try Package.create(
-            allocator,
-            tmp_dir_path,
-            dep.path,
-        );
-        errdefer pkg.destroy(allocator);
-        try main_pkg.add(allocator, dep.name, pkg);
+fn knownFileExtension(filename: []const u8) bool {
+    // List taken from `Compilation.classifyFileExt` in the compiler.
+    for ([_][]const u8{
+        ".c",     ".C",    ".cc",       ".cpp",
+        ".cxx",   ".stub", ".m",        ".mm",
+        ".ll",    ".bc",   ".s",        ".S",
+        ".h",     ".zig",  ".so",       ".dll",
+        ".dylib", ".tbd",  ".a",        ".lib",
+        ".o",     ".obj",  ".cu",       ".def",
+        ".rc",    ".res",  ".manifest",
+    }) |ext| {
+        if (std.mem.endsWith(u8, filename, ext)) return true;
     }
-
-    const bin_name = try std.zig.binNameAlloc(arena, .{
-        .root_name = "test_case",
-        .target = target,
-        .output_mode = case.output_mode,
-    });
-
-    const emit_directory: Compilation.Directory = .{
-        .path = tmp_dir_path,
-        .handle = tmp.dir,
-    };
-    const emit_bin: Compilation.EmitLoc = .{
-        .directory = emit_directory,
-        .basename = bin_name,
-    };
-    const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{
-        .directory = emit_directory,
-        .basename = "test_case.h",
-    } else null;
-    const use_llvm: bool = switch (case.backend) {
-        .llvm => true,
-        else => false,
-    };
-    const comp = try Compilation.create(allocator, .{
-        .local_cache_directory = zig_cache_directory,
-        .global_cache_directory = global_cache_directory,
-        .zig_lib_directory = zig_lib_directory,
-        .thread_pool = thread_pool,
-        .root_name = "test_case",
-        .target = target,
-        // TODO: support tests for object file building, and library builds
-        // and linking. This will require a rework to support multi-file
-        // tests.
-        .output_mode = case.output_mode,
-        .is_test = case.is_test,
-        .optimize_mode = case.optimize_mode,
-        .emit_bin = emit_bin,
-        .emit_h = emit_h,
-        .main_pkg = &main_pkg,
-        .keep_source_files_loaded = true,
-        .is_native_os = case.target.isNativeOs(),
-        .is_native_abi = case.target.isNativeAbi(),
-        .dynamic_linker = target.dynamic_linker.get(),
-        .link_libc = case.link_libc,
-        .use_llvm = use_llvm,
-        .self_exe_path = zig_exe_path,
-        // TODO instead of turning off color, pass in a std.Progress.Node
-        .color = .off,
-        .reference_trace = 0,
-        // TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in
-        //       until the auto-select mechanism deems them worthy
-        .use_lld = switch (case.backend) {
-            .stage2 => false,
-            else => null,
-        },
-    });
-    defer comp.destroy();
-
-    update: for (case.updates.items, 0..) |update, update_index| {
-        var update_node = root_node.start(update.name, 3);
-        update_node.activate();
-        defer update_node.end();
-
-        var sync_node = update_node.start("write", 0);
-        sync_node.activate();
-        for (update.files.items) |file| {
-            try tmp.dir.writeFile(.{ .sub_path = file.path, .data = file.src });
-        }
-        sync_node.end();
-
-        var module_node = update_node.start("parse/analysis/codegen", 0);
-        module_node.activate();
-        try comp.makeBinFileWritable();
-        try comp.update(&module_node);
-        module_node.end();
-
-        if (update.case != .Error) {
-            var all_errors = try comp.getAllErrorsAlloc();
-            defer all_errors.deinit(allocator);
-            if (all_errors.errorMessageCount() > 0) {
-                all_errors.renderToStdErr(.{
-                    .ttyconf = std.io.tty.detectConfig(std.io.getStdErr()),
-                });
-                // TODO print generated C code
-                return error.UnexpectedCompileErrors;
-            }
-        }
-
-        switch (update.case) {
-            .Header => |expected_output| {
-                var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only });
-                defer file.close();
-                const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
-
-                try std.testing.expectEqualStrings(expected_output, out);
-            },
-            .CompareObjectFile => |expected_output| {
-                var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only });
-                defer file.close();
-                const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
-
-                try std.testing.expectEqualStrings(expected_output, out);
-            },
-            .Compile => {},
-            .Error => |expected_errors| {
-                var test_node = update_node.start("assert", 0);
-                test_node.activate();
-                defer test_node.end();
-
-                var error_bundle = try comp.getAllErrorsAlloc();
-                defer error_bundle.deinit(allocator);
-
-                if (error_bundle.errorMessageCount() == 0) {
-                    return error.ExpectedCompilationErrors;
-                }
-
-                var actual_stderr = std.ArrayList(u8).init(arena);
-                try error_bundle.renderToWriter(.{
-                    .ttyconf = .no_color,
-                    .include_reference_trace = false,
-                    .include_source_line = false,
-                }, actual_stderr.writer());
-
-                // Render the expected lines into a string that we can compare verbatim.
-                var expected_generated = std.ArrayList(u8).init(arena);
-
-                var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n');
-                for (expected_errors) |expect_line| {
-                    const actual_line = actual_line_it.next() orelse {
-                        try expected_generated.appendSlice(expect_line);
-                        try expected_generated.append('\n');
-                        continue;
-                    };
-                    if (std.mem.endsWith(u8, actual_line, expect_line)) {
-                        try expected_generated.appendSlice(actual_line);
-                        try expected_generated.append('\n');
-                        continue;
-                    }
-                    if (std.mem.startsWith(u8, expect_line, ":?:?: ")) {
-                        if (std.mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) {
-                            try expected_generated.appendSlice(actual_line);
-                            try expected_generated.append('\n');
-                            continue;
-                        }
-                    }
-                    try expected_generated.appendSlice(expect_line);
-                    try expected_generated.append('\n');
-                }
-
-                try std.testing.expectEqualStrings(expected_generated.items, actual_stderr.items);
-            },
-            .Execution => |expected_stdout| {
-                if (!std.process.can_spawn) {
-                    std.debug.print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
-                    continue :update; // Pass test.
-                }
-
-                update_node.setEstimatedTotalItems(4);
-
-                var argv = std.ArrayList([]const u8).init(allocator);
-                defer argv.deinit();
-
-                const exec_result = x: {
-                    var exec_node = update_node.start("execute", 0);
-                    exec_node.activate();
-                    defer exec_node.end();
-
-                    // We go out of our way here to use the unique temporary directory name in
-                    // the exe_path so that it makes its way into the cache hash, avoiding
-                    // cache collisions from multiple threads doing `zig run` at the same time
-                    // on the same test_case.c input filename.
-                    const ss = std.fs.path.sep_str;
-                    const exe_path = try std.fmt.allocPrint(
-                        arena,
-                        ".." ++ ss ++ "{s}" ++ ss ++ "{s}",
-                        .{ &tmp.sub_path, bin_name },
-                    );
-                    if (case.target.ofmt != null and case.target.ofmt.? == .c) {
-                        if (getExternalExecutor(host, &target, .{ .link_libc = true }) != .native) {
-                            // We wouldn't be able to run the compiled C code.
-                            continue :update; // Pass test.
-                        }
-                        try argv.appendSlice(&[_][]const u8{
-                            zig_exe_path,
-                            "run",
-                            "-cflags",
-                            "-std=c99",
-                            "-pedantic",
-                            "-Werror",
-                            "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875
-                            "--",
-                            "-lc",
-                            exe_path,
-                        });
-                        if (zig_lib_directory.path) |p| {
-                            try argv.appendSlice(&.{ "-I", p });
-                        }
-                    } else switch (getExternalExecutor(host, &target, .{ .link_libc = case.link_libc })) {
-                        .native => {
-                            if (case.backend == .stage2 and case.target.getCpuArch().isArmOrThumb()) {
-                                // https://github.com/ziglang/zig/issues/13623
-                                continue :update; // Pass test.
-                            }
-                            try argv.append(exe_path);
-                        },
-                        .bad_dl, .bad_os_or_cpu => continue :update, // Pass test.
-
-                        .rosetta => if (enable_rosetta) {
-                            try argv.append(exe_path);
-                        } else {
-                            continue :update; // Rosetta not available, pass test.
-                        },
-
-                        .qemu => |qemu_bin_name| if (enable_qemu) {
-                            const need_cross_glibc = target.isGnuLibC() and case.link_libc;
-                            const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc)
-                                glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test
-                            else
-                                null;
-                            try argv.append(qemu_bin_name);
-                            if (glibc_dir_arg) |dir| {
-                                const linux_triple = try target.linuxTriple(arena);
-                                const full_dir = try std.fs.path.join(arena, &[_][]const u8{
-                                    dir,
-                                    linux_triple,
-                                });
-
-                                try argv.append("-L");
-                                try argv.append(full_dir);
-                            }
-                            try argv.append(exe_path);
-                        } else {
-                            continue :update; // QEMU not available; pass test.
-                        },
-
-                        .wine => |wine_bin_name| if (enable_wine) {
-                            try argv.append(wine_bin_name);
-                            try argv.append(exe_path);
-                        } else {
-                            continue :update; // Wine not available; pass test.
-                        },
-
-                        .wasmtime => |wasmtime_bin_name| if (enable_wasmtime) {
-                            try argv.append(wasmtime_bin_name);
-                            try argv.append("--dir=.");
-                            try argv.append(exe_path);
-                        } else {
-                            continue :update; // wasmtime not available; pass test.
-                        },
-
-                        .darling => |darling_bin_name| if (enable_darling) {
-                            try argv.append(darling_bin_name);
-                            // Since we use relative to cwd here, we invoke darling with
-                            // "shell" subcommand.
-                            try argv.append("shell");
-                            try argv.append(exe_path);
-                        } else {
-                            continue :update; // Darling not available; pass test.
-                        },
-                    }
-
-                    try comp.makeBinFileExecutable();
-
-                    while (true) {
-                        break :x std.process.Child.run(.{
-                            .allocator = allocator,
-                            .argv = argv.items,
-                            .cwd_dir = tmp.dir,
-                            .cwd = tmp_dir_path,
-                        }) catch |err| switch (err) {
-                            error.FileBusy => {
-                                // There is a fundamental design flaw in Unix systems with how
-                                // ETXTBSY interacts with fork+exec.
-                                // https://github.com/golang/go/issues/22315
-                                // https://bugs.openjdk.org/browse/JDK-8068370
-                                // Unfortunately, this could be a real error, but we can't
-                                // tell the difference here.
-                                continue;
-                            },
-                            else => {
-                                std.debug.print("\n{s}.{d} The following command failed with {s}:\n", .{
-                                    case.name, update_index, @errorName(err),
-                                });
-                                dumpArgs(argv.items);
-                                return error.ChildProcessExecution;
-                            },
-                        };
-                    }
-                };
-                var test_node = update_node.start("test", 0);
-                test_node.activate();
-                defer test_node.end();
-                defer allocator.free(exec_result.stdout);
-                defer allocator.free(exec_result.stderr);
-                switch (exec_result.term) {
-                    .Exited => |code| {
-                        if (code != 0) {
-                            std.debug.print("\n{s}\n{s}: execution exited with code {d}:\n", .{
-                                exec_result.stderr, case.name, code,
-                            });
-                            dumpArgs(argv.items);
-                            return error.ChildProcessExecution;
-                        }
-                    },
-                    else => {
-                        std.debug.print("\n{s}\n{s}: execution crashed:\n", .{
-                            exec_result.stderr, case.name,
-                        });
-                        dumpArgs(argv.items);
-                        return error.ChildProcessExecution;
-                    },
-                }
-                try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout);
-                // We allow stderr to have garbage in it because wasmtime prints a
-                // warning about --invoke even though we don't pass it.
-                //std.testing.expectEqualStrings("", exec_result.stderr);
-            },
-        }
-    }
-}
-
-fn dumpArgs(argv: []const []const u8) void {
-    for (argv) |arg| {
-        std.debug.print("{s} ", .{arg});
+    // Final check for .so.X, .so.X.Y, .so.X.Y.Z.
+    // From `Compilation.hasSharedLibraryExt`.
+    var it = std.mem.splitScalar(u8, filename, '.');
+    _ = it.first();
+    var so_txt = it.next() orelse return false;
+    while (!std.mem.eql(u8, so_txt, "so")) {
+        so_txt = it.next() orelse return false;
     }
-    std.debug.print("\n", .{});
+    const n1 = it.next() orelse return false;
+    const n2 = it.next();
+    const n3 = it.next();
+    _ = std.fmt.parseInt(u32, n1, 10) catch return false;
+    if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+    if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+    if (it.next() != null) return false;
+    return false;
 }
test/tests.zig
@@ -1250,7 +1250,6 @@ pub fn addCases(
     b: *std.Build,
     parent_step: *Step,
     test_filters: []const []const u8,
-    check_case_exe: *std.Build.Step.Compile,
     target: std.Build.ResolvedTarget,
     translate_c_options: @import("src/Cases.zig").TranslateCOptions,
     build_options: @import("cases.zig").BuildOptions,
@@ -1268,12 +1267,9 @@ pub fn addCases(
 
     cases.lowerToTranslateCSteps(b, parent_step, test_filters, target, translate_c_options);
 
-    const cases_dir_path = try b.build_root.join(b.allocator, &.{ "test", "cases" });
     cases.lowerToBuildSteps(
         b,
         parent_step,
         test_filters,
-        cases_dir_path,
-        check_case_exe,
     );
 }
build.zig
@@ -82,15 +82,6 @@ pub fn build(b: *std.Build) !void {
     docs_step.dependOn(langref_step);
     docs_step.dependOn(std_docs_step);
 
-    const check_case_exe = b.addExecutable(.{
-        .name = "check-case",
-        .root_source_file = b.path("test/src/Cases.zig"),
-        .target = b.graph.host,
-        .optimize = optimize,
-        .single_threaded = single_threaded,
-    });
-    check_case_exe.stack_size = stack_size;
-
     const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
     const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
     const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
@@ -222,7 +213,6 @@ pub fn build(b: *std.Build) !void {
     if (target.result.os.tag == .windows and target.result.abi == .gnu) {
         // LTO is currently broken on mingw, this can be removed when it's fixed.
         exe.want_lto = false;
-        check_case_exe.want_lto = false;
     }
 
     const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
@@ -245,7 +235,6 @@ pub fn build(b: *std.Build) !void {
 
     if (link_libc) {
         exe.linkLibC();
-        check_case_exe.linkLibC();
     }
 
     const is_debug = optimize == .Debug;
@@ -339,21 +328,17 @@ pub fn build(b: *std.Build) !void {
             }
 
             try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
-            try addCmakeCfgOptionsToExe(b, cfg, check_case_exe, use_zig_libcxx);
         } else {
             // Here we are -Denable-llvm but no cmake integration.
             try addStaticLlvmOptionsToExe(exe);
-            try addStaticLlvmOptionsToExe(check_case_exe);
         }
         if (target.result.os.tag == .windows) {
-            inline for (.{ exe, check_case_exe }) |artifact| {
-                // LLVM depends on networking as of version 18.
-                artifact.linkSystemLibrary("ws2_32");
+            // LLVM depends on networking as of version 18.
+            exe.linkSystemLibrary("ws2_32");
 
-                artifact.linkSystemLibrary("version");
-                artifact.linkSystemLibrary("uuid");
-                artifact.linkSystemLibrary("ole32");
-            }
+            exe.linkSystemLibrary("version");
+            exe.linkSystemLibrary("uuid");
+            exe.linkSystemLibrary("ole32");
         }
     }
 
@@ -394,7 +379,6 @@ pub fn build(b: *std.Build) !void {
     const test_filters = b.option([]const []const u8, "test-filter", "Skip tests that do not match any filter") orelse &[0][]const u8{};
 
     const test_cases_options = b.addOptions();
-    check_case_exe.root_module.addOptions("build_options", test_cases_options);
 
     test_cases_options.addOption(bool, "enable_tracy", false);
     test_cases_options.addOption(bool, "enable_debug_extensions", enable_debug_extensions);
@@ -458,7 +442,7 @@ pub fn build(b: *std.Build) !void {
     test_step.dependOn(check_fmt);
 
     const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
-    try tests.addCases(b, test_cases_step, test_filters, check_case_exe, target, .{
+    try tests.addCases(b, test_cases_step, test_filters, target, .{
         .skip_translate_c = skip_translate_c,
         .skip_run_translated_c = skip_run_translated_c,
     }, .{