Commit a1053e8e1d

Jacob Young <jacobly0@users.noreply.github.com>
2024-07-11 16:47:23
InternPool: add and use a mutate mutex for each list
This allows the mutate mutex to be locked only during actual grows, which are rare. For the lists that didn't previously have a mutex, this change has little effect, since grows are rare and there is zero contention on a mutex that is only ever locked by one thread. This change allows `extra` to be mutated without racing with a grow.
1 parent d72a8db
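
The pattern this commit introduces pairs lock-free `.unordered` atomic loads on the read side with mutex-guarded read-modify-write sequences on the write side, published via `.release` atomic stores. A minimal standalone sketch of that pattern (the names `Flags`, `flagsUnordered`, and `setStatus` are illustrative stand-ins for the accessors added below, not the real InternPool API):

const std = @import("std");

// Stand-in for a flags word living in one of the InternPool lists.
const Flags = packed struct(u32) {
    status: u8 = 0,
    alignment: u8 = 0,
    reserved: u16 = 0,
};

var mutate_mutex: std.Thread.Mutex = .{};
var flags: Flags = .{};

// Readers never lock: an unordered atomic load of the whole word cannot
// tear, and every writer publishes complete words atomically.
fn flagsUnordered() Flags {
    return @atomicLoad(Flags, &flags, .unordered);
}

// Writers serialize on the mutex so that two read-modify-write sequences
// updating different fields of the same word cannot lose each other's
// stores, then publish with a release store.
fn setStatus(status: u8) void {
    mutate_mutex.lock();
    defer mutate_mutex.unlock();
    var f = flags; // plain read is fine: the lock excludes other writers
    f.status = status;
    @atomicStore(Flags, &flags, f, .release);
}

pub fn main() void {
    setStatus(2);
    std.debug.print("status = {}\n", .{flagsUnordered().status});
}

Grows take the same mutex around the list-pointer release (see the hunk adding mutable.mutate.mutex.lock() in src/InternPool.zig below), which is what makes it safe for these setters to mutate `extra` without racing with a grow.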
src/Air/types_resolved.zig
@@ -501,8 +501,8 @@ fn checkType(ty: Type, zcu: *Zcu) bool {
             .struct_type => {
                 const struct_obj = zcu.typeToStruct(ty).?;
                 return switch (struct_obj.layout) {
-                    .@"packed" => struct_obj.backingIntType(ip).* != .none,
-                    .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
+                    .@"packed" => struct_obj.backingIntTypeUnordered(ip) != .none,
+                    .auto, .@"extern" => struct_obj.flagsUnordered(ip).fully_resolved,
                 };
             },
             .anon_struct_type => |tuple| {
@@ -516,6 +516,6 @@ fn checkType(ty: Type, zcu: *Zcu) bool {
             },
             else => unreachable,
         },
-        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
+        .Union => return zcu.typeToUnion(ty).?.flagsUnordered(ip).status == .fully_resolved,
     };
 }
src/arch/arm/abi.zig
@@ -56,7 +56,7 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class {
         .Union => {
             const bit_size = ty.bitSize(pt);
             const union_obj = pt.zcu.typeToUnion(ty).?;
-            if (union_obj.getLayout(ip) == .@"packed") {
+            if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
src/arch/riscv64/CodeGen.zig
@@ -768,7 +768,7 @@ pub fn generate(
         @intFromEnum(FrameIndex.stack_frame),
         FrameAlloc.init(.{
             .size = 0,
-            .alignment = func.analysis(ip).stack_alignment.max(.@"1"),
+            .alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
         }),
     );
     function.frame_allocs.set(
src/arch/wasm/abi.zig
@@ -71,7 +71,7 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
         },
         .Union => {
             const union_obj = pt.zcu.typeToUnion(ty).?;
-            if (union_obj.getLayout(ip) == .@"packed") {
+            if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                 if (ty.bitSize(pt) <= 64) return direct;
                 return .{ .direct, .direct };
             }
@@ -107,7 +107,7 @@ pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (mod.typeToPackedStruct(ty)) |packed_struct| {
-                return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
+                return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
             } else {
                 assert(ty.structFieldCount(mod) == 1);
                 return scalarType(ty.structFieldType(0, mod), pt);
@@ -115,7 +115,7 @@ pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
         },
         .Union => {
             const union_obj = mod.typeToUnion(ty).?;
-            if (union_obj.getLayout(ip) != .@"packed") {
+            if (union_obj.flagsUnordered(ip).layout != .@"packed") {
                 const layout = pt.getUnionLayout(union_obj);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
                     return scalarType(ty.unionTagTypeSafety(mod).?, pt);
src/arch/wasm/CodeGen.zig
@@ -1011,7 +1011,7 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
         },
         .Struct => {
             if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
-                return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
+                return typeToValtype(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
             } else {
                 return wasm.Valtype.i32;
             }
@@ -1746,7 +1746,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
         => return ty.hasRuntimeBitsIgnoreComptime(pt),
         .Union => {
             if (mod.typeToUnion(ty)) |union_obj| {
-                if (union_obj.getLayout(ip) == .@"packed") {
+                if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                     return ty.abiSize(pt) > 8;
                 }
             }
@@ -1754,7 +1754,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
         },
         .Struct => {
             if (mod.typeToPackedStruct(ty)) |packed_struct| {
-                return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
+                return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
             }
             return ty.hasRuntimeBitsIgnoreComptime(pt);
         },
@@ -3377,7 +3377,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
                 assert(struct_type.layout == .@"packed");
                 var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
                 val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable;
-                const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
+                const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
                 const int_val = try pt.intValue(
                     backing_int_ty,
                     mem.readInt(u64, &buf, .little),
@@ -3443,7 +3443,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
         },
         .Struct => {
             const packed_struct = mod.typeToPackedStruct(ty).?;
-            return func.emitUndefined(Type.fromInterned(packed_struct.backingIntType(ip).*));
+            return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
         },
         else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
     }
@@ -3974,7 +3974,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             .Struct => result: {
                 const packed_struct = mod.typeToPackedStruct(struct_ty).?;
                 const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
-                const backing_ty = Type.fromInterned(packed_struct.backingIntType(ip).*);
+                const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
                 const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
                     return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
                 };
@@ -5377,7 +5377,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     }
                     const packed_struct = mod.typeToPackedStruct(result_ty).?;
                     const field_types = packed_struct.field_types;
-                    const backing_type = Type.fromInterned(packed_struct.backingIntType(ip).*);
+                    const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
 
                     // ensure the result is zero'd
                     const result = try func.allocLocal(backing_type);
src/arch/x86_64/abi.zig
@@ -349,7 +349,7 @@ fn classifySystemVStruct(
                 .@"packed" => {},
             }
         } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
-            switch (field_loaded_union.getLayout(ip)) {
+            switch (field_loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
                     continue;
@@ -362,11 +362,11 @@ fn classifySystemVStruct(
             result_class.* = result_class.combineSystemV(field_class);
         byte_offset += field_ty.abiSize(pt);
     }
-    const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
+    const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
     std.debug.assert(final_byte_offset == std.mem.alignForward(
         u64,
         byte_offset,
-        loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
+        loaded_struct.flagsUnordered(ip).alignment.toByteUnits().?,
     ));
     return final_byte_offset;
 }
@@ -390,7 +390,7 @@ fn classifySystemVUnion(
                 .@"packed" => {},
             }
         } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
-            switch (field_loaded_union.getLayout(ip)) {
+            switch (field_loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
                     continue;
@@ -402,7 +402,7 @@ fn classifySystemVUnion(
         for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
             result_class.* = result_class.combineSystemV(field_class);
     }
-    return starting_byte_offset + loaded_union.size(ip).*;
+    return starting_byte_offset + loaded_union.sizeUnordered(ip);
 }
 
 pub const SysV = struct {
src/arch/x86_64/CodeGen.zig
@@ -856,7 +856,7 @@ pub fn generate(
         @intFromEnum(FrameIndex.stack_frame),
         FrameAlloc.init(.{
             .size = 0,
-            .alignment = func.analysis(ip).stack_alignment.max(.@"1"),
+            .alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
         }),
     );
     function.frame_allocs.set(
src/codegen/c/Type.zig
@@ -1744,7 +1744,7 @@ pub const Pool = struct {
                         .@"packed" => return pool.fromType(
                             allocator,
                             scratch,
-                            Type.fromInterned(loaded_struct.backingIntType(ip).*),
+                            Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip)),
                             pt,
                             mod,
                             kind,
@@ -1817,7 +1817,7 @@ pub const Pool = struct {
                 },
                 .union_type => {
                     const loaded_union = ip.loadUnionType(ip_index);
-                    switch (loaded_union.getLayout(ip)) {
+                    switch (loaded_union.flagsUnordered(ip).layout) {
                         .auto, .@"extern" => {
                             const has_tag = loaded_union.hasTag(ip);
                             const fwd_decl = try pool.getFwdDecl(allocator, .{
src/codegen/c.zig
@@ -1366,7 +1366,7 @@ pub const DeclGen = struct {
                 const loaded_union = ip.loadUnionType(ty.toIntern());
                 if (un.tag == .none) {
                     const backing_ty = try ty.unionBackingType(pt);
-                    switch (loaded_union.getLayout(ip)) {
+                    switch (loaded_union.flagsUnordered(ip).layout) {
                         .@"packed" => {
                             if (!location.isInitializer()) {
                                 try writer.writeByte('(');
@@ -1401,7 +1401,7 @@ pub const DeclGen = struct {
                     const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
                     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
                     const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
-                    if (loaded_union.getLayout(ip) == .@"packed") {
+                    if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
                         if (field_ty.hasRuntimeBits(pt)) {
                             if (field_ty.isPtrAtRuntime(zcu)) {
                                 try writer.writeByte('(');
@@ -1629,7 +1629,7 @@ pub const DeclGen = struct {
                 },
                 .union_type => {
                     const loaded_union = ip.loadUnionType(ty.toIntern());
-                    switch (loaded_union.getLayout(ip)) {
+                    switch (loaded_union.flagsUnordered(ip).layout) {
                         .auto, .@"extern" => {
                             if (!location.isInitializer()) {
                                 try writer.writeByte('(');
@@ -1792,7 +1792,7 @@ pub const DeclGen = struct {
                 else => unreachable,
             }
         }
-        if (fn_val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold)
+        if (fn_val.getFunction(zcu)) |func| if (func.analysisUnordered(ip).is_cold)
             try w.writeAll("zig_cold ");
         if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
 
@@ -5527,7 +5527,7 @@ fn fieldLocation(
                 .{ .field = field_index } },
         .union_type => {
             const loaded_union = ip.loadUnionType(container_ty.toIntern());
-            switch (loaded_union.getLayout(ip)) {
+            switch (loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
                     if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
@@ -5763,7 +5763,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
             .{ .field = extra.field_index },
         .union_type => field_name: {
             const loaded_union = ip.loadUnionType(struct_ty.toIntern());
-            switch (loaded_union.getLayout(ip)) {
+            switch (loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
                     break :field_name if (loaded_union.hasTag(ip))
@@ -7267,7 +7267,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, union_ty);
-    if (loaded_union.getLayout(ip) == .@"packed") return f.moveCValue(inst, union_ty, payload);
+    if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);
 
     const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
         const layout = union_ty.unionGetLayout(pt);
src/codegen/llvm.zig
@@ -1086,7 +1086,7 @@ pub const Object = struct {
         // If there is no such function in the module, it means the source code does not need it.
         const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return;
         const llvm_fn = o.builder.getGlobal(name) orelse return;
-        const errors_len = o.pt.zcu.intern_pool.global_error_set.mutate.list.len;
+        const errors_len = o.pt.zcu.intern_pool.global_error_set.getNamesFromMainThread().len;
 
         var wip = try Builder.WipFunction.init(&o.builder, .{
             .function = llvm_fn.ptrConst(&o.builder).kind.function,
@@ -1385,13 +1385,14 @@ pub const Object = struct {
         var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder);
         defer attributes.deinit(&o.builder);
 
-        if (func.analysis(ip).is_noinline) {
+        const func_analysis = func.analysisUnordered(ip);
+        if (func_analysis.is_noinline) {
             try attributes.addFnAttr(.@"noinline", &o.builder);
         } else {
             _ = try attributes.removeFnAttr(.@"noinline");
         }
 
-        const stack_alignment = func.analysis(ip).stack_alignment;
+        const stack_alignment = func.analysisUnordered(ip).stack_alignment;
         if (stack_alignment != .none) {
             try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
             try attributes.addFnAttr(.@"noinline", &o.builder);
@@ -1399,7 +1400,7 @@ pub const Object = struct {
             _ = try attributes.removeFnAttr(.alignstack);
         }
 
-        if (func.analysis(ip).is_cold) {
+        if (func_analysis.is_cold) {
             try attributes.addFnAttr(.cold, &o.builder);
         } else {
             _ = try attributes.removeFnAttr(.cold);
@@ -2403,7 +2404,7 @@ pub const Object = struct {
                 defer gpa.free(name);
 
                 if (zcu.typeToPackedStruct(ty)) |struct_type| {
-                    const backing_int_ty = struct_type.backingIntType(ip).*;
+                    const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
                     if (backing_int_ty != .none) {
                         const info = Type.fromInterned(backing_int_ty).intInfo(zcu);
                         const builder_name = try o.builder.metadataString(name);
@@ -2615,7 +2616,7 @@ pub const Object = struct {
                     if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
 
                     const field_size = Type.fromInterned(field_ty).abiSize(pt);
-                    const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
+                    const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
                         .@"packed" => .none,
                         .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)),
                     };
@@ -3303,7 +3304,7 @@ pub const Object = struct {
                     const struct_type = ip.loadStructType(t.toIntern());
 
                     if (struct_type.layout == .@"packed") {
-                        const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
+                        const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntTypeUnordered(ip)));
                         try o.type_map.put(o.gpa, t.toIntern(), int_ty);
                         return int_ty;
                     }
@@ -3346,7 +3347,7 @@ pub const Object = struct {
                             // This is a zero-bit field. If there are runtime bits after this field,
                             // map to the next LLVM field (which we know exists): otherwise, don't
                             // map the field, indicating it's at the end of the struct.
-                            if (offset != struct_type.size(ip).*) {
+                            if (offset != struct_type.sizeUnordered(ip)) {
                                 try o.struct_field_map.put(o.gpa, .{
                                     .struct_ty = t.toIntern(),
                                     .field_index = field_index,
@@ -3450,7 +3451,7 @@ pub const Object = struct {
                     const union_obj = ip.loadUnionType(t.toIntern());
                     const layout = pt.getUnionLayout(union_obj);
 
-                    if (union_obj.flagsPtr(ip).layout == .@"packed") {
+                    if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                         const int_ty = try o.builder.intType(@intCast(t.bitSize(pt)));
                         try o.type_map.put(o.gpa, t.toIntern(), int_ty);
                         return int_ty;
@@ -3697,7 +3698,7 @@ pub const Object = struct {
                 if (layout.payload_size == 0) return o.lowerValue(un.tag);
 
                 const union_obj = mod.typeToUnion(ty).?;
-                const container_layout = union_obj.getLayout(ip);
+                const container_layout = union_obj.flagsUnordered(ip).layout;
 
                 assert(container_layout == .@"packed");
 
@@ -4205,7 +4206,7 @@ pub const Object = struct {
                 if (layout.payload_size == 0) return o.lowerValue(un.tag);
 
                 const union_obj = mod.typeToUnion(ty).?;
-                const container_layout = union_obj.getLayout(ip);
+                const container_layout = union_obj.flagsUnordered(ip).layout;
 
                 var need_unnamed = false;
                 const payload = if (un.tag != .none) p: {
@@ -10045,7 +10046,7 @@ pub const FuncGen = struct {
             },
             .Struct => {
                 if (mod.typeToPackedStruct(result_ty)) |struct_type| {
-                    const backing_int_ty = struct_type.backingIntType(ip).*;
+                    const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
                     assert(backing_int_ty != .none);
                     const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt);
                     const int_ty = try o.builder.intType(@intCast(big_bits));
@@ -10155,7 +10156,7 @@ pub const FuncGen = struct {
         const layout = union_ty.unionGetLayout(pt);
         const union_obj = mod.typeToUnion(union_ty).?;
 
-        if (union_obj.getLayout(ip) == .@"packed") {
+        if (union_obj.flagsUnordered(ip).layout == .@"packed") {
             const big_bits = union_ty.bitSize(pt);
             const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
@@ -11281,7 +11282,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
             .struct_type => {
                 const struct_type = ip.loadStructType(return_type.toIntern());
                 assert(struct_type.haveLayout(ip));
-                const size: u64 = struct_type.size(ip).*;
+                const size: u64 = struct_type.sizeUnordered(ip);
                 assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
                 if (size % 8 > 0) {
                     types_buffer[types_index - 1] = try o.builder.intType(@intCast(size % 8 * 8));
@@ -11587,7 +11588,7 @@ const ParamTypeIterator = struct {
                 .struct_type => {
                     const struct_type = ip.loadStructType(ty.toIntern());
                     assert(struct_type.haveLayout(ip));
-                    const size: u64 = struct_type.size(ip).*;
+                    const size: u64 = struct_type.sizeUnordered(ip);
                     assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
                     if (size % 8 > 0) {
                         types_buffer[types_index - 1] =
src/codegen/spirv.zig
@@ -1463,7 +1463,7 @@ const DeclGen = struct {
         const ip = &mod.intern_pool;
         const union_obj = mod.typeToUnion(ty).?;
 
-        if (union_obj.getLayout(ip) == .@"packed") {
+        if (union_obj.flagsUnordered(ip).layout == .@"packed") {
             return self.todo("packed union types", .{});
         }
 
@@ -1735,7 +1735,7 @@ const DeclGen = struct {
                 };
 
                 if (struct_type.layout == .@"packed") {
-                    return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
+                    return try self.resolveType(Type.fromInterned(struct_type.backingIntTypeUnordered(ip)), .direct);
                 }
 
                 var member_types = std.ArrayList(IdRef).init(self.gpa);
@@ -5081,7 +5081,7 @@ const DeclGen = struct {
         const union_ty = mod.typeToUnion(ty).?;
         const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
 
-        if (union_ty.getLayout(ip) == .@"packed") {
+        if (union_ty.flagsUnordered(ip).layout == .@"packed") {
             unreachable; // TODO
         }
 
src/link/Elf/ZigObject.zig
@@ -1093,7 +1093,7 @@ pub fn updateFunc(
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
src/link/MachO/ZigObject.zig
@@ -699,7 +699,7 @@ pub fn updateFunc(
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
src/link/Wasm/ZigObject.zig
@@ -1051,7 +1051,7 @@ fn setupErrorsLen(zig_object: *ZigObject, wasm_file: *Wasm) !void {
     const gpa = wasm_file.base.comp.gpa;
     const sym_index = zig_object.findGlobalSymbol("__zig_errors_len") orelse return;
 
-    const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.mutate.list.len;
+    const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.getNamesFromMainThread().len;
     // overwrite existing atom if it already exists (maybe the error set has increased)
     // if not, allocate a new atom.
     const atom_index = if (wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = sym_index })) |index| blk: {
src/link/Coff.zig
@@ -1156,7 +1156,7 @@ pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
src/link/Plan9.zig
@@ -449,7 +449,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
     const code = switch (res) {
         .ok => try code_buffer.toOwnedSlice(),
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
src/Zcu/PerThread.zig
@@ -641,8 +641,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
 
     // We'll want to remember what the IES used to be before the update for
     // dependency invalidation purposes.
-    const old_resolved_ies = if (func.analysis(ip).inferred_error_set)
-        func.resolvedErrorSet(ip).*
+    const old_resolved_ies = if (func.analysisUnordered(ip).inferred_error_set)
+        func.resolvedErrorSetUnordered(ip)
     else
         .none;
 
@@ -671,7 +671,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
         zcu.deleteUnitReferences(func_as_depender);
     }
 
-    switch (func.analysis(ip).state) {
+    switch (func.analysisUnordered(ip).state) {
         .success => if (!was_outdated) return,
         .sema_failure,
         .dependency_failure,
@@ -693,11 +693,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
 
     var air = pt.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
         error.AnalysisFail => {
-            if (func.analysis(ip).state == .in_progress) {
+            if (func.analysisUnordered(ip).state == .in_progress) {
                 // If this decl caused the compile error, the analysis field would
                 // be changed to indicate it was this Decl's fault. Because this
                 // did not happen, we infer here that it was a dependency failure.
-                func.analysis(ip).state = .dependency_failure;
+                func.setAnalysisState(ip, .dependency_failure);
             }
             return error.AnalysisFail;
         },
@@ -707,8 +707,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
 
     const invalidate_ies_deps = i: {
         if (!was_outdated) break :i false;
-        if (!func.analysis(ip).inferred_error_set) break :i true;
-        const new_resolved_ies = func.resolvedErrorSet(ip).*;
+        if (!func.analysisUnordered(ip).inferred_error_set) break :i true;
+        const new_resolved_ies = func.resolvedErrorSetUnordered(ip);
         break :i new_resolved_ies != old_resolved_ies;
     };
     if (invalidate_ies_deps) {
@@ -783,7 +783,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
                         .{@errorName(err)},
                     ),
                 );
-                func.analysis(ip).state = .codegen_failure;
+                func.setAnalysisState(ip, .codegen_failure);
                 return;
             },
         };
@@ -797,12 +797,12 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
         // Correcting this failure will involve changing a type this function
         // depends on, hence triggering re-analysis of this function, so this
         // interacts correctly with incremental compilation.
-        func.analysis(ip).state = .codegen_failure;
+        func.setAnalysisState(ip, .codegen_failure);
     } else if (comp.bin_file) |lf| {
         lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
             error.AnalysisFail => {
-                func.analysis(ip).state = .codegen_failure;
+                func.setAnalysisState(ip, .codegen_failure);
             },
             else => {
                 try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
@@ -812,7 +812,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
                     "unable to codegen: {s}",
                     .{@errorName(err)},
                 ));
-                func.analysis(ip).state = .codegen_failure;
+                func.setAnalysisState(ip, .codegen_failure);
                 try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
             },
         };
@@ -1080,7 +1080,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
     const old_linksection = decl.@"linksection";
     const old_addrspace = decl.@"addrspace";
     const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func|
-        prev_func.analysis(ip).state == .inline_only
+        prev_func.analysisUnordered(ip).state == .inline_only
     else
         false;
 
@@ -2037,7 +2037,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
         .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
         .fn_ret_ty_ies = null,
         .owner_func_index = func_index,
-        .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
+        .branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota),
         .comptime_err_ret_trace = &comptime_err_ret_trace,
     };
     defer sema.deinit();
@@ -2047,14 +2047,14 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? });
     try sema.declareDependency(.{ .decl_val = decl_index });
 
-    if (func.analysis(ip).inferred_error_set) {
+    if (func.analysisUnordered(ip).inferred_error_set) {
         const ies = try arena.create(Sema.InferredErrorSet);
         ies.* = .{ .func = func_index };
         sema.fn_ret_ty_ies = ies;
     }
 
     // reset in case calls to errorable functions are removed.
-    func.analysis(ip).calls_or_awaits_errorable_fn = false;
+    func.setCallsOrAwaitsErrorableFn(ip, false);
 
     // First few indexes of extra are reserved and set at the end.
     const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
@@ -2080,7 +2080,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     };
     defer inner_block.instructions.deinit(gpa);
 
-    const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip));
+    const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip));
 
     // Here we are performing "runtime semantic analysis" for a function body, which means
     // we must map the parameter ZIR instructions to `arg` AIR instructions.
@@ -2149,7 +2149,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
         });
     }
 
-    func.analysis(ip).state = .in_progress;
+    func.setAnalysisState(ip, .in_progress);
 
     const last_arg_index = inner_block.instructions.items.len;
 
@@ -2176,7 +2176,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     }
 
     // If we don't get an error return trace from a caller, create our own.
-    if (func.analysis(ip).calls_or_awaits_errorable_fn and
+    if (func.analysisUnordered(ip).calls_or_awaits_errorable_fn and
         mod.comp.config.any_error_tracing and
         !sema.fn_ret_ty.isError(mod))
     {
@@ -2218,10 +2218,10 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
             else => |e| return e,
         };
         assert(ies.resolved != .none);
-        ip.funcIesResolved(func_index).* = ies.resolved;
+        ip.funcSetIesResolved(func_index, ies.resolved);
     }
 
-    func.analysis(ip).state = .success;
+    func.setAnalysisState(ip, .success);
 
     // Finally we must resolve the return type and parameter types so that backends
     // have full access to type information.
@@ -2415,6 +2415,7 @@ fn processExportsInner(
 ) error{OutOfMemory}!void {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
 
     for (export_indices) |export_idx| {
         const new_export = &zcu.all_exports.items[export_idx];
@@ -2423,7 +2424,7 @@ fn processExportsInner(
             new_export.status = .failed_retryable;
             try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
             const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{
-                new_export.opts.name.fmt(&zcu.intern_pool),
+                new_export.opts.name.fmt(ip),
             });
             errdefer msg.destroy(gpa);
             const other_export = zcu.all_exports.items[gop.value_ptr.*];
@@ -2443,8 +2444,7 @@ fn processExportsInner(
             if (!decl.owns_tv) break :failed false;
             if (decl.typeOf(zcu).zigTypeTag(zcu) != .Fn) break :failed false;
             // Check if owned function failed
-            const a = zcu.funcInfo(decl.val.toIntern()).analysis(&zcu.intern_pool);
-            break :failed a.state != .success;
+            break :failed zcu.funcInfo(decl.val.toIntern()).analysisUnordered(ip).state != .success;
         }) {
             // This `Decl` is failed, so was never sent to codegen.
             // TODO: we should probably tell the backend to delete any old exports of this `Decl`?
@@ -3072,7 +3072,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp
             most_aligned_field_size = field_size;
         }
     }
-    const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
+    const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag();
     if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) {
         return .{
             .abi_size = payload_align.forward(payload_size),
@@ -3091,7 +3091,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp
     const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt);
     const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1");
     return .{
-        .abi_size = loaded_union.size(ip).*,
+        .abi_size = loaded_union.sizeUnordered(ip),
         .abi_align = tag_align.max(payload_align),
         .most_aligned_field = most_aligned_field,
         .most_aligned_field_size = most_aligned_field_size,
@@ -3100,7 +3100,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp
         .payload_align = payload_align,
         .tag_align = tag_align,
         .tag_size = tag_size,
-        .padding = loaded_union.padding(ip).*,
+        .padding = loaded_union.paddingUnordered(ip),
     };
 }
 
@@ -3142,7 +3142,7 @@ pub fn unionFieldNormalAlignmentAdvanced(
     strat: Type.ResolveStrat,
 ) Zcu.SemaError!InternPool.Alignment {
     const ip = &pt.zcu.intern_pool;
-    assert(loaded_union.flagsPtr(ip).layout != .@"packed");
+    assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
     const field_align = loaded_union.fieldAlign(ip, field_index);
     if (field_align != .none) return field_align;
     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
src/codegen.zig
@@ -548,8 +548,8 @@ pub fn generateSymbol(
                             }
                         }
 
-                        const size = struct_type.size(ip).*;
-                        const alignment = struct_type.flagsPtr(ip).alignment.toByteUnits().?;
+                        const size = struct_type.sizeUnordered(ip);
+                        const alignment = struct_type.flagsUnordered(ip).alignment.toByteUnits().?;
 
                         const padding = math.cast(
                             usize,
src/Compilation.zig
@@ -3011,7 +3011,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
             }
         }
 
-        if (zcu.intern_pool.global_error_set.mutate.list.len > zcu.error_limit) {
+        if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) {
             total += 1;
         }
     }
@@ -3140,7 +3140,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
             try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
         }
 
-        const actual_error_count = zcu.intern_pool.global_error_set.mutate.list.len;
+        const actual_error_count = zcu.intern_pool.global_error_set.getNamesFromMainThread().len;
         if (actual_error_count > zcu.error_limit) {
             try bundle.addRootErrorMessage(.{
                 .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{
src/InternPool.zig
@@ -147,8 +147,6 @@ pub fn trackZir(
     }
     defer shard.mutate.tracked_inst_map.len += 1;
     const local = ip.getLocal(tid);
-    local.mutate.tracked_insts.mutex.lock();
-    defer local.mutate.tracked_insts.mutex.unlock();
     const list = local.getMutableTrackedInsts(gpa);
     try list.ensureUnusedCapacity(1);
     const map_header = map.header().*;
@@ -418,10 +416,10 @@ const Local = struct {
         arena: std.heap.ArenaAllocator.State,
 
         items: ListMutate,
-        extra: MutexListMutate,
+        extra: ListMutate,
         limbs: ListMutate,
         strings: ListMutate,
-        tracked_insts: MutexListMutate,
+        tracked_insts: ListMutate,
         files: ListMutate,
         maps: ListMutate,
 
@@ -471,20 +469,12 @@ const Local = struct {
     const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace });
 
     const ListMutate = struct {
+        mutex: std.Thread.Mutex,
         len: u32,
 
         const empty: ListMutate = .{
-            .len = 0,
-        };
-    };
-
-    const MutexListMutate = struct {
-        mutex: std.Thread.Mutex,
-        list: ListMutate,
-
-        const empty: MutexListMutate = .{
             .mutex = .{},
-            .list = ListMutate.empty,
+            .len = 0,
         };
     };
 
@@ -694,6 +684,8 @@ const Local = struct {
                         const new_slice = new_list.view().slice();
                         inline for (fields) |field| @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]);
                     }
+                    mutable.mutate.mutex.lock();
+                    defer mutable.mutate.mutex.unlock();
                     mutable.list.release(new_list);
                 }
 
@@ -760,7 +752,7 @@ const Local = struct {
         return .{
             .gpa = gpa,
             .arena = &local.mutate.arena,
-            .mutate = &local.mutate.extra.list,
+            .mutate = &local.mutate.extra,
             .list = &local.shared.extra,
         };
     }
@@ -802,7 +794,7 @@ const Local = struct {
         return .{
             .gpa = gpa,
             .arena = &local.mutate.arena,
-            .mutate = &local.mutate.tracked_insts.list,
+            .mutate = &local.mutate.tracked_insts,
             .list = &local.shared.tracked_insts,
         };
     }
@@ -1714,29 +1706,76 @@ pub const Key = union(enum) {
         comptime_args: Index.Slice,
 
         /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
-        pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis {
+        fn analysisPtr(func: Func, ip: *InternPool) *FuncAnalysis {
             const extra = ip.getLocalShared(func.tid).extra.acquire();
             return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]);
         }
 
+        pub fn analysisUnordered(func: Func, ip: *const InternPool) FuncAnalysis {
+            return @atomicLoad(FuncAnalysis, func.analysisPtr(@constCast(ip)), .unordered);
+        }
+
+        pub fn setAnalysisState(func: Func, ip: *InternPool, state: FuncAnalysis.State) void {
+            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
+            extra_mutex.lock();
+            defer extra_mutex.unlock();
+
+            const analysis_ptr = func.analysisPtr(ip);
+            var analysis = analysis_ptr.*;
+            analysis.state = state;
+            @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
+        }
+
+        pub fn setCallsOrAwaitsErrorableFn(func: Func, ip: *InternPool, value: bool) void {
+            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
+            extra_mutex.lock();
+            defer extra_mutex.unlock();
+
+            const analysis_ptr = func.analysisPtr(ip);
+            var analysis = analysis_ptr.*;
+            analysis.calls_or_awaits_errorable_fn = value;
+            @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
+        }
+
         /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
-        pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *TrackedInst.Index {
+        fn zirBodyInstPtr(func: Func, ip: *InternPool) *TrackedInst.Index {
             const extra = ip.getLocalShared(func.tid).extra.acquire();
             return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]);
         }
 
+        pub fn zirBodyInstUnordered(func: Func, ip: *const InternPool) TrackedInst.Index {
+            return @atomicLoad(TrackedInst.Index, func.zirBodyInstPtr(@constCast(ip)), .unordered);
+        }
+
         /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
-        pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 {
+        fn branchQuotaPtr(func: Func, ip: *InternPool) *u32 {
             const extra = ip.getLocalShared(func.tid).extra.acquire();
             return &extra.view().items(.@"0")[func.branch_quota_extra_index];
         }
 
+        pub fn branchQuotaUnordered(func: Func, ip: *const InternPool) u32 {
+            return @atomicLoad(u32, func.branchQuotaPtr(@constCast(ip)), .unordered);
+        }
+
+        pub fn maxBranchQuota(func: Func, ip: *InternPool, new_branch_quota: u32) void {
+            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
+            extra_mutex.lock();
+            defer extra_mutex.unlock();
+
+            const branch_quota_ptr = func.branchQuotaPtr(ip);
+            @atomicStore(u32, branch_quota_ptr, @max(branch_quota_ptr.*, new_branch_quota), .release);
+        }
+
         /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
-        pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index {
+        fn resolvedErrorSetPtr(func: Func, ip: *InternPool) *Index {
             const extra = ip.getLocalShared(func.tid).extra.acquire();
-            assert(func.analysis(ip).inferred_error_set);
+            assert(func.analysisUnordered(ip).inferred_error_set);
             return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]);
         }
+
+        pub fn resolvedErrorSetUnordered(func: Func, ip: *const InternPool) Index {
+            return @atomicLoad(Index, func.resolvedErrorSetPtr(@constCast(ip)), .unordered);
+        }
     };
 
     pub const Int = struct {
@@ -2663,47 +2702,170 @@ pub const LoadedUnionType = struct {
     /// This accessor is provided so that the tag type can be mutated, and so that
     /// when it is mutated, the mutations are observed.
     /// The returned pointer expires with any addition to the `InternPool`.
-    pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index {
+    fn tagTypePtr(self: LoadedUnionType, ip: *InternPool) *Index {
         const extra = ip.getLocalShared(self.tid).extra.acquire();
         const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
         return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]);
     }
 
+    pub fn tagTypeUnordered(u: LoadedUnionType, ip: *const InternPool) Index {
+        return @atomicLoad(Index, u.tagTypePtr(@constCast(ip)), .unordered);
+    }
+
+    pub fn setTagType(u: LoadedUnionType, ip: *InternPool, tag_type: Index) void {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        @atomicStore(Index, u.tagTypePtr(ip), tag_type, .release);
+    }
+
     /// The returned pointer expires with any addition to the `InternPool`.
-    pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
+    fn flagsPtr(self: LoadedUnionType, ip: *InternPool) *Tag.TypeUnion.Flags {
         const extra = ip.getLocalShared(self.tid).extra.acquire();
         const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
         return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]);
     }
 
+    pub fn flagsUnordered(u: LoadedUnionType, ip: *const InternPool) Tag.TypeUnion.Flags {
+        return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(@constCast(ip)), .unordered);
+    }
+
+    pub fn setStatus(u: LoadedUnionType, ip: *InternPool, status: Status) void {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.status = status;
+        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+    }
+
+    pub fn setStatusIfLayoutWip(u: LoadedUnionType, ip: *InternPool, status: Status) void {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        if (flags.status == .layout_wip) flags.status = status;
+        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+    }
+
+    pub fn setAlignment(u: LoadedUnionType, ip: *InternPool, alignment: Alignment) void {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.alignment = alignment;
+        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+    }
+
+    pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool) bool {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        defer if (flags.status == .field_types_wip) {
+            flags.assumed_runtime_bits = true;
+            @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+        };
+        return flags.status == .field_types_wip;
+    }
+
+    pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        defer if (flags.requires_comptime == .unknown) {
+            flags.requires_comptime = .wip;
+            @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+        };
+        return flags.requires_comptime;
+    }
+
+    pub fn setRequiresComptime(u: LoadedUnionType, ip: *InternPool, requires_comptime: RequiresComptime) void {
+        assert(requires_comptime != .wip); // see setRequiresComptimeWip
+
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.requires_comptime = requires_comptime;
+        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+    }
+
+    pub fn assumePointerAlignedIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, ptr_align: Alignment) bool {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        defer if (flags.status == .field_types_wip) {
+            flags.alignment = ptr_align;
+            flags.assumed_pointer_aligned = true;
+            @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
+        };
+        return flags.status == .field_types_wip;
+    }
+
     /// The returned pointer expires with any addition to the `InternPool`.
-    pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 {
+    fn sizePtr(self: LoadedUnionType, ip: *InternPool) *u32 {
         const extra = ip.getLocalShared(self.tid).extra.acquire();
         const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
         return &extra.view().items(.@"0")[self.extra_index + field_index];
     }
 
+    pub fn sizeUnordered(u: LoadedUnionType, ip: *const InternPool) u32 {
+        return @atomicLoad(u32, u.sizePtr(@constCast(ip)), .unordered);
+    }
+
     /// The returned pointer expires with any addition to the `InternPool`.
-    pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 {
+    fn paddingPtr(self: LoadedUnionType, ip: *InternPool) *u32 {
         const extra = ip.getLocalShared(self.tid).extra.acquire();
         const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
         return &extra.view().items(.@"0")[self.extra_index + field_index];
     }
 
+    pub fn paddingUnordered(u: LoadedUnionType, ip: *const InternPool) u32 {
+        return @atomicLoad(u32, u.paddingPtr(@constCast(ip)), .unordered);
+    }
+
     pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool {
-        return self.flagsPtr(ip).runtime_tag.hasTag();
+        return self.flagsUnordered(ip).runtime_tag.hasTag();
     }
 
     pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool {
-        return self.flagsPtr(ip).status.haveFieldTypes();
+        return self.flagsUnordered(ip).status.haveFieldTypes();
     }
 
     pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool {
-        return self.flagsPtr(ip).status.haveLayout();
+        return self.flagsUnordered(ip).status.haveLayout();
     }
 
-    pub fn getLayout(self: LoadedUnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
-        return self.flagsPtr(ip).layout;
+    pub fn setHaveLayout(u: LoadedUnionType, ip: *InternPool, size: u32, padding: u32, alignment: Alignment) void {
+        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        @atomicStore(u32, u.sizePtr(ip), size, .unordered);
+        @atomicStore(u32, u.paddingPtr(ip), padding, .unordered);
+        const flags_ptr = u.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.alignment = alignment;
+        flags.status = .have_layout;
+        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
     }
 
     pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: usize) Alignment {
@@ -2726,7 +2888,7 @@ pub const LoadedUnionType = struct {
 
     pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void {
         if (aligns.len == 0) return;
-        assert(self.flagsPtr(ip).any_aligned_fields);
+        assert(self.flagsUnordered(ip).any_aligned_fields);
         @memcpy(self.field_aligns.get(ip), aligns);
     }
 };
@@ -2877,26 +3039,26 @@ pub const LoadedStructType = struct {
     };
 
     /// Look up field index based on field name.
-    pub fn nameIndex(self: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
-        const names_map = self.names_map.unwrap() orelse {
+    pub fn nameIndex(s: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
+        const names_map = s.names_map.unwrap() orelse {
             const i = name.toUnsigned(ip) orelse return null;
-            if (i >= self.field_types.len) return null;
+            if (i >= s.field_types.len) return null;
             return i;
         };
         const map = names_map.getConst(ip);
-        const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
+        const adapter: NullTerminatedString.Adapter = .{ .strings = s.field_names.get(ip) };
         const field_index = map.getIndexAdapted(name, adapter) orelse return null;
         return @intCast(field_index);
     }
 
     /// Returns the already-existing field with the same name, if any.
     pub fn addFieldName(
-        self: LoadedStructType,
+        s: LoadedStructType,
         ip: *InternPool,
         name: NullTerminatedString,
     ) ?u32 {
-        const extra = ip.getLocalShared(self.tid).extra.acquire();
-        return ip.addFieldName(extra, self.names_map.unwrap().?, self.field_names.start, name);
+        const extra = ip.getLocalShared(s.tid).extra.acquire();
+        return ip.addFieldName(extra, s.names_map.unwrap().?, s.field_names.start, name);
     }
 
     pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment {
@@ -2924,143 +3086,313 @@ pub const LoadedStructType = struct {
         s.comptime_bits.setBit(ip, i);
     }
 
+    /// The returned pointer expires with any addition to the `InternPool`.
+    /// Asserts the struct is not packed.
+    fn flagsPtr(s: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags {
+        assert(s.layout != .@"packed");
+        const extra = ip.getLocalShared(s.tid).extra.acquire();
+        const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
+        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]);
+    }
+
+    pub fn flagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStruct.Flags {
+        return @atomicLoad(Tag.TypeStruct.Flags, s.flagsPtr(@constCast(ip)), .unordered);
+    }
+
+    /// The returned pointer expires with any addition to the `InternPool`.
+    /// Asserts that the struct is packed.
+    fn packedFlagsPtr(s: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags {
+        assert(s.layout == .@"packed");
+        const extra = ip.getLocalShared(s.tid).extra.acquire();
+        const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
+        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]);
+    }
+
+    pub fn packedFlagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStructPacked.Flags {
+        return @atomicLoad(Tag.TypeStructPacked.Flags, s.packedFlagsPtr(@constCast(ip)), .unordered);
+    }
+
     /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
     /// complicated logic.
-    pub fn knownNonOpv(s: LoadedStructType, ip: *InternPool) bool {
+    pub fn knownNonOpv(s: LoadedStructType, ip: *const InternPool) bool {
         return switch (s.layout) {
             .@"packed" => false,
-            .auto, .@"extern" => s.flagsPtr(ip).known_non_opv,
+            .auto, .@"extern" => s.flagsUnordered(ip).known_non_opv,
         };
     }
 
-    /// The returned pointer expires with any addition to the `InternPool`.
-    /// Asserts the struct is not packed.
-    pub fn flagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags {
-        assert(self.layout != .@"packed");
-        const extra = ip.getLocalShared(self.tid).extra.acquire();
-        const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
-        return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]);
+    pub fn requiresComptime(s: LoadedStructType, ip: *const InternPool) RequiresComptime {
+        return s.flagsUnordered(ip).requires_comptime;
     }
 
-    /// The returned pointer expires with any addition to the `InternPool`.
-    /// Asserts that the struct is packed.
-    pub fn packedFlagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags {
-        assert(self.layout == .@"packed");
-        const extra = ip.getLocalShared(self.tid).extra.acquire();
-        const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
-        return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]);
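+    /// Returns the previous requires_comptime state; if it was .unknown, upgrades
+    /// it to .wip. Serialized by the extra mutex.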
+    pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool) RequiresComptime {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        defer if (flags.requires_comptime == .unknown) {
+            flags.requires_comptime = .wip;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        };
+        return flags.requires_comptime;
+    }
+
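+    /// Publishes a resolved requires_comptime state. Serialized by the extra mutex.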
+    pub fn setRequiresComptime(s: LoadedStructType, ip: *InternPool, requires_comptime: RequiresComptime) void {
+        assert(requires_comptime != .wip); // see setRequiresComptimeWip
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.requires_comptime = requires_comptime;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
     }
 
     pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
         if (s.layout == .@"packed") return false;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         const flags_ptr = s.flagsPtr(ip);
-        if (flags_ptr.field_types_wip) {
-            flags_ptr.assumed_runtime_bits = true;
-            return true;
-        }
-        return false;
+        var flags = flags_ptr.*;
+        defer if (flags.field_types_wip) {
+            flags.assumed_runtime_bits = true;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        };
+        return flags.field_types_wip;
     }
 
-    pub fn setTypesWip(s: LoadedStructType, ip: *InternPool) bool {
+    pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
         if (s.layout == .@"packed") return false;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         const flags_ptr = s.flagsPtr(ip);
-        if (flags_ptr.field_types_wip) return true;
-        flags_ptr.field_types_wip = true;
-        return false;
+        var flags = flags_ptr.*;
+        defer {
+            flags.field_types_wip = true;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        }
+        return flags.field_types_wip;
     }
 
-    pub fn clearTypesWip(s: LoadedStructType, ip: *InternPool) void {
+    pub fn clearFieldTypesWip(s: LoadedStructType, ip: *InternPool) void {
         if (s.layout == .@"packed") return;
-        s.flagsPtr(ip).field_types_wip = false;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.field_types_wip = false;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
     }
 
     pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool {
         if (s.layout == .@"packed") return false;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         const flags_ptr = s.flagsPtr(ip);
-        if (flags_ptr.layout_wip) return true;
-        flags_ptr.layout_wip = true;
-        return false;
+        var flags = flags_ptr.*;
+        defer {
+            flags.layout_wip = true;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        }
+        return flags.layout_wip;
     }
 
     pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void {
         if (s.layout == .@"packed") return;
-        s.flagsPtr(ip).layout_wip = false;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.layout_wip = false;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
     }
 
-    pub fn setAlignmentWip(s: LoadedStructType, ip: *InternPool) bool {
-        if (s.layout == .@"packed") return false;
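+    /// Publishes the resolved alignment. Serialized by the extra mutex.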
+    pub fn setAlignment(s: LoadedStructType, ip: *InternPool, alignment: Alignment) void {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.alignment = alignment;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+    }
+
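+    /// If field type resolution is in progress, assumes pointer alignment and
+    /// returns true; otherwise returns false without modifying the flags.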
+    pub fn assumePointerAlignedIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        defer if (flags.field_types_wip) {
+            flags.alignment = ptr_align;
+            flags.assumed_pointer_aligned = true;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        };
+        return flags.field_types_wip;
+    }
+
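+    /// If alignment resolution is already in progress, assumes pointer alignment
+    /// and returns true; otherwise marks alignment resolution as in progress and
+    /// returns false.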
+    pub fn assumePointerAlignedIfWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         const flags_ptr = s.flagsPtr(ip);
-        if (flags_ptr.alignment_wip) return true;
-        flags_ptr.alignment_wip = true;
-        return false;
+        var flags = flags_ptr.*;
+        defer {
+            if (flags.alignment_wip) {
+                flags.alignment = ptr_align;
+                flags.assumed_pointer_aligned = true;
+            } else flags.alignment_wip = true;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        }
+        return flags.alignment_wip;
     }
 
     pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void {
         if (s.layout == .@"packed") return;
-        s.flagsPtr(ip).alignment_wip = false;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.alignment_wip = false;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
     }
 
     pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool {
-        const local = ip.getLocal(s.tid);
-        local.mutate.extra.mutex.lock();
-        defer local.mutate.extra.mutex.unlock();
-        return switch (s.layout) {
-            .@"packed" => @as(Tag.TypeStructPacked.Flags, @bitCast(@atomicRmw(
-                u32,
-                @as(*u32, @ptrCast(s.packedFlagsPtr(ip))),
-                .Or,
-                @bitCast(Tag.TypeStructPacked.Flags{ .field_inits_wip = true }),
-                .acq_rel,
-            ))).field_inits_wip,
-            .auto, .@"extern" => @as(Tag.TypeStruct.Flags, @bitCast(@atomicRmw(
-                u32,
-                @as(*u32, @ptrCast(s.flagsPtr(ip))),
-                .Or,
-                @bitCast(Tag.TypeStruct.Flags{ .field_inits_wip = true }),
-                .acq_rel,
-            ))).field_inits_wip,
-        };
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        switch (s.layout) {
+            .@"packed" => {
+                const flags_ptr = s.packedFlagsPtr(ip);
+                var flags = flags_ptr.*;
+                defer {
+                    flags.field_inits_wip = true;
+                    @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release);
+                }
+                return flags.field_inits_wip;
+            },
+            .auto, .@"extern" => {
+                const flags_ptr = s.flagsPtr(ip);
+                var flags = flags_ptr.*;
+                defer {
+                    flags.field_inits_wip = true;
+                    @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+                }
+                return flags.field_inits_wip;
+            },
+        }
     }
 
     pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         switch (s.layout) {
-            .@"packed" => s.packedFlagsPtr(ip).field_inits_wip = false,
-            .auto, .@"extern" => s.flagsPtr(ip).field_inits_wip = false,
+            .@"packed" => {
+                const flags_ptr = s.packedFlagsPtr(ip);
+                var flags = flags_ptr.*;
+                flags.field_inits_wip = false;
+                @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release);
+            },
+            .auto, .@"extern" => {
+                const flags_ptr = s.flagsPtr(ip);
+                var flags = flags_ptr.*;
+                flags.field_inits_wip = false;
+                @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+            },
         }
     }
 
     pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool {
         if (s.layout == .@"packed") return true;
+
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         const flags_ptr = s.flagsPtr(ip);
-        if (flags_ptr.fully_resolved) return true;
-        flags_ptr.fully_resolved = true;
-        return false;
+        var flags = flags_ptr.*;
+        defer {
+            flags.fully_resolved = true;
+            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+        }
+        return flags.fully_resolved;
     }
 
     pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void {
-        s.flagsPtr(ip).fully_resolved = false;
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.fully_resolved = false;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
     }
 
     /// The returned pointer expires with any addition to the `InternPool`.
     /// Asserts the struct is not packed.
-    pub fn size(self: LoadedStructType, ip: *InternPool) *u32 {
-        assert(self.layout != .@"packed");
-        const extra = ip.getLocalShared(self.tid).extra.acquire();
+    fn sizePtr(s: LoadedStructType, ip: *InternPool) *u32 {
+        assert(s.layout != .@"packed");
+        const extra = ip.getLocalShared(s.tid).extra.acquire();
         const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
-        return @ptrCast(&extra.view().items(.@"0")[self.extra_index + size_field_index]);
+        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + size_field_index]);
+    }
+
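+    /// Atomically loads the struct's resolved size. Asserts the struct is not packed.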
+    pub fn sizeUnordered(s: LoadedStructType, ip: *const InternPool) u32 {
+        return @atomicLoad(u32, s.sizePtr(@constCast(ip)), .unordered);
     }
 
     /// The backing integer type of the packed struct. Whether zig chooses
     /// this type or the user specifies it, it is stored here. This will be
     /// set to `none` until the layout is resolved.
     /// Asserts the struct is packed.
-    pub fn backingIntType(s: LoadedStructType, ip: *InternPool) *Index {
+    fn backingIntTypePtr(s: LoadedStructType, ip: *InternPool) *Index {
         assert(s.layout == .@"packed");
         const extra = ip.getLocalShared(s.tid).extra.acquire();
         const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
         return @ptrCast(&extra.view().items(.@"0")[s.extra_index + field_index]);
     }
 
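+    /// Atomically loads the backing integer type. Asserts the struct is packed.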
+    pub fn backingIntTypeUnordered(s: LoadedStructType, ip: *const InternPool) Index {
+        return @atomicLoad(Index, s.backingIntTypePtr(@constCast(ip)), .unordered);
+    }
+
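+    /// Publishes the resolved backing integer type. Serialized by the extra mutex.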
+    pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, backing_int_ty: Index) void {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        @atomicStore(Index, s.backingIntTypePtr(ip), backing_int_ty, .release);
+    }
+
     /// Asserts the struct is not packed.
     pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
         assert(s.layout != .@"packed");
@@ -3073,29 +3405,56 @@ pub const LoadedStructType = struct {
         return types.len == 0 or types[0] != .none;
     }
 
-    pub fn haveFieldInits(s: LoadedStructType, ip: *InternPool) bool {
+    pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool {
         return switch (s.layout) {
-            .@"packed" => s.packedFlagsPtr(ip).inits_resolved,
-            .auto, .@"extern" => s.flagsPtr(ip).inits_resolved,
+            .@"packed" => s.packedFlagsUnordered(ip).inits_resolved,
+            .auto, .@"extern" => s.flagsUnordered(ip).inits_resolved,
         };
     }
 
     pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
         switch (s.layout) {
-            .@"packed" => s.packedFlagsPtr(ip).inits_resolved = true,
-            .auto, .@"extern" => s.flagsPtr(ip).inits_resolved = true,
+            .@"packed" => {
+                const flags_ptr = s.packedFlagsPtr(ip);
+                var flags = flags_ptr.*;
+                flags.inits_resolved = true;
+                @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release);
+            },
+            .auto, .@"extern" => {
+                const flags_ptr = s.flagsPtr(ip);
+                var flags = flags_ptr.*;
+                flags.inits_resolved = true;
+                @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+            },
         }
     }
 
     pub fn haveLayout(s: LoadedStructType, ip: *InternPool) bool {
         return switch (s.layout) {
-            .@"packed" => s.backingIntType(ip).* != .none,
-            .auto, .@"extern" => s.flagsPtr(ip).layout_resolved,
+            .@"packed" => s.backingIntTypeUnordered(ip) != .none,
+            .auto, .@"extern" => s.flagsUnordered(ip).layout_resolved,
         };
     }
 
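+    /// Publishes the resolved size and alignment and marks the layout resolved.
+    /// Serialized by the extra mutex.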
+    pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, size: u32, alignment: Alignment) void {
+        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
+        extra_mutex.lock();
+        defer extra_mutex.unlock();
+
+        @atomicStore(u32, s.sizePtr(ip), size, .unordered);
+        const flags_ptr = s.flagsPtr(ip);
+        var flags = flags_ptr.*;
+        flags.alignment = alignment;
+        flags.layout_resolved = true;
+        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
+    }
+
     pub fn isTuple(s: LoadedStructType, ip: *InternPool) bool {
-        return s.layout != .@"packed" and s.flagsPtr(ip).is_tuple;
+        return s.layout != .@"packed" and s.flagsUnordered(ip).is_tuple;
     }
 
     pub fn hasReorderedFields(s: LoadedStructType) bool {
@@ -3209,7 +3568,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
             const decl: DeclIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "decl").?]);
             const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
             const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?];
-            const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .monotonic));
+            const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered));
             var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len);
             const captures_len = if (flags.any_captures) c: {
                 const len = extra_list.view().items(.@"0")[extra_index];
@@ -3317,7 +3676,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
             const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?];
             const namespace: OptionalNamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]);
             const names_map: MapIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "names_map").?]);
-            const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .monotonic));
+            const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered));
             var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len);
             const has_inits = item.tag == .type_struct_packed_inits;
             const captures_len = if (flags.any_captures) c: {
@@ -5442,10 +5801,10 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
             .arena = .{},
 
             .items = Local.ListMutate.empty,
-            .extra = Local.MutexListMutate.empty,
+            .extra = Local.ListMutate.empty,
             .limbs = Local.ListMutate.empty,
             .strings = Local.ListMutate.empty,
-            .tracked_insts = Local.MutexListMutate.empty,
+            .tracked_insts = Local.ListMutate.empty,
             .files = Local.ListMutate.empty,
             .maps = Local.ListMutate.empty,
 
@@ -5635,7 +5994,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             const extra_list = unwrapped_index.getExtra(ip);
             const extra_items = extra_list.view().items(.@"0");
             const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
-            const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .monotonic));
+            const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered));
             const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len);
             if (flags.is_reified) {
                 assert(!flags.any_captures);
@@ -5658,7 +6017,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             const extra_list = unwrapped_index.getExtra(ip);
             const extra_items = extra_list.view().items(.@"0");
             const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]);
-            const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .monotonic));
+            const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered));
             const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len);
             if (flags.is_reified) {
                 assert(!flags.any_captures);
@@ -6155,7 +6514,7 @@ fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke
 fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func {
     const extra_items = extra.view().items(.@"0");
     const analysis_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?;
-    const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .monotonic));
+    const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .unordered));
     const owner_decl: DeclIndex = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").?]);
     const ty: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?]);
     const generic_owner: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?]);
@@ -8702,7 +9061,7 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void {
         // Restore the original item at this index.
         assert(static_keys[@intFromEnum(index)] == .simple_type);
         const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view();
-        @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .simple_type, .monotonic);
+        @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .simple_type, .unordered);
         return;
     }
 
@@ -8719,7 +9078,7 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void {
     // Thus, we will rewrite the tag to `removed`, leaking the item until
     // next GC but causing `KeyAdapter` to ignore it.
     const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view();
-    @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .monotonic);
+    @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .unordered);
 }
 
 fn addInt(
@@ -9415,9 +9774,11 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index {
 /// This is only legal because the initializer is not part of the hash.
 pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
     const unwrapped_index = index.unwrap(ip);
+
     const local = ip.getLocal(unwrapped_index.tid);
     local.mutate.extra.mutex.lock();
     defer local.mutate.extra.mutex.unlock();
+
     const extra_items = local.shared.extra.view().items(.@"0");
     const item = unwrapped_index.getItem(ip);
     assert(item.tag == .variable);
@@ -9436,7 +9797,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
     var decls_len: usize = 0;
     for (ip.locals) |*local| {
         items_len += local.mutate.items.len;
-        extra_len += local.mutate.extra.list.len;
+        extra_len += local.mutate.extra.len;
         limbs_len += local.mutate.limbs.len;
         decls_len += local.mutate.decls.buckets_list.len;
     }
@@ -10730,29 +11091,29 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
     };
 }
 
-pub fn isFuncBody(ip: *const InternPool, index: Index) bool {
-    return switch (index.unwrap(ip).getTag(ip)) {
+pub fn isFuncBody(ip: *const InternPool, func: Index) bool {
+    return switch (func.unwrap(ip).getTag(ip)) {
         .func_decl, .func_instance, .func_coerced => true,
         else => false,
     };
 }
 
-pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis {
-    const unwrapped_index = index.unwrap(ip);
-    const extra = unwrapped_index.getExtra(ip);
-    const item = unwrapped_index.getItem(ip);
+fn funcAnalysisPtr(ip: *InternPool, func: Index) *FuncAnalysis {
+    const unwrapped_func = func.unwrap(ip);
+    const extra = unwrapped_func.getExtra(ip);
+    const item = unwrapped_func.getItem(ip);
     const extra_index = switch (item.tag) {
         .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?,
         .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?,
         .func_coerced => {
             const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?;
-            const func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]);
-            const unwrapped_func = func_index.unwrap(ip);
-            const func_item = unwrapped_func.getItem(ip);
-            return @ptrCast(&unwrapped_func.getExtra(ip).view().items(.@"0")[
-                switch (func_item.tag) {
-                    .func_decl => func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?,
-                    .func_instance => func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?,
+            const coerced_func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]);
+            const unwrapped_coerced_func = coerced_func_index.unwrap(ip);
+            const coerced_func_item = unwrapped_coerced_func.getItem(ip);
+            return @ptrCast(&unwrapped_coerced_func.getExtra(ip).view().items(.@"0")[
+                switch (coerced_func_item.tag) {
+                    .func_decl => coerced_func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?,
+                    .func_instance => coerced_func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?,
                     else => unreachable,
                 }
             ]);
@@ -10762,14 +11123,65 @@ pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis {
     return @ptrCast(&extra.view().items(.@"0")[extra_index]);
 }
 
-pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool {
-    return funcAnalysis(ip, i).inferred_error_set;
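+/// Atomically loads a function's FuncAnalysis; pairs with the release stores
+/// done under the extra mutex.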
+pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis {
+    return @atomicLoad(FuncAnalysis, @constCast(ip).funcAnalysisPtr(func), .unordered);
 }
 
-pub fn funcZirBodyInst(ip: *const InternPool, index: Index) TrackedInst.Index {
-    const unwrapped_index = index.unwrap(ip);
-    const item = unwrapped_index.getItem(ip);
-    const item_extra = unwrapped_index.getExtra(ip);
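+/// Publishes a new analysis state. Serialized by the extra mutex.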
+pub fn funcSetAnalysisState(ip: *InternPool, func: Index, state: FuncAnalysis.State) void {
+    const unwrapped_func = func.unwrap(ip);
+    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
+    extra_mutex.lock();
+    defer extra_mutex.unlock();
+
+    const analysis_ptr = ip.funcAnalysisPtr(func);
+    var analysis = analysis_ptr.*;
+    analysis.state = state;
+    @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
+}
+
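+/// Raises the function's stack alignment to at least new_stack_alignment.
+/// Serialized by the extra mutex.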
+pub fn funcMaxStackAlignment(ip: *InternPool, func: Index, new_stack_alignment: Alignment) void {
+    const unwrapped_func = func.unwrap(ip);
+    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
+    extra_mutex.lock();
+    defer extra_mutex.unlock();
+
+    const analysis_ptr = ip.funcAnalysisPtr(func);
+    var analysis = analysis_ptr.*;
+    analysis.stack_alignment = switch (analysis.stack_alignment) {
+        .none => new_stack_alignment,
+        else => |old_stack_alignment| old_stack_alignment.maxStrict(new_stack_alignment),
+    };
+    @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
+}
+
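+/// Sets the calls_or_awaits_errorable_fn flag. Serialized by the extra mutex.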
+pub fn funcSetCallsOrAwaitsErrorableFn(ip: *InternPool, func: Index) void {
+    const unwrapped_func = func.unwrap(ip);
+    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
+    extra_mutex.lock();
+    defer extra_mutex.unlock();
+
+    const analysis_ptr = ip.funcAnalysisPtr(func);
+    var analysis = analysis_ptr.*;
+    analysis.calls_or_awaits_errorable_fn = true;
+    @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
+}
+
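+/// Publishes whether the function is cold. Serialized by the extra mutex.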
+pub fn funcSetCold(ip: *InternPool, func: Index, is_cold: bool) void {
+    const unwrapped_func = func.unwrap(ip);
+    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
+    extra_mutex.lock();
+    defer extra_mutex.unlock();
+
+    const analysis_ptr = ip.funcAnalysisPtr(func);
+    var analysis = analysis_ptr.*;
+    analysis.is_cold = is_cold;
+    @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
+}
+
+pub fn funcZirBodyInst(ip: *const InternPool, func: Index) TrackedInst.Index {
+    const unwrapped_func = func.unwrap(ip);
+    const item = unwrapped_func.getItem(ip);
+    const item_extra = unwrapped_func.getExtra(ip);
     const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?;
     switch (item.tag) {
         .func_decl => return @enumFromInt(item_extra.view().items(.@"0")[item.data + zir_body_inst_field_index]),
@@ -10806,17 +11218,17 @@ pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index {
 /// Returns a mutable pointer to the resolved error set type of an inferred
 /// error set function. The returned pointer is invalidated when anything is
 /// added to `ip`.
-pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index {
+fn iesResolvedPtr(ip: *InternPool, ies_index: Index) *Index {
     const ies_item = ies_index.getItem(ip);
     assert(ies_item.tag == .type_inferred_error_set);
-    return funcIesResolved(ip, ies_item.data);
+    return ip.funcIesResolvedPtr(ies_item.data);
 }
 
 /// Returns a mutable pointer to the resolved error set type of an inferred
 /// error set function. The returned pointer is invalidated when anything is
 /// added to `ip`.
-pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index {
-    assert(funcHasInferredErrorSet(ip, func_index));
+fn funcIesResolvedPtr(ip: *InternPool, func_index: Index) *Index {
+    assert(ip.funcAnalysisUnordered(func_index).inferred_error_set);
     const unwrapped_func = func_index.unwrap(ip);
     const func_extra = unwrapped_func.getExtra(ip);
     const func_item = unwrapped_func.getItem(ip);
@@ -10842,6 +11254,19 @@ pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index {
     return @ptrCast(&func_extra.view().items(.@"0")[extra_index]);
 }
 
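+/// Atomically loads the resolved error set of an inferred error set function.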
+pub fn funcIesResolvedUnordered(ip: *const InternPool, index: Index) Index {
+    return @atomicLoad(Index, @constCast(ip).funcIesResolvedPtr(index), .unordered);
+}
+
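+/// Publishes the resolved error set of an inferred error set function.
+/// Serialized by the extra mutex.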
+pub fn funcSetIesResolved(ip: *InternPool, index: Index, ies: Index) void {
+    const unwrapped_func = index.unwrap(ip);
+    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
+    extra_mutex.lock();
+    defer extra_mutex.unlock();
+
+    @atomicStore(Index, ip.funcIesResolvedPtr(index), ies, .release);
+}
+
 pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func {
     const unwrapped_index = index.unwrap(ip);
     const item = unwrapped_index.getItem(ip);
@@ -10950,7 +11375,10 @@ const GlobalErrorSet = struct {
         names: Names,
         map: Shard.Map(GlobalErrorSet.Index),
     } align(std.atomic.cache_line),
-    mutate: Local.MutexListMutate align(std.atomic.cache_line),
+    mutate: struct {
+        names: Local.ListMutate,
+        map: struct { mutex: std.Thread.Mutex },
+    } align(std.atomic.cache_line),
 
     const Names = Local.List(struct { NullTerminatedString });
 
@@ -10959,7 +11387,10 @@ const GlobalErrorSet = struct {
             .names = Names.empty,
             .map = Shard.Map(GlobalErrorSet.Index).empty,
         },
-        .mutate = Local.MutexListMutate.empty,
+        .mutate = .{
+            .names = Local.ListMutate.empty,
+            .map = .{ .mutex = .{} },
+        },
     };
 
     const Index = enum(Zcu.ErrorInt) {
@@ -10969,7 +11400,7 @@ const GlobalErrorSet = struct {
 
     /// Not thread-safe; may only be called from the main thread.
     pub fn getNamesFromMainThread(ges: *const GlobalErrorSet) []const NullTerminatedString {
-        const len = ges.mutate.list.len;
+        const len = ges.mutate.names.len;
         return if (len > 0) ges.shared.names.view().items(.@"0")[0..len] else &.{};
     }
 
@@ -10994,8 +11425,8 @@ const GlobalErrorSet = struct {
             if (entry.hash != hash) continue;
             if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index;
         }
-        ges.mutate.mutex.lock();
-        defer ges.mutate.mutex.unlock();
+        ges.mutate.map.mutex.lock();
+        defer ges.mutate.map.mutex.unlock();
         if (map.entries != ges.shared.map.entries) {
             map = ges.shared.map;
             map_mask = map.header().mask();
@@ -11012,12 +11443,12 @@ const GlobalErrorSet = struct {
         const mutable_names: Names.Mutable = .{
             .gpa = gpa,
             .arena = arena_state,
-            .mutate = &ges.mutate.list,
+            .mutate = &ges.mutate.names,
             .list = &ges.shared.names,
         };
         try mutable_names.ensureUnusedCapacity(1);
         const map_header = map.header().*;
-        if (ges.mutate.list.len < map_header.capacity * 3 / 5) {
+        if (ges.mutate.names.len < map_header.capacity * 3 / 5) {
             mutable_names.appendAssumeCapacity(.{name});
             const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len);
             const entry = &map.entries[map_index];
src/Sema.zig
@@ -2535,13 +2535,13 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
     }
 
     if (sema.owner_func_index != .none) {
-        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
+        ip.funcSetAnalysisState(sema.owner_func_index, .sema_failure);
     } else {
         sema.owner_decl.analysis = .sema_failure;
     }
 
     if (sema.func_index != .none) {
-        ip.funcAnalysis(sema.func_index).state = .sema_failure;
+        ip.funcSetAnalysisState(sema.func_index, .sema_failure);
     }
 
     return error.AnalysisFail;
@@ -6555,14 +6555,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     }
     sema.prev_stack_alignment_src = src;
 
-    const ip = &mod.intern_pool;
-    const a = ip.funcAnalysis(sema.func_index);
-    if (a.stack_alignment != .none) {
-        a.stack_alignment = @enumFromInt(@max(
-            @intFromEnum(alignment),
-            @intFromEnum(a.stack_alignment),
-        ));
-    }
+    mod.intern_pool.funcMaxStackAlignment(sema.func_index, alignment);
 }
 
 fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
@@ -6575,7 +6568,7 @@ fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
         .needed_comptime_reason = "operand to @setCold must be comptime-known",
     });
     if (sema.func_index == .none) return; // does nothing outside a function
-    ip.funcAnalysis(sema.func_index).is_cold = is_cold;
+    ip.funcSetCold(sema.func_index, is_cold);
 }
 
 fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
@@ -7090,7 +7083,7 @@ fn zirCall(
     const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call);
 
     if (sema.owner_func_index == .none or
-        !mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn)
+        !mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn)
     {
         // No errorable fn actually called; we have no error return trace
         input_is_error = false;
@@ -7798,7 +7791,7 @@ fn analyzeCall(
         _ = ics.callee();
 
         if (!inlining.has_comptime_args) {
-            if (module_fn.analysis(ip).state == .sema_failure)
+            if (module_fn.analysisUnordered(ip).state == .sema_failure)
                 return error.AnalysisFail;
 
             var block_it = block;
@@ -7821,7 +7814,7 @@ fn analyzeCall(
             try sema.resolveInst(fn_info.ret_ty_ref);
         const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } };
         sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
-        if (module_fn.analysis(ip).inferred_error_set) {
+        if (module_fn.analysisUnordered(ip).inferred_error_set) {
             // Create a fresh inferred error set type for inline/comptime calls.
             const ies = try sema.arena.create(InferredErrorSet);
             ies.* = .{ .func = .none };
@@ -7947,7 +7940,7 @@ fn analyzeCall(
         if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
         if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) {
-            ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
+            ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index);
         }
 
         if (try sema.resolveValue(func)) |func_val| {
@@ -8391,7 +8384,7 @@ fn instantiateGenericCall(
     const callee_index = (child_sema.resolveConstDefinedValue(&child_block, LazySrcLoc.unneeded, new_func_inst, undefined) catch unreachable).toIntern();
 
     const callee = zcu.funcInfo(callee_index);
-    callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);
+    callee.maxBranchQuota(ip, sema.branch_quota);
 
     // Make a runtime call to the new function, making sure to omit the comptime args.
     const func_ty = Type.fromInterned(callee.ty);
@@ -8413,7 +8406,7 @@ fn instantiateGenericCall(
     if (sema.owner_func_index != .none and
         Type.fromInterned(func_ty_info.return_type).isError(zcu))
     {
-        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
+        ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index);
     }
 
     try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index }));
@@ -8774,9 +8767,9 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
         const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt));
         if (int > len: {
             const mutate = &ip.global_error_set.mutate;
-            mutate.mutex.lock();
-            defer mutate.mutex.unlock();
-            break :len mutate.list.len;
+            mutate.map.mutex.lock();
+            defer mutate.map.mutex.unlock();
+            break :len mutate.names.len;
         } or int == 0)
             return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
         return Air.internedToRef((try pt.intern(.{ .err = .{
@@ -18400,7 +18393,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try ty.resolveLayout(pt); // Getting alignment requires type layout
             const union_obj = mod.typeToUnion(ty).?;
             const tag_type = union_obj.loadTagType(ip);
-            const layout = union_obj.getLayout(ip);
+            const layout = union_obj.flagsUnordered(ip).layout;
 
             const union_field_vals = try gpa.alloc(InternPool.Index, tag_type.names.len);
             defer gpa.free(union_field_vals);
@@ -18718,8 +18711,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const backing_integer_val = try pt.intern(.{ .opt = .{
                 .ty = (try pt.optionalType(.type_type)).toIntern(),
                 .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
-                    assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod));
-                    break :val packed_struct.backingIntType(ip).*;
+                    assert(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)).isInt(mod));
+                    break :val packed_struct.backingIntTypeUnordered(ip);
                 } else .none,
             } });
 
@@ -19800,7 +19793,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_
         return;
     }
 
-    if (!mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
+    if (!mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
     if (!start_block.ownerModule().error_tracing) return;
 
     assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere
@@ -21058,7 +21051,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern());
 
     if (sema.owner_func_index != .none and
-        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
+        ip.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn and
         block.ownerModule().error_tracing)
     {
         return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
@@ -22206,8 +22199,8 @@ fn reifyUnion(
     if (any_aligns) {
         loaded_union.setFieldAligns(ip, field_aligns);
     }
-    loaded_union.tagTypePtr(ip).* = enum_tag_ty;
-    loaded_union.flagsPtr(ip).status = .have_field_types;
+    loaded_union.setTagType(ip, enum_tag_ty);
+    loaded_union.setStatus(ip, .have_field_types);
 
     try pt.finalizeAnonDecl(new_decl_index);
     try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
@@ -22469,10 +22462,10 @@ fn reifyStruct(
         if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| {
             const backing_int_ty = backing_int_val.toType();
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
-            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+            struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
         } else {
             const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
-            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+            struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
         }
     }
 
@@ -28352,7 +28345,7 @@ fn unionFieldPtr(
             .is_const = union_ptr_info.flags.is_const,
             .is_volatile = union_ptr_info.flags.is_volatile,
             .address_space = union_ptr_info.flags.address_space,
-            .alignment = if (union_obj.getLayout(ip) == .auto) blk: {
+            .alignment = if (union_obj.flagsUnordered(ip).layout == .auto) blk: {
                 const union_align = if (union_ptr_info.flags.alignment != .none)
                     union_ptr_info.flags.alignment
                 else
@@ -28380,7 +28373,7 @@ fn unionFieldPtr(
     }
 
     if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
-        switch (union_obj.getLayout(ip)) {
+        switch (union_obj.flagsUnordered(ip).layout) {
             .auto => if (initializing) {
                 // Store to the union to initialize the tag.
                 const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -28418,7 +28411,7 @@ fn unionFieldPtr(
     }
 
     try sema.requireRuntimeBlock(block, src, null);
-    if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and
+    if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
         union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
     {
         const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -28461,7 +28454,7 @@ fn unionFieldVal(
         const un = ip.indexToKey(union_val.toIntern()).un;
         const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
         const tag_matches = un.tag == field_tag.toIntern();
-        switch (union_obj.getLayout(ip)) {
+        switch (union_obj.flagsUnordered(ip).layout) {
             .auto => {
                 if (tag_matches) {
                     return Air.internedToRef(un.val);
@@ -28495,7 +28488,7 @@ fn unionFieldVal(
     }
 
     try sema.requireRuntimeBlock(block, src, null);
-    if (union_obj.getLayout(ip) == .auto and block.wantSafety() and
+    if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
         union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
     {
         const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -32042,7 +32035,7 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile
 
     pt.ensureDeclAnalyzed(decl_index) catch |err| {
         if (sema.owner_func_index != .none) {
-            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
+            ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure);
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -32056,7 +32049,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void
     const ip = &mod.intern_pool;
     pt.ensureFuncBodyAnalyzed(func) catch |err| {
         if (sema.owner_func_index != .none) {
-            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
+            ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure);
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -32402,7 +32395,7 @@ fn analyzeIsNonErrComptimeOnly(
                 // If the error set is empty, we must return a comptime true or false.
                 // However we want to avoid unnecessarily resolving an inferred error set
                 // in case it is already non-empty.
-                switch (ip.funcIesResolved(func_index).*) {
+                switch (ip.funcIesResolvedUnordered(func_index)) {
                     .anyerror_type => break :blk,
                     .none => {},
                     else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
@@ -33471,7 +33464,7 @@ fn wrapErrorUnionSet(
                 .inferred_error_set_type => |func_index| ok: {
                     // We carefully do this in an order that avoids unnecessarily
                     // resolving the destination error set type.
-                    switch (ip.funcIesResolved(func_index).*) {
+                    switch (ip.funcIesResolvedUnordered(func_index)) {
                         .anyerror_type => break :ok,
                         .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                             break :ok;
@@ -35076,33 +35069,25 @@ pub fn resolveStructAlignment(
 
     assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?);
 
-    assert(struct_type.flagsPtr(ip).alignment == .none);
     assert(struct_type.layout != .@"packed");
+    assert(struct_type.flagsUnordered(ip).alignment == .none);
 
-    if (struct_type.flagsPtr(ip).field_types_wip) {
-        // We'll guess "pointer-aligned", if the struct has an
-        // underaligned pointer field then some allocations
-        // might require explicit alignment.
-        struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
-        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
-        struct_type.flagsPtr(ip).alignment = result;
-        return;
-    }
+    const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
+
+    // We'll guess "pointer-aligned"; if the struct has an
+    // underaligned pointer field, then some allocations
+    // might require explicit alignment.
+    if (struct_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return;
 
     try sema.resolveTypeFieldsStruct(ty, struct_type);
 
-    if (struct_type.setAlignmentWip(ip)) {
-        // We'll guess "pointer-aligned", if the struct has an
-        // underaligned pointer field then some allocations
-        // might require explicit alignment.
-        struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
-        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
-        struct_type.flagsPtr(ip).alignment = result;
-        return;
-    }
+    // We'll guess "pointer-aligned"; if the struct has an
+    // underaligned pointer field, then some allocations
+    // might require explicit alignment.
+    if (struct_type.assumePointerAlignedIfWip(ip, ptr_align)) return;
     defer struct_type.clearAlignmentWip(ip);
 
-    var result: Alignment = .@"1";
+    var alignment: Alignment = .@"1";
 
     for (0..struct_type.field_types.len) |i| {
         const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
@@ -35114,10 +35099,10 @@ pub fn resolveStructAlignment(
             struct_type.layout,
             .sema,
         );
-        result = result.maxStrict(field_align);
+        alignment = alignment.maxStrict(field_align);
     }
 
-    struct_type.flagsPtr(ip).alignment = result;
+    struct_type.setAlignment(ip, alignment);
 }
 
 pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
@@ -35182,7 +35167,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         big_align = big_align.maxStrict(field_align.*);
     }
 
-    if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+    if (struct_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
         const msg = try sema.errMsg(
             ty.srcLoc(zcu),
             "struct layout depends on it having runtime bits",
@@ -35191,7 +35176,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         return sema.failWithOwnedErrorMsg(null, msg);
     }
 
-    if (struct_type.flagsPtr(ip).assumed_pointer_aligned and
+    if (struct_type.flagsUnordered(ip).assumed_pointer_aligned and
         big_align.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8))))
     {
         const msg = try sema.errMsg(
@@ -35259,10 +35244,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
         offsets[i] = @intCast(aligns[i].forward(offset));
         offset = offsets[i] + sizes[i];
     }
-    struct_type.size(ip).* = @intCast(big_align.forward(offset));
-    const flags = struct_type.flagsPtr(ip);
-    flags.alignment = big_align;
-    flags.layout_resolved = true;
+    struct_type.setLayoutResolved(ip, @intCast(big_align.forward(offset)), big_align);
     _ = try sema.typeRequiresComptime(ty);
 }
 
@@ -35355,13 +35337,13 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp
         };
 
         try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
-        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+        struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
     } else {
         if (fields_bit_sum > std.math.maxInt(u16)) {
             return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
         }
         const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
-        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
+        struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
     }
 
     try sema.flushExports();
@@ -35435,15 +35417,12 @@ pub fn resolveUnionAlignment(
 
     assert(!union_type.haveLayout(ip));
 
-    if (union_type.flagsPtr(ip).status == .field_types_wip) {
-        // We'll guess "pointer-aligned", if the union has an
-        // underaligned pointer field then some allocations
-        // might require explicit alignment.
-        union_type.flagsPtr(ip).assumed_pointer_aligned = true;
-        const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
-        union_type.flagsPtr(ip).alignment = result;
-        return;
-    }
+    const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
+
+    // We'll guess "pointer-aligned"; if the union has an
+    // underaligned pointer field, then some allocations
+    // might require explicit alignment.
+    if (union_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return;
 
     try sema.resolveTypeFieldsUnion(ty, union_type);
 
@@ -35461,7 +35440,7 @@ pub fn resolveUnionAlignment(
         max_align = max_align.max(field_align);
     }
 
-    union_type.flagsPtr(ip).alignment = max_align;
+    union_type.setAlignment(ip, max_align);
 }
 
 /// This logic must be kept in sync with `Module.getUnionLayout`.
@@ -35476,7 +35455,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
 
     assert(sema.ownerUnit().unwrap().decl == union_type.decl);
 
-    switch (union_type.flagsPtr(ip).status) {
+    const old_flags = union_type.flagsUnordered(ip);
+    switch (old_flags.status) {
         .none, .have_field_types => {},
         .field_types_wip, .layout_wip => {
             const msg = try sema.errMsg(
@@ -35489,12 +35469,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         .have_layout, .fully_resolved_wip, .fully_resolved => return,
     }
 
-    const prev_status = union_type.flagsPtr(ip).status;
-    errdefer if (union_type.flagsPtr(ip).status == .layout_wip) {
-        union_type.flagsPtr(ip).status = prev_status;
-    };
+    errdefer union_type.setStatusIfLayoutWip(ip, old_flags.status);
 
-    union_type.flagsPtr(ip).status = .layout_wip;
+    union_type.setStatus(ip, .layout_wip);
 
     var max_size: u64 = 0;
     var max_align: Alignment = .@"1";
@@ -35521,8 +35498,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         max_align = max_align.max(field_align);
     }
 
-    const flags = union_type.flagsPtr(ip);
-    const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
+    const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and
+        try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
     const size, const alignment, const padding = if (has_runtime_tag) layout: {
         const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
         const tag_align = try sema.typeAbiAlignment(enum_tag_type);
@@ -35556,12 +35533,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         break :layout .{ size, max_align.max(tag_align), padding };
     } else .{ max_align.forward(max_size), max_align, 0 };
 
-    union_type.size(ip).* = @intCast(size);
-    union_type.padding(ip).* = padding;
-    flags.alignment = alignment;
-    flags.status = .have_layout;
+    union_type.setHaveLayout(ip, @intCast(size), padding, alignment);
 
-    if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+    if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
         const msg = try sema.errMsg(
             ty.srcLoc(pt.zcu),
             "union layout depends on it having runtime bits",
@@ -35570,7 +35544,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
         return sema.failWithOwnedErrorMsg(null, msg);
     }
 
-    if (union_type.flagsPtr(ip).assumed_pointer_aligned and
+    if (union_type.flagsUnordered(ip).assumed_pointer_aligned and
         alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8))))
     {
         const msg = try sema.errMsg(
@@ -35617,7 +35591,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
 
     assert(sema.ownerUnit().unwrap().decl == union_obj.decl);
 
-    switch (union_obj.flagsPtr(ip).status) {
+    switch (union_obj.flagsUnordered(ip).status) {
         .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
         .fully_resolved_wip, .fully_resolved => return,
     }
@@ -35626,15 +35600,15 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
         // After we have resolved the union layout, we have to go over the fields again to
         // make sure pointer fields get their child types resolved as well.
         // See also similar code for structs.
-        const prev_status = union_obj.flagsPtr(ip).status;
-        errdefer union_obj.flagsPtr(ip).status = prev_status;
+        const prev_status = union_obj.flagsUnordered(ip).status;
+        errdefer union_obj.setStatus(ip, prev_status);
 
-        union_obj.flagsPtr(ip).status = .fully_resolved_wip;
+        union_obj.setStatus(ip, .fully_resolved_wip);
         for (0..union_obj.field_types.len) |field_index| {
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
             try field_ty.resolveFully(pt);
         }
-        union_obj.flagsPtr(ip).status = .fully_resolved;
+        union_obj.setStatus(ip, .fully_resolved);
     }
 
     // And let's not forget comptime-only status.
@@ -35667,7 +35641,7 @@ pub fn resolveTypeFieldsStruct(
 
     if (struct_type.haveFieldTypes(ip)) return;
 
-    if (struct_type.setTypesWip(ip)) {
+    if (struct_type.setFieldTypesWip(ip)) {
         const msg = try sema.errMsg(
             Type.fromInterned(ty).srcLoc(zcu),
             "struct '{}' depends on itself",
@@ -35675,7 +35649,7 @@ pub fn resolveTypeFieldsStruct(
         );
         return sema.failWithOwnedErrorMsg(null, msg);
     }
-    defer struct_type.clearTypesWip(ip);
+    defer struct_type.clearFieldTypesWip(ip);
 
     semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) {
         error.AnalysisFail => {
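
`setTypesWip`/`clearTypesWip` become `setFieldTypesWip`/`clearFieldTypesWip`, naming the exact bit they toggle. As the call site shows, the setter doubles as a test-and-set: it returns true when the bit was already set, which is how the "depends on itself" cycle is reported. A plausible body:

    // Hypothetical sketch: returns the previous value of the wip bit.
    pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
        const mutex = ip.extraMutateMutex(); // hypothetical accessor name
        mutex.lock();
        defer mutex.unlock();
        var flags = s.flagsPtr(ip).*;
        if (flags.field_types_wip) return true;
        flags.field_types_wip = true;
        @atomicStore(Tag.TypeStruct.Flags, s.flagsPtr(ip), flags, .unordered);
        return false;
    }
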
@@ -35744,7 +35718,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
         },
         else => {},
     }
-    switch (union_type.flagsPtr(ip).status) {
+    switch (union_type.flagsUnordered(ip).status) {
         .none => {},
         .field_types_wip => {
             const msg = try sema.errMsg(
@@ -35762,8 +35736,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
         => return,
     }
 
-    union_type.flagsPtr(ip).status = .field_types_wip;
-    errdefer union_type.flagsPtr(ip).status = .none;
+    union_type.setStatus(ip, .field_types_wip);
+    errdefer union_type.setStatus(ip, .none);
     semaUnionFields(pt, sema.arena, union_type) catch |err| switch (err) {
         error.AnalysisFail => {
             if (owner_decl.analysis == .complete) {
@@ -35774,7 +35748,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load
         error.OutOfMemory => return error.OutOfMemory,
         error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
     };
-    union_type.flagsPtr(ip).status = .have_field_types;
+    union_type.setStatus(ip, .have_field_types);
 }
 
 /// Returns a normal error set corresponding to the fully populated inferred
@@ -35795,10 +35769,10 @@ fn resolveInferredErrorSet(
 
     // TODO: during an incremental update this might not be `.none`, but the
     // function might be out-of-date!
-    const resolved_ty = func.resolvedErrorSet(ip).*;
+    const resolved_ty = func.resolvedErrorSetUnordered(ip);
     if (resolved_ty != .none) return resolved_ty;
 
-    if (func.analysis(ip).state == .in_progress)
+    if (func.analysisUnordered(ip).state == .in_progress)
         return sema.fail(block, src, "unable to resolve inferred error set", .{});
 
     // In order to ensure that all dependencies are properly added to the set,
@@ -35835,7 +35809,7 @@ fn resolveInferredErrorSet(
 
     // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`,
     // which calls `resolveInferredErrorSetPtr`.
-    const final_resolved_ty = func.resolvedErrorSet(ip).*;
+    const final_resolved_ty = func.resolvedErrorSetUnordered(ip);
     assert(final_resolved_ty != .none);
     return final_resolved_ty;
 }
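
`func.resolvedErrorSet(ip).*` handed back a plain dereference of a pointer into `extra`, which `resolveInferredErrorSetPtr` may write at the end of function body analysis, possibly on another thread; `resolvedErrorSetUnordered` replaces it with a by-value atomic load. Presumably along the lines of (receiver and index types assumed):

    // Hypothetical sketch of the by-value getter used above.
    pub fn resolvedErrorSetUnordered(func: Key.Func, ip: *const InternPool) Index {
        return @atomicLoad(Index, func.resolvedErrorSet(ip), .unordered);
    }
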
@@ -36001,8 +35975,7 @@ fn semaStructFields(
             return;
         },
         .auto, .@"extern" => {
-            struct_type.size(ip).* = 0;
-            struct_type.flagsPtr(ip).layout_resolved = true;
+            struct_type.setLayoutResolved(ip, 0, .none);
             return;
         },
     };
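
The two removed stores (zero size, then the `layout_resolved` bit) become one `setLayoutResolved(ip, 0, .none)` call. The extra backing-integer-type argument suggests the helper is shared with the packed-struct path, which records a real integer type; that call site is outside this excerpt. A speculative body:

    // Hypothetical sketch only; parameter meaning inferred from the call site.
    pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, size: u32, backing_int_ty: Index) void {
        const mutex = ip.extraMutateMutex(); // hypothetical accessor name
        mutex.lock();
        defer mutex.unlock();
        @atomicStore(u32, s.size(ip), size, .unordered);
        @atomicStore(Index, s.backingIntType(ip), backing_int_ty, .unordered);
        var flags = s.flagsPtr(ip).*;
        flags.layout_resolved = true;
        @atomicStore(Tag.TypeStruct.Flags, s.flagsPtr(ip), flags, .unordered);
    }
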
@@ -36196,7 +36169,7 @@ fn semaStructFields(
         extra_index += zir_field.init_body_len;
     }
 
-    struct_type.clearTypesWip(ip);
+    struct_type.clearFieldTypesWip(ip);
     if (!any_inits) struct_type.setHaveFieldInits(ip);
 
     try sema.flushExports();
@@ -36472,7 +36445,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
             }
         } else {
             // The provided type is the enum tag type.
-            union_type.tagTypePtr(ip).* = provided_ty.toIntern();
+            union_type.setTagType(ip, provided_ty.toIntern());
             const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
                 .enum_type => ip.loadEnumType(provided_ty.toIntern()),
                 else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}),
@@ -36610,10 +36583,11 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
         }
 
         if (explicit_tags_seen.len > 0) {
-            const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
+            const tag_ty = union_type.tagTypeUnordered(ip);
+            const tag_info = ip.loadEnumType(tag_ty);
             const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                 return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{
-                    field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(pt),
+                    field_name.fmt(ip), Type.fromInterned(tag_ty).fmt(pt),
                 });
             };
 
@@ -36650,7 +36624,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
             };
             return sema.failWithOwnedErrorMsg(&block_scope, msg);
         }
-        const layout = union_type.getLayout(ip);
+        const layout = union_type.flagsUnordered(ip).layout;
         if (layout == .@"extern" and
             !try sema.validateExternType(field_ty, .union_field))
         {
@@ -36693,7 +36667,8 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
     union_type.setFieldAligns(ip, field_aligns.items);
 
     if (explicit_tags_seen.len > 0) {
-        const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
+        const tag_ty = union_type.tagTypeUnordered(ip);
+        const tag_info = ip.loadEnumType(tag_ty);
         if (tag_info.names.len > fields_len) {
             const msg = msg: {
                 const msg = try sema.errMsg(src, "enum field(s) missing in union", .{});
@@ -36701,21 +36676,21 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
 
                 for (tag_info.names.get(ip), 0..) |field_name, field_index| {
                     if (explicit_tags_seen[field_index]) continue;
-                    try sema.addFieldErrNote(Type.fromInterned(union_type.tagTypePtr(ip).*), field_index, msg, "field '{}' missing, declared here", .{
+                    try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{
                         field_name.fmt(ip),
                     });
                 }
-                try sema.addDeclaredHereNote(msg, Type.fromInterned(union_type.tagTypePtr(ip).*));
+                try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty));
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(&block_scope, msg);
         }
     } else if (enum_field_vals.count() > 0) {
         const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), zcu.declPtr(union_type.decl));
-        union_type.tagTypePtr(ip).* = enum_ty;
+        union_type.setTagType(ip, enum_ty);
     } else {
         const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, zcu.declPtr(union_type.decl));
-        union_type.tagTypePtr(ip).* = enum_ty;
+        union_type.setTagType(ip, enum_ty);
     }
 
     try sema.flushExports();
@@ -37091,7 +37066,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                     try ty.resolveLayout(pt);
 
                     const union_obj = ip.loadUnionType(ty.toIntern());
-                    const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse
+                    const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse
                         return null;
                     if (union_obj.field_types.len == 0) {
                         const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
src/Type.zig
@@ -605,17 +605,15 @@ pub fn hasRuntimeBitsAdvanced(
 
             .union_type => {
                 const union_type = ip.loadUnionType(ty.toIntern());
-                switch (union_type.flagsPtr(ip).runtime_tag) {
+                const union_flags = union_type.flagsUnordered(ip);
+                switch (union_flags.runtime_tag) {
                     .none => {
-                        if (union_type.flagsPtr(ip).status == .field_types_wip) {
-                            // In this case, we guess that hasRuntimeBits() for this type is true,
-                            // and then later if our guess was incorrect, we emit a compile error.
-                            union_type.flagsPtr(ip).assumed_runtime_bits = true;
-                            return true;
-                        }
+                        // In this case, we guess that hasRuntimeBits() for this type is true,
+                        // and then later if our guess was incorrect, we emit a compile error.
+                        if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true;
                     },
                     .safety, .tagged => {
-                        const tag_ty = union_type.tagTypePtr(ip).*;
+                        const tag_ty = union_type.tagTypeUnordered(ip);
                         // tag_ty will be `none` if this union's tag type is not resolved yet,
                         // in which case we want control flow to continue down below.
                         if (tag_ty != .none and
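
The guess-and-check logic is unchanged, but the status check and the `assumed_runtime_bits` write used to be two separate accesses through `flagsPtr`; `assumeRuntimeBitsIfFieldTypesWip` performs both in one step and reports whether the guess was recorded. A plausible body:

    // Hypothetical sketch: record the runtime-bits guess only while field
    // type resolution is still in progress, atomically with that check.
    pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool) bool {
        const mutex = ip.extraMutateMutex(); // hypothetical accessor name
        mutex.lock();
        defer mutex.unlock();
        var flags = u.flagsPtr(ip).*;
        if (flags.status != .field_types_wip) return false;
        flags.assumed_runtime_bits = true;
        @atomicStore(Tag.TypeUnion.Flags, u.flagsPtr(ip), flags, .unordered);
        return true;
    }
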
@@ -627,8 +625,8 @@ pub fn hasRuntimeBitsAdvanced(
                 }
                 switch (strat) {
                     .sema => try ty.resolveFields(pt),
-                    .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
-                    .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
+                    .eager => assert(union_flags.status.haveFieldTypes()),
+                    .lazy => if (!union_flags.status.haveFieldTypes())
                         return error.NeedLazy,
                 }
                 for (0..union_type.field_types.len) |field_index| {
@@ -745,8 +743,8 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
         },
         .union_type => {
             const union_type = ip.loadUnionType(ty.toIntern());
-            return switch (union_type.flagsPtr(ip).runtime_tag) {
-                .none, .safety => union_type.flagsPtr(ip).layout != .auto,
+            return switch (union_type.flagsUnordered(ip).runtime_tag) {
+                .none, .safety => union_type.flagsUnordered(ip).layout != .auto,
                 .tagged => false,
             };
         },
@@ -1045,7 +1043,7 @@ pub fn abiAlignmentAdvanced(
                 if (struct_type.layout == .@"packed") {
                     switch (strat) {
                         .sema => try ty.resolveLayout(pt),
-                        .lazy => if (struct_type.backingIntType(ip).* == .none) return .{
+                        .lazy => if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
                             .val = Value.fromInterned(try pt.intern(.{ .int = .{
                                 .ty = .comptime_int_type,
                                 .storage = .{ .lazy_align = ty.toIntern() },
@@ -1053,10 +1051,10 @@ pub fn abiAlignmentAdvanced(
                         },
                         .eager => {},
                     }
-                    return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(pt) };
+                    return .{ .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiAlignment(pt) };
                 }
 
-                if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) {
+                if (struct_type.flagsUnordered(ip).alignment == .none) switch (strat) {
                     .eager => unreachable, // struct alignment not resolved
                     .sema => try ty.resolveStructAlignment(pt),
                     .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
@@ -1065,7 +1063,7 @@ pub fn abiAlignmentAdvanced(
                     } })) },
                 };
 
-                return .{ .scalar = struct_type.flagsPtr(ip).alignment };
+                return .{ .scalar = struct_type.flagsUnordered(ip).alignment };
             },
             .anon_struct_type => |tuple| {
                 var big_align: Alignment = .@"1";
@@ -1088,7 +1086,7 @@ pub fn abiAlignmentAdvanced(
             .union_type => {
                 const union_type = ip.loadUnionType(ty.toIntern());
 
-                if (union_type.flagsPtr(ip).alignment == .none) switch (strat) {
+                if (union_type.flagsUnordered(ip).alignment == .none) switch (strat) {
                     .eager => unreachable, // union layout not resolved
                     .sema => try ty.resolveUnionAlignment(pt),
                     .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
@@ -1097,7 +1095,7 @@ pub fn abiAlignmentAdvanced(
                     } })) },
                 };
 
-                return .{ .scalar = union_type.flagsPtr(ip).alignment };
+                return .{ .scalar = union_type.flagsUnordered(ip).alignment };
             },
             .opaque_type => return .{ .scalar = .@"1" },
             .enum_type => return .{
@@ -1420,7 +1418,7 @@ pub fn abiSizeAdvanced(
                     .sema => try ty.resolveLayout(pt),
                     .lazy => switch (struct_type.layout) {
                         .@"packed" => {
-                            if (struct_type.backingIntType(ip).* == .none) return .{
+                            if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
                                 .val = Value.fromInterned(try pt.intern(.{ .int = .{
                                     .ty = .comptime_int_type,
                                     .storage = .{ .lazy_size = ty.toIntern() },
@@ -1440,11 +1438,11 @@ pub fn abiSizeAdvanced(
                 }
                 switch (struct_type.layout) {
                     .@"packed" => return .{
-                        .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(pt),
+                        .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiSize(pt),
                     },
                     .auto, .@"extern" => {
                         assert(struct_type.haveLayout(ip));
-                        return .{ .scalar = struct_type.size(ip).* };
+                        return .{ .scalar = struct_type.sizeUnordered(ip) };
                     },
                 }
             },
@@ -1464,7 +1462,7 @@ pub fn abiSizeAdvanced(
                 const union_type = ip.loadUnionType(ty.toIntern());
                 switch (strat) {
                     .sema => try ty.resolveLayout(pt),
-                    .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
+                    .lazy => if (!union_type.flagsUnordered(ip).status.haveLayout()) return .{
                         .val = Value.fromInterned(try pt.intern(.{ .int = .{
                             .ty = .comptime_int_type,
                             .storage = .{ .lazy_size = ty.toIntern() },
@@ -1474,7 +1472,7 @@ pub fn abiSizeAdvanced(
                 }
 
                 assert(union_type.haveLayout(ip));
-                return .{ .scalar = union_type.size(ip).* };
+                return .{ .scalar = union_type.sizeUnordered(ip) };
             },
             .opaque_type => unreachable, // no size available
             .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) },
@@ -1788,7 +1786,7 @@ pub fn bitSizeAdvanced(
                 if (is_packed) try ty.resolveLayout(pt);
             }
             if (is_packed) {
-                return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(pt, strat);
+                return try Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).bitSizeAdvanced(pt, strat);
             }
             return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
         },
@@ -1808,7 +1806,7 @@ pub fn bitSizeAdvanced(
             if (!is_packed) {
                 return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
             }
-            assert(union_type.flagsPtr(ip).status.haveFieldTypes());
+            assert(union_type.flagsUnordered(ip).status.haveFieldTypes());
 
             var size: u64 = 0;
             for (0..union_type.field_types.len) |field_index| {
@@ -2056,9 +2054,10 @@ pub fn unionTagType(ty: Type, mod: *Module) ?Type {
         else => return null,
     }
     const union_type = ip.loadUnionType(ty.toIntern());
-    switch (union_type.flagsPtr(ip).runtime_tag) {
+    const union_flags = union_type.flagsUnordered(ip);
+    switch (union_flags.runtime_tag) {
         .tagged => {
-            assert(union_type.flagsPtr(ip).status.haveFieldTypes());
+            assert(union_flags.status.haveFieldTypes());
             return Type.fromInterned(union_type.enum_tag_ty);
         },
         else => return null,
@@ -2135,7 +2134,7 @@ pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout
     return switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => ip.loadStructType(ty.toIntern()).layout,
         .anon_struct_type => .auto,
-        .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
+        .union_type => ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout,
         else => unreachable,
     };
 }
@@ -2157,7 +2156,7 @@ pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
         .anyerror_type, .adhoc_inferred_error_set_type => false,
         else => switch (ip.indexToKey(ty.toIntern())) {
             .error_set_type => |error_set_type| error_set_type.names.len == 0,
-            .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
+            .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
                 .none, .anyerror_type => false,
                 else => |t| ip.indexToKey(t).error_set_type.names.len == 0,
             },
@@ -2175,7 +2174,7 @@ pub fn isAnyError(ty: Type, mod: *Module) bool {
         .anyerror_type => true,
         .adhoc_inferred_error_set_type => false,
         else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type,
+            .inferred_error_set_type => |i| ip.funcIesResolvedUnordered(i) == .anyerror_type,
             else => false,
         },
     };
@@ -2200,7 +2199,7 @@ pub fn errorSetHasFieldIp(
         .anyerror_type => true,
         else => switch (ip.indexToKey(ty)) {
             .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
-            .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
+            .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
                 .anyerror_type => true,
                 .none => false,
                 else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
@@ -2336,7 +2335,7 @@ pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
         .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
         else => switch (ip.indexToKey(ty.toIntern())) {
             .int_type => |int_type| return int_type,
-            .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*),
+            .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntTypeUnordered(ip)),
             .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
             .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
 
@@ -2826,17 +2825,18 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se
                     return false;
 
                 // A struct with no fields is not comptime-only.
-                return switch (struct_type.flagsPtr(ip).requires_comptime) {
+                return switch (struct_type.setRequiresComptimeWip(ip)) {
                     .no, .wip => false,
                     .yes => true,
                     .unknown => {
                         assert(strat == .sema);
 
-                        if (struct_type.flagsPtr(ip).field_types_wip)
+                        if (struct_type.flagsUnordered(ip).field_types_wip) {
+                            struct_type.setRequiresComptime(ip, .unknown);
                             return false;
+                        }
 
-                        struct_type.flagsPtr(ip).requires_comptime = .wip;
-                        errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
+                        errdefer struct_type.setRequiresComptime(ip, .unknown);
 
                         try ty.resolveFields(pt);
 
@@ -2849,12 +2849,12 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se
                                 // be considered resolved. Comptime-only types
                                 // still maintain a layout of their
                                 // runtime-known fields.
-                                struct_type.flagsPtr(ip).requires_comptime = .yes;
+                                struct_type.setRequiresComptime(ip, .yes);
                                 return true;
                             }
                         }
 
-                        struct_type.flagsPtr(ip).requires_comptime = .no;
+                        struct_type.setRequiresComptime(ip, .no);
                         return false;
                     },
                 };
@@ -2870,29 +2870,30 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se
 
             .union_type => {
                 const union_type = ip.loadUnionType(ty.toIntern());
-                switch (union_type.flagsPtr(ip).requires_comptime) {
+                switch (union_type.setRequiresComptimeWip(ip)) {
                     .no, .wip => return false,
                     .yes => return true,
                     .unknown => {
                         assert(strat == .sema);
 
-                        if (union_type.flagsPtr(ip).status == .field_types_wip)
+                        if (union_type.flagsUnordered(ip).status == .field_types_wip) {
+                            union_type.setRequiresComptime(ip, .unknown);
                             return false;
+                        }
 
-                        union_type.flagsPtr(ip).requires_comptime = .wip;
-                        errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
+                        errdefer union_type.setRequiresComptime(ip, .unknown);
 
                         try ty.resolveFields(pt);
 
                         for (0..union_type.field_types.len) |field_idx| {
                             const field_ty = union_type.field_types.get(ip)[field_idx];
                             if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
-                                union_type.flagsPtr(ip).requires_comptime = .yes;
+                                union_type.setRequiresComptime(ip, .yes);
                                 return true;
                             }
                         }
 
-                        union_type.flagsPtr(ip).requires_comptime = .no;
+                        union_type.setRequiresComptime(ip, .no);
                         return false;
                     },
                 }
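
In both the struct and union paths, `setRequiresComptimeWip` replaces a read of `requires_comptime` followed by a later `.wip` store. Judging by the call sites, it claims `.wip` and returns the previous value in a single step, so only one caller can ever see `.unknown`; that is also why the `field_types_wip` early return now has to roll the flag back to `.unknown` explicitly, which the old code (which had not yet stored `.wip` at that point) did not need. A plausible body for the struct variant:

    // Hypothetical sketch: claim .wip and report what was there before.
    pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool) RequiresComptime {
        const mutex = ip.extraMutateMutex(); // hypothetical accessor name
        mutex.lock();
        defer mutex.unlock();
        var flags = s.flagsPtr(ip).*;
        if (flags.requires_comptime == .unknown) {
            flags.requires_comptime = .wip;
            @atomicStore(Tag.TypeStruct.Flags, s.flagsPtr(ip), flags, .unordered);
            return .unknown;
        }
        return flags.requires_comptime;
    }
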
@@ -3117,7 +3118,7 @@ pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Sli
     const ip = &mod.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
         .error_set_type => |x| x.names,
-        .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
+        .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
             .none => unreachable, // unresolved inferred error set
             .anyerror_type => unreachable,
             else => |t| ip.indexToKey(t).error_set_type.names,
@@ -3374,7 +3375,7 @@ pub fn isTuple(ty: Type, mod: *Module) bool {
             const struct_type = ip.loadStructType(ty.toIntern());
             if (struct_type.layout == .@"packed") return false;
             if (struct_type.decl == .none) return false;
-            return struct_type.flagsPtr(ip).is_tuple;
+            return struct_type.flagsUnordered(ip).is_tuple;
         },
         .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
         else => false,
@@ -3396,7 +3397,7 @@ pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
             const struct_type = ip.loadStructType(ty.toIntern());
             if (struct_type.layout == .@"packed") return false;
             if (struct_type.decl == .none) return false;
-            return struct_type.flagsPtr(ip).is_tuple;
+            return struct_type.flagsUnordered(ip).is_tuple;
         },
         .anon_struct_type => true,
         else => false,
src/Value.zig
@@ -558,7 +558,7 @@ pub fn writeToPackedMemory(
         },
         .Union => {
             const union_obj = mod.typeToUnion(ty).?;
-            switch (union_obj.getLayout(ip)) {
+            switch (union_obj.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
                 .@"packed" => {
                     if (val.unionTag(mod)) |union_tag| {
src/Zcu.zig
@@ -2968,7 +2968,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
     const is_outdated = mod.outdated.contains(func_as_depender) or
         mod.potentially_outdated.contains(func_as_depender);
 
-    switch (func.analysis(ip).state) {
+    switch (func.analysisUnordered(ip).state) {
         .none => {},
         .queued => return,
         // As above, we don't need to forward errors here.
@@ -2989,7 +2989,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
         // since the last update
         try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
     }
-    func.analysis(ip).state = .queued;
+    func.setAnalysisState(ip, .queued);
 }
 
 pub const SemaDeclResult = packed struct {
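
The same split reaches function analysis state: `func.analysis(ip)` handed out a pointer into `extra`, while `analysisUnordered` returns the packed analysis word by value and `setAnalysisState` routes the `.queued` transition through a setter. A closing sketch, with the receiver and type names assumed:

    // Hypothetical sketch of the getter/setter pair used above.
    pub fn analysisUnordered(func: Key.Func, ip: *const InternPool) FuncAnalysis {
        return @atomicLoad(FuncAnalysis, func.analysis(ip), .unordered);
    }

    pub fn setAnalysisState(func: Key.Func, ip: *InternPool, state: FuncAnalysis.State) void {
        const mutex = ip.extraMutateMutex(); // hypothetical accessor name
        mutex.lock();
        defer mutex.unlock();
        var analysis = func.analysis(ip).*;
        analysis.state = state;
        @atomicStore(FuncAnalysis, func.analysis(ip), analysis, .unordered);
    }
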