Commit 8297f28546

Andrew Kelley <andrew@ziglang.org>
2023-05-10 21:16:24
stage2: move struct types and aggregate values to InternPool
1 parent 275652f
src/arch/aarch64/abi.zig
@@ -21,7 +21,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
     var maybe_float_bits: ?u16 = null;
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            if (ty.containerLayout() == .Packed) return .byval;
+            if (ty.containerLayout(mod) == .Packed) return .byval;
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
@@ -31,7 +31,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
             return .integer;
         },
         .Union => {
-            if (ty.containerLayout() == .Packed) return .byval;
+            if (ty.containerLayout(mod) == .Packed) return .byval;
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
@@ -90,11 +90,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
             return max_count;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var count: u8 = 0;
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
@@ -125,10 +125,10 @@ pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
             return null;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 if (getFloatArrayType(field_ty, mod)) |some| return some;
             }
             return null;
src/arch/aarch64/CodeGen.zig
@@ -4119,7 +4119,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_ty = struct_ty.structFieldType(index);
+        const struct_field_ty = struct_ty.structFieldType(index, mod);
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
         switch (mcv) {
@@ -5466,10 +5466,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
-            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_ty = ty.structFieldType(1, mod);
             const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const raw_cond_reg = try self.register_manager.allocReg(null, gp);
             const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
src/arch/arm/abi.zig
@@ -32,7 +32,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
@@ -40,10 +40,10 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;
 
-            const fields = ty.structFieldCount();
+            const fields = ty.structFieldCount(mod);
             var i: u32 = 0;
             while (i < fields) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 const field_alignment = ty.structFieldAlign(i, mod);
                 const field_size = field_ty.bitSize(mod);
                 if (field_size > 32 or field_alignment > 32) {
@@ -54,7 +54,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
         },
         .Union => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
@@ -132,11 +132,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
             return max_count;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var count: u32 = 0;
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
src/arch/arm/CodeGen.zig
@@ -2910,7 +2910,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
-        const struct_field_ty = struct_ty.structFieldType(index);
+        const struct_field_ty = struct_ty.structFieldType(index, mod);
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -5404,10 +5404,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
 
-            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_ty = ty.structFieldType(1, mod);
             const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const cond_reg = try self.register_manager.allocReg(null, gp);
 
src/arch/riscv64/abi.zig
@@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > max_byval_size) return .memory;
                 return .byval;
             }
@@ -26,7 +26,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
         },
         .Union => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > max_byval_size) return .memory;
                 return .byval;
             }
src/arch/sparc64/CodeGen.zig
@@ -3993,10 +3993,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
-            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_ty = ty.structFieldType(1, mod);
             const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const cond_reg = try self.register_manager.allocReg(null, gp);
 
src/arch/wasm/abi.zig
@@ -26,14 +26,14 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
     if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
             // When the struct type is non-scalar
-            if (ty.structFieldCount() > 1) return memory;
+            if (ty.structFieldCount(mod) > 1) return memory;
             // When the struct's alignment is non-natural
-            const field = ty.structFields().values()[0];
+            const field = ty.structFields(mod).values()[0];
             if (field.abi_align != 0) {
                 if (field.abi_align > field.ty.abiAlignment(mod)) {
                     return memory;
@@ -64,7 +64,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
             return direct;
         },
         .Union => {
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
@@ -96,19 +96,19 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
 pub fn scalarType(ty: Type, mod: *Module) Type {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            switch (ty.containerLayout()) {
+            switch (ty.containerLayout(mod)) {
                 .Packed => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
+                    const struct_obj = mod.typeToStruct(ty).?;
                     return scalarType(struct_obj.backing_int_ty, mod);
                 },
                 else => {
-                    std.debug.assert(ty.structFieldCount() == 1);
-                    return scalarType(ty.structFieldType(0), mod);
+                    std.debug.assert(ty.structFieldCount(mod) == 1);
+                    return scalarType(ty.structFieldType(0, mod), mod);
                 },
             }
         },
         .Union => {
-            if (ty.containerLayout() != .Packed) {
+            if (ty.containerLayout(mod) != .Packed) {
                 const layout = ty.unionGetLayout(mod);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
                     return scalarType(ty.unionTagTypeSafety().?, mod);
src/arch/wasm/CodeGen.zig
@@ -1006,9 +1006,9 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
             if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
             break :blk wasm.Valtype.i32; // represented as pointer to stack
         },
-        .Struct => switch (ty.containerLayout()) {
+        .Struct => switch (ty.containerLayout(mod)) {
             .Packed => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(ty).?;
                 return typeToValtype(struct_obj.backing_int_ty, mod);
             },
             else => wasm.Valtype.i32,
@@ -1017,7 +1017,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
             .direct => wasm.Valtype.v128,
             .unrolled => wasm.Valtype.i32,
         },
-        .Union => switch (ty.containerLayout()) {
+        .Union => switch (ty.containerLayout(mod)) {
             .Packed => {
                 const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
                 return typeToValtype(int_ty, mod);
@@ -1747,8 +1747,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
             return ty.hasRuntimeBitsIgnoreComptime(mod);
         },
         .Struct => {
-            if (ty.castTag(.@"struct")) |struct_ty| {
-                const struct_obj = struct_ty.data;
+            if (mod.typeToStruct(ty)) |struct_obj| {
                 if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
                     return isByRef(struct_obj.backing_int_ty, mod);
                 }
@@ -2954,11 +2953,11 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
             const parent_ty = field_ptr.container_ty;
 
             const field_offset = switch (parent_ty.zigTypeTag(mod)) {
-                .Struct => switch (parent_ty.containerLayout()) {
+                .Struct => switch (parent_ty.containerLayout(mod)) {
                     .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod),
                     else => parent_ty.structFieldOffset(field_ptr.field_index, mod),
                 },
-                .Union => switch (parent_ty.containerLayout()) {
+                .Union => switch (parent_ty.containerLayout(mod)) {
                     .Packed => 0,
                     else => blk: {
                         const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod);
@@ -3158,7 +3157,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             return WValue{ .imm32 = @boolToInt(is_pl) };
         },
         .Struct => {
-            const struct_obj = ty.castTag(.@"struct").?.data;
+            const struct_obj = mod.typeToStruct(ty).?;
             assert(struct_obj.layout == .Packed);
             var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
             val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
@@ -3225,7 +3224,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             return WValue{ .imm32 = 0xaaaaaaaa };
         },
         .Struct => {
-            const struct_obj = ty.castTag(.@"struct").?.data;
+            const struct_obj = mod.typeToStruct(ty).?;
             assert(struct_obj.layout == .Packed);
             return func.emitUndefined(struct_obj.backing_int_ty);
         },
@@ -3635,7 +3634,7 @@ fn structFieldPtr(
 ) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
     const result_ty = func.typeOfIndex(inst);
-    const offset = switch (struct_ty.containerLayout()) {
+    const offset = switch (struct_ty.containerLayout(mod)) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => offset: {
                 if (result_ty.ptrInfo(mod).host_size != 0) {
@@ -3668,13 +3667,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const struct_ty = func.typeOf(struct_field.struct_operand);
     const operand = try func.resolveInst(struct_field.struct_operand);
     const field_index = struct_field.field_index;
-    const field_ty = struct_ty.structFieldType(field_index);
+    const field_ty = struct_ty.structFieldType(field_index, mod);
     if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
 
-    const result = switch (struct_ty.containerLayout()) {
+    const result = switch (struct_ty.containerLayout(mod)) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => result: {
-                const struct_obj = struct_ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(struct_ty).?;
                 const offset = struct_obj.packedFieldBitOffset(mod, field_index);
                 const backing_ty = struct_obj.backing_int_ty;
                 const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
@@ -4998,12 +4997,12 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 }
                 break :result_value result;
             },
-            .Struct => switch (result_ty.containerLayout()) {
+            .Struct => switch (result_ty.containerLayout(mod)) {
                 .Packed => {
                     if (isByRef(result_ty, mod)) {
                         return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
-                    const struct_obj = result_ty.castTag(.@"struct").?.data;
+                    const struct_obj = mod.typeToStruct(result_ty).?;
                     const fields = struct_obj.fields.values();
                     const backing_type = struct_obj.backing_int_ty;
 
@@ -5051,7 +5050,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     for (elements, 0..) |elem, elem_index| {
                         if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
 
-                        const elem_ty = result_ty.structFieldType(elem_index);
+                        const elem_ty = result_ty.structFieldType(elem_index, mod);
                         const elem_size = @intCast(u32, elem_ty.abiSize(mod));
                         const value = try func.resolveInst(elem);
                         try func.store(offset, value, elem_ty, 0);
src/arch/x86_64/abi.zig
@@ -41,7 +41,7 @@ pub fn classifyWindows(ty: Type, mod: *Module) Class {
             1, 2, 4, 8 => return .integer,
             else => switch (ty.zigTypeTag(mod)) {
                 .Int => return .win_i128,
-                .Struct, .Union => if (ty.containerLayout() == .Packed) {
+                .Struct, .Union => if (ty.containerLayout(mod) == .Packed) {
                     return .win_i128;
                 } else {
                     return .memory;
@@ -210,7 +210,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             // "If the size of the aggregate exceeds a single eightbyte, each is classified
             // separately.".
             const ty_size = ty.abiSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 assert(ty_size <= 128);
                 result[0] = .integer;
                 if (ty_size > 64) result[1] = .integer;
@@ -221,7 +221,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
 
             var result_i: usize = 0; // out of 8
             var byte_i: usize = 0; // out of 8
-            const fields = ty.structFields();
+            const fields = ty.structFields(mod);
             for (fields.values()) |field| {
                 if (field.abi_align != 0) {
                     if (field.abi_align < field.ty.abiAlignment(mod)) {
@@ -329,7 +329,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             // "If the size of the aggregate exceeds a single eightbyte, each is classified
             // separately.".
             const ty_size = ty.abiSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 assert(ty_size <= 128);
                 result[0] = .integer;
                 if (ty_size > 64) result[1] = .integer;
src/arch/x86_64/CodeGen.zig
@@ -3252,13 +3252,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     try self.genSetMem(
                         .{ .frame = frame_index },
                         @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
-                        tuple_ty.structFieldType(1),
+                        tuple_ty.structFieldType(1, mod),
                         .{ .eflags = cc },
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
                         @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
-                        tuple_ty.structFieldType(0),
+                        tuple_ty.structFieldType(0, mod),
                         partial_mcv,
                     );
                     break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3289,7 +3289,7 @@ fn genSetFrameTruncatedOverflowCompare(
     };
     defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const ty = tuple_ty.structFieldType(0);
+    const ty = tuple_ty.structFieldType(0, mod);
     const int_info = ty.intInfo(mod);
 
     const hi_limb_bits = (int_info.bits - 1) % 64 + 1;
@@ -3336,7 +3336,7 @@ fn genSetFrameTruncatedOverflowCompare(
     try self.genSetMem(
         .{ .frame = frame_index },
         @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
-        tuple_ty.structFieldType(1),
+        tuple_ty.structFieldType(1, mod),
         if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
     );
 }
@@ -3393,13 +3393,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         try self.genSetMem(
                             .{ .frame = frame_index },
                             @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
-                            tuple_ty.structFieldType(0),
+                            tuple_ty.structFieldType(0, mod),
                             partial_mcv,
                         );
                         try self.genSetMem(
                             .{ .frame = frame_index },
                             @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
-                            tuple_ty.structFieldType(1),
+                            tuple_ty.structFieldType(1, mod),
                             .{ .immediate = 0 }, // cc being set is impossible
                         );
                     } else try self.genSetFrameTruncatedOverflowCompare(
@@ -5563,7 +5563,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
     const ptr_field_ty = self.typeOfIndex(inst);
     const ptr_container_ty = self.typeOf(operand);
     const container_ty = ptr_container_ty.childType(mod);
-    const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
+    const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
         .Auto, .Extern => container_ty.structFieldOffset(index, mod),
         .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
             ptr_field_ty.ptrInfo(mod).host_size == 0)
@@ -5591,16 +5591,16 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 
         const container_ty = self.typeOf(operand);
         const container_rc = regClassForType(container_ty, mod);
-        const field_ty = container_ty.structFieldType(index);
+        const field_ty = container_ty.structFieldType(index, mod);
         if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
         const field_rc = regClassForType(field_ty, mod);
         const field_is_gp = field_rc.supersetOf(gp);
 
         const src_mcv = try self.resolveInst(operand);
-        const field_off = switch (container_ty.containerLayout()) {
+        const field_off = switch (container_ty.containerLayout(mod)) {
             .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
-            .Packed => if (container_ty.castTag(.@"struct")) |struct_obj|
-                struct_obj.data.packedFieldBitOffset(mod, index)
+            .Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
+                struct_obj.packedFieldBitOffset(mod, index)
             else
                 0,
         };
@@ -10036,13 +10036,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
             try self.genSetMem(
                 base,
                 disp + @intCast(i32, ty.structFieldOffset(0, mod)),
-                ty.structFieldType(0),
+                ty.structFieldType(0, mod),
                 .{ .register = ro.reg },
             );
             try self.genSetMem(
                 base,
                 disp + @intCast(i32, ty.structFieldOffset(1, mod)),
-                ty.structFieldType(1),
+                ty.structFieldType(1, mod),
                 .{ .eflags = ro.eflags },
             );
         },
@@ -11259,8 +11259,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
             .Struct => {
                 const frame_index =
                     try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
-                if (result_ty.containerLayout() == .Packed) {
-                    const struct_obj = result_ty.castTag(.@"struct").?.data;
+                if (result_ty.containerLayout(mod) == .Packed) {
+                    const struct_obj = mod.typeToStruct(result_ty).?;
                     try self.genInlineMemset(
                         .{ .lea_frame = .{ .index = frame_index } },
                         .{ .immediate = 0 },
@@ -11269,7 +11269,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                     for (elements, 0..) |elem, elem_i| {
                         if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
 
-                        const elem_ty = result_ty.structFieldType(elem_i);
+                        const elem_ty = result_ty.structFieldType(elem_i, mod);
                         const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod));
                         if (elem_bit_size > 64) {
                             return self.fail(
@@ -11341,7 +11341,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                 } else for (elements, 0..) |elem, elem_i| {
                     if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
 
-                    const elem_ty = result_ty.structFieldType(elem_i);
+                    const elem_ty = result_ty.structFieldType(elem_i, mod);
                     const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod));
                     const elem_mcv = try self.resolveInst(elem);
                     const mat_elem_mcv = switch (elem_mcv) {
src/codegen/c/type.zig
@@ -299,7 +299,7 @@ pub const CType = extern union {
         pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
             return init(
                 struct_ty.structFieldAlign(field_i, mod),
-                struct_ty.structFieldType(field_i).abiAlignment(mod),
+                struct_ty.structFieldType(field_i, mod).abiAlignment(mod),
             );
         }
         pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
@@ -1486,23 +1486,23 @@ pub const CType = extern union {
                     }
                 },
 
-                .Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) {
-                    if (ty.castTag(.@"struct")) |struct_obj| {
-                        try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
+                .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
+                    if (mod.typeToStruct(ty)) |struct_obj| {
+                        try self.initType(struct_obj.backing_int_ty, kind, lookup);
                     } else {
                         const bits = @intCast(u16, ty.bitSize(mod));
                         const int_ty = try mod.intType(.unsigned, bits);
                         try self.initType(int_ty, kind, lookup);
                     }
-                } else if (ty.isTupleOrAnonStruct()) {
+                } else if (ty.isTupleOrAnonStruct(mod)) {
                     if (lookup.isMutable()) {
                         for (0..switch (zig_ty_tag) {
-                            .Struct => ty.structFieldCount(),
+                            .Struct => ty.structFieldCount(mod),
                             .Union => ty.unionFields().count(),
                             else => unreachable,
                         }) |field_i| {
-                            const field_ty = ty.structFieldType(field_i);
-                            if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
+                            const field_ty = ty.structFieldType(field_i, mod);
+                            if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
                                 !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
                             _ = try lookup.typeToIndex(field_ty, switch (kind) {
                                 .forward, .forward_parameter => .forward,
@@ -1579,11 +1579,11 @@ pub const CType = extern union {
                         } else {
                             var is_packed = false;
                             for (0..switch (zig_ty_tag) {
-                                .Struct => ty.structFieldCount(),
+                                .Struct => ty.structFieldCount(mod),
                                 .Union => ty.unionFields().count(),
                                 else => unreachable,
                             }) |field_i| {
-                                const field_ty = ty.structFieldType(field_i);
+                                const field_ty = ty.structFieldType(field_i, mod);
                                 if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                                 const field_align = AlignAs.fieldAlign(ty, field_i, mod);
@@ -1929,15 +1929,15 @@ pub const CType = extern union {
                 => {
                     const zig_ty_tag = ty.zigTypeTag(mod);
                     const fields_len = switch (zig_ty_tag) {
-                        .Struct => ty.structFieldCount(),
+                        .Struct => ty.structFieldCount(mod),
                         .Union => ty.unionFields().count(),
                         else => unreachable,
                     };
 
                     var c_fields_len: usize = 0;
                     for (0..fields_len) |field_i| {
-                        const field_ty = ty.structFieldType(field_i);
-                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
+                        const field_ty = ty.structFieldType(field_i, mod);
+                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
                             !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
                         c_fields_len += 1;
                     }
@@ -1945,8 +1945,8 @@ pub const CType = extern union {
                     const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
                     var c_field_i: usize = 0;
                     for (0..fields_len) |field_i| {
-                        const field_ty = ty.structFieldType(field_i);
-                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
+                        const field_ty = ty.structFieldType(field_i, mod);
+                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
                             !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         defer c_field_i += 1;
@@ -1955,7 +1955,7 @@ pub const CType = extern union {
                                 std.fmt.allocPrintZ(arena, "f{}", .{field_i})
                             else
                                 arena.dupeZ(u8, switch (zig_ty_tag) {
-                                    .Struct => ty.structFieldName(field_i),
+                                    .Struct => ty.structFieldName(field_i, mod),
                                     .Union => ty.unionFields().keys()[field_i],
                                     else => unreachable,
                                 }),
@@ -2074,7 +2074,7 @@ pub const CType = extern union {
                         .fwd_anon_struct,
                         .fwd_anon_union,
                         => {
-                            if (!ty.isTupleOrAnonStruct()) return false;
+                            if (!ty.isTupleOrAnonStruct(mod)) return false;
 
                             var name_buf: [
                                 std.fmt.count("f{}", .{std.math.maxInt(usize)})
@@ -2084,12 +2084,12 @@ pub const CType = extern union {
                             const zig_ty_tag = ty.zigTypeTag(mod);
                             var c_field_i: usize = 0;
                             for (0..switch (zig_ty_tag) {
-                                .Struct => ty.structFieldCount(),
+                                .Struct => ty.structFieldCount(mod),
                                 .Union => ty.unionFields().count(),
                                 else => unreachable,
                             }) |field_i| {
-                                const field_ty = ty.structFieldType(field_i);
-                                if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
+                                const field_ty = ty.structFieldType(field_i, mod);
+                                if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
                                     !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                                 defer c_field_i += 1;
@@ -2105,7 +2105,7 @@ pub const CType = extern union {
                                     if (ty.isSimpleTuple())
                                         std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
                                     else switch (zig_ty_tag) {
-                                        .Struct => ty.structFieldName(field_i),
+                                        .Struct => ty.structFieldName(field_i, mod),
                                         .Union => ty.unionFields().keys()[field_i],
                                         else => unreachable,
                                     },
@@ -2210,12 +2210,12 @@ pub const CType = extern union {
 
                             const zig_ty_tag = ty.zigTypeTag(mod);
                             for (0..switch (ty.zigTypeTag(mod)) {
-                                .Struct => ty.structFieldCount(),
+                                .Struct => ty.structFieldCount(mod),
                                 .Union => ty.unionFields().count(),
                                 else => unreachable,
                             }) |field_i| {
-                                const field_ty = ty.structFieldType(field_i);
-                                if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
+                                const field_ty = ty.structFieldType(field_i, mod);
+                                if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
                                     !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                                 self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
@@ -2227,7 +2227,7 @@ pub const CType = extern union {
                                 hasher.update(if (ty.isSimpleTuple())
                                     std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
                                 else switch (zig_ty_tag) {
-                                    .Struct => ty.structFieldName(field_i),
+                                    .Struct => ty.structFieldName(field_i, mod),
                                     .Union => ty.unionFields().keys()[field_i],
                                     else => unreachable,
                                 });
src/codegen/c.zig
@@ -820,7 +820,7 @@ pub const DeclGen = struct {
                     try dg.renderValue(writer, Type.bool, val, initializer_type);
                     return writer.writeAll(" }");
                 },
-                .Struct => switch (ty.containerLayout()) {
+                .Struct => switch (ty.containerLayout(mod)) {
                     .Auto, .Extern => {
                         if (!location.isInitializer()) {
                             try writer.writeByte('(');
@@ -830,9 +830,9 @@ pub const DeclGen = struct {
 
                         try writer.writeByte('{');
                         var empty = true;
-                        for (0..ty.structFieldCount()) |field_i| {
-                            if (ty.structFieldIsComptime(field_i)) continue;
-                            const field_ty = ty.structFieldType(field_i);
+                        for (0..ty.structFieldCount(mod)) |field_i| {
+                            if (ty.structFieldIsComptime(field_i, mod)) continue;
+                            const field_ty = ty.structFieldType(field_i, mod);
                             if (!field_ty.hasRuntimeBits(mod)) continue;
 
                             if (!empty) try writer.writeByte(',');
@@ -1328,7 +1328,7 @@ pub const DeclGen = struct {
                 },
                 else => unreachable,
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto, .Extern => {
                     const field_vals = val.castTag(.aggregate).?.data;
 
@@ -1341,8 +1341,8 @@ pub const DeclGen = struct {
                     try writer.writeByte('{');
                     var empty = true;
                     for (field_vals, 0..) |field_val, field_i| {
-                        if (ty.structFieldIsComptime(field_i)) continue;
-                        const field_ty = ty.structFieldType(field_i);
+                        if (ty.structFieldIsComptime(field_i, mod)) continue;
+                        const field_ty = ty.structFieldType(field_i, mod);
                         if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         if (!empty) try writer.writeByte(',');
@@ -1363,8 +1363,8 @@ pub const DeclGen = struct {
 
                     var eff_num_fields: usize = 0;
                     for (0..field_vals.len) |field_i| {
-                        if (ty.structFieldIsComptime(field_i)) continue;
-                        const field_ty = ty.structFieldType(field_i);
+                        if (ty.structFieldIsComptime(field_i, mod)) continue;
+                        const field_ty = ty.structFieldType(field_i, mod);
                         if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         eff_num_fields += 1;
@@ -1386,8 +1386,8 @@ pub const DeclGen = struct {
                         var eff_index: usize = 0;
                         var needs_closing_paren = false;
                         for (field_vals, 0..) |field_val, field_i| {
-                            if (ty.structFieldIsComptime(field_i)) continue;
-                            const field_ty = ty.structFieldType(field_i);
+                            if (ty.structFieldIsComptime(field_i, mod)) continue;
+                            const field_ty = ty.structFieldType(field_i, mod);
                             if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                             const cast_context = IntCastContext{ .value = .{ .value = field_val } };
@@ -1416,8 +1416,8 @@ pub const DeclGen = struct {
                         // a << a_off | b << b_off | c << c_off
                         var empty = true;
                         for (field_vals, 0..) |field_val, field_i| {
-                            if (ty.structFieldIsComptime(field_i)) continue;
-                            const field_ty = ty.structFieldType(field_i);
+                            if (ty.structFieldIsComptime(field_i, mod)) continue;
+                            const field_ty = ty.structFieldType(field_i, mod);
                             if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                             if (!empty) try writer.writeAll(" | ");
@@ -1453,7 +1453,7 @@ pub const DeclGen = struct {
                 const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?;
                 const field_ty = ty.unionFields().values()[field_i].ty;
                 const field_name = ty.unionFields().keys()[field_i];
-                if (ty.containerLayout() == .Packed) {
+                if (ty.containerLayout(mod) == .Packed) {
                     if (field_ty.hasRuntimeBits(mod)) {
                         if (field_ty.isPtrAtRuntime(mod)) {
                             try writer.writeByte('(');
@@ -5218,25 +5218,25 @@ fn fieldLocation(
     end: void,
 } {
     return switch (container_ty.zigTypeTag(mod)) {
-        .Struct => switch (container_ty.containerLayout()) {
-            .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| {
-                if (container_ty.structFieldIsComptime(next_field_index)) continue;
-                const field_ty = container_ty.structFieldType(next_field_index);
+        .Struct => switch (container_ty.containerLayout(mod)) {
+            .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| {
+                if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
+                const field_ty = container_ty.structFieldType(next_field_index, mod);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                 break .{ .field = if (container_ty.isSimpleTuple())
                     .{ .field = next_field_index }
                 else
-                    .{ .identifier = container_ty.structFieldName(next_field_index) } };
+                    .{ .identifier = container_ty.structFieldName(next_field_index, mod) } };
             } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
             .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0)
                 .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
             else
                 .begin,
         },
-        .Union => switch (container_ty.containerLayout()) {
+        .Union => switch (container_ty.containerLayout(mod)) {
             .Auto, .Extern => {
-                const field_ty = container_ty.structFieldType(field_index);
+                const field_ty = container_ty.structFieldType(field_index, mod);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
                     return if (container_ty.unionTagTypeSafety() != null and
                         !container_ty.unionHasAllZeroBitFieldTypes(mod))
@@ -5417,101 +5417,111 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
     // Ensure complete type definition is visible before accessing fields.
     _ = try f.typeToIndex(struct_ty, .complete);
 
-    const field_name: CValue = switch (struct_ty.tag()) {
-        .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) {
-            .Auto, .Extern => if (struct_ty.isSimpleTuple())
+    const field_name: CValue = switch (struct_ty.ip_index) {
+        .none => switch (struct_ty.tag()) {
+            .tuple, .anon_struct => if (struct_ty.isSimpleTuple())
                 .{ .field = extra.field_index }
             else
-                .{ .identifier = struct_ty.structFieldName(extra.field_index) },
-            .Packed => {
-                const struct_obj = struct_ty.castTag(.@"struct").?.data;
-                const int_info = struct_ty.intInfo(mod);
-
-                const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
-
-                const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
-                const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+                .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
 
-                const field_int_signedness = if (inst_ty.isAbiInt(mod))
-                    inst_ty.intInfo(mod).signedness
-                else
-                    .unsigned;
-                const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
-
-                const temp_local = try f.allocLocal(inst, field_int_ty);
-                try f.writeCValue(writer, temp_local, .Other);
-                try writer.writeAll(" = zig_wrap_");
-                try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
-                try writer.writeAll("((");
-                try f.renderType(writer, field_int_ty);
-                try writer.writeByte(')');
-                const cant_cast = int_info.bits > 64;
-                if (cant_cast) {
-                    if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
-                    try writer.writeAll("zig_lo_");
-                    try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
-                    try writer.writeByte('(');
-                }
-                if (bit_offset > 0) {
-                    try writer.writeAll("zig_shr_");
-                    try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
-                    try writer.writeByte('(');
-                }
-                try f.writeCValue(writer, struct_byval, .Other);
-                if (bit_offset > 0) {
-                    try writer.writeAll(", ");
-                    try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
-                    try writer.writeByte(')');
-                }
-                if (cant_cast) try writer.writeByte(')');
-                try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
-                try writer.writeAll(");\n");
-                if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
+            .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout(mod) == .Packed) {
+                const operand_lval = if (struct_byval == .constant) blk: {
+                    const operand_local = try f.allocLocal(inst, struct_ty);
+                    try f.writeCValue(writer, operand_local, .Other);
+                    try writer.writeAll(" = ");
+                    try f.writeCValue(writer, struct_byval, .Initializer);
+                    try writer.writeAll(";\n");
+                    break :blk operand_local;
+                } else struct_byval;
 
                 const local = try f.allocLocal(inst, inst_ty);
-                try writer.writeAll("memcpy(");
-                try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
-                try writer.writeAll(", ");
-                try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
+                try writer.writeAll("memcpy(&");
+                try f.writeCValue(writer, local, .Other);
+                try writer.writeAll(", &");
+                try f.writeCValue(writer, operand_lval, .Other);
                 try writer.writeAll(", sizeof(");
                 try f.renderType(writer, inst_ty);
                 try writer.writeAll("));\n");
-                try freeLocal(f, inst, temp_local.new_local, 0);
+
+                if (struct_byval == .constant) {
+                    try freeLocal(f, inst, operand_lval.new_local, 0);
+                }
+
                 return local;
+            } else field_name: {
+                const name = struct_ty.unionFields().keys()[extra.field_index];
+                break :field_name if (struct_ty.unionTagTypeSafety()) |_|
+                    .{ .payload_identifier = name }
+                else
+                    .{ .identifier = name };
             },
+            else => unreachable,
         },
-        .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) {
-            const operand_lval = if (struct_byval == .constant) blk: {
-                const operand_local = try f.allocLocal(inst, struct_ty);
-                try f.writeCValue(writer, operand_local, .Other);
-                try writer.writeAll(" = ");
-                try f.writeCValue(writer, struct_byval, .Initializer);
-                try writer.writeAll(";\n");
-                break :blk operand_local;
-            } else struct_byval;
+        else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+            .struct_type => switch (struct_ty.containerLayout(mod)) {
+                .Auto, .Extern => if (struct_ty.isSimpleTuple())
+                    .{ .field = extra.field_index }
+                else
+                    .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
+                .Packed => {
+                    const struct_obj = mod.typeToStruct(struct_ty).?;
+                    const int_info = struct_ty.intInfo(mod);
 
-            const local = try f.allocLocal(inst, inst_ty);
-            try writer.writeAll("memcpy(&");
-            try f.writeCValue(writer, local, .Other);
-            try writer.writeAll(", &");
-            try f.writeCValue(writer, operand_lval, .Other);
-            try writer.writeAll(", sizeof(");
-            try f.renderType(writer, inst_ty);
-            try writer.writeAll("));\n");
+                    const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
 
-            if (struct_byval == .constant) {
-                try freeLocal(f, inst, operand_lval.new_local, 0);
-            }
+                    const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
+                    const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
 
-            return local;
-        } else field_name: {
-            const name = struct_ty.unionFields().keys()[extra.field_index];
-            break :field_name if (struct_ty.unionTagTypeSafety()) |_|
-                .{ .payload_identifier = name }
-            else
-                .{ .identifier = name };
+                    const field_int_signedness = if (inst_ty.isAbiInt(mod))
+                        inst_ty.intInfo(mod).signedness
+                    else
+                        .unsigned;
+                    const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
+
+                    const temp_local = try f.allocLocal(inst, field_int_ty);
+                    try f.writeCValue(writer, temp_local, .Other);
+                    try writer.writeAll(" = zig_wrap_");
+                    try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
+                    try writer.writeAll("((");
+                    try f.renderType(writer, field_int_ty);
+                    try writer.writeByte(')');
+                    const cant_cast = int_info.bits > 64;
+                    if (cant_cast) {
+                        if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+                        try writer.writeAll("zig_lo_");
+                        try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
+                        try writer.writeByte('(');
+                    }
+                    if (bit_offset > 0) {
+                        try writer.writeAll("zig_shr_");
+                        try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
+                        try writer.writeByte('(');
+                    }
+                    try f.writeCValue(writer, struct_byval, .Other);
+                    if (bit_offset > 0) {
+                        try writer.writeAll(", ");
+                        try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+                        try writer.writeByte(')');
+                    }
+                    if (cant_cast) try writer.writeByte(')');
+                    try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
+                    try writer.writeAll(");\n");
+                    if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
+
+                    const local = try f.allocLocal(inst, inst_ty);
+                    try writer.writeAll("memcpy(");
+                    try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
+                    try writer.writeAll(", ");
+                    try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
+                    try writer.writeAll(", sizeof(");
+                    try f.renderType(writer, inst_ty);
+                    try writer.writeAll("));\n");
+                    try freeLocal(f, inst, temp_local.new_local, 0);
+                    return local;
+                },
+            },
+            else => unreachable,
         },
-        else => unreachable,
     };
 
     const local = try f.allocLocal(inst, inst_ty);
@@ -6805,17 +6815,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try a.end(f, writer);
             }
         },
-        .Struct => switch (inst_ty.containerLayout()) {
+        .Struct => switch (inst_ty.containerLayout(mod)) {
             .Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
-                if (inst_ty.structFieldIsComptime(field_i)) continue;
-                const field_ty = inst_ty.structFieldType(field_i);
+                if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+                const field_ty = inst_ty.structFieldType(field_i, mod);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                 const a = try Assignment.start(f, writer, field_ty);
                 try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
                     .{ .field = field_i }
                 else
-                    .{ .identifier = inst_ty.structFieldName(field_i) });
+                    .{ .identifier = inst_ty.structFieldName(field_i, mod) });
                 try a.assign(f, writer);
                 try f.writeCValue(writer, element, .Other);
                 try a.end(f, writer);
@@ -6831,8 +6841,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
 
                 var empty = true;
                 for (0..elements.len) |field_i| {
-                    if (inst_ty.structFieldIsComptime(field_i)) continue;
-                    const field_ty = inst_ty.structFieldType(field_i);
+                    if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+                    const field_ty = inst_ty.structFieldType(field_i, mod);
                     if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                     if (!empty) {
@@ -6844,8 +6854,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 }
                 empty = true;
                 for (resolved_elements, 0..) |element, field_i| {
-                    if (inst_ty.structFieldIsComptime(field_i)) continue;
-                    const field_ty = inst_ty.structFieldType(field_i);
+                    if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+                    const field_ty = inst_ty.structFieldType(field_i, mod);
                     if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                     if (!empty) try writer.writeAll(", ");
src/codegen/llvm.zig
@@ -1986,8 +1986,7 @@ pub const Object = struct {
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);
 
-                if (ty.castTag(.@"struct")) |payload| {
-                    const struct_obj = payload.data;
+                if (mod.typeToStruct(ty)) |struct_obj| {
                     if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
                         assert(struct_obj.haveLayout());
                         const info = struct_obj.backing_int_ty.intInfo(mod);
@@ -2075,8 +2074,7 @@ pub const Object = struct {
                     return full_di_ty;
                 }
 
-                if (ty.castTag(.@"struct")) |payload| {
-                    const struct_obj = payload.data;
+                if (mod.typeToStruct(ty)) |struct_obj| {
                     if (!struct_obj.haveFieldTypes()) {
                         // This can happen if a struct type makes it all the way to
                         // flush() without ever being instantiated or referenced (even
@@ -2105,8 +2103,8 @@ pub const Object = struct {
                     return struct_di_ty;
                 }
 
-                const fields = ty.structFields();
-                const layout = ty.containerLayout();
+                const fields = ty.structFields(mod);
+                const layout = ty.containerLayout(mod);
 
                 var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
                 defer di_fields.deinit(gpa);
@@ -2116,7 +2114,7 @@ pub const Object = struct {
                 comptime assert(struct_layout_version == 2);
                 var offset: u64 = 0;
 
-                var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod);
+                var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
                 while (it.next()) |field_and_index| {
                     const field = field_and_index.field;
                     const field_size = field.ty.abiSize(mod);
@@ -2990,7 +2988,7 @@ pub const DeclGen = struct {
                     return llvm_struct_ty;
                 }
 
-                const struct_obj = t.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(t).?;
 
                 if (struct_obj.layout == .Packed) {
                     assert(struct_obj.haveLayout());
@@ -3696,7 +3694,7 @@ pub const DeclGen = struct {
                     }
                 }
 
-                const struct_obj = tv.ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(tv.ty).?;
 
                 if (struct_obj.layout == .Packed) {
                     assert(struct_obj.haveLayout());
@@ -4043,7 +4041,7 @@ pub const DeclGen = struct {
                 const llvm_u32 = dg.context.intType(32);
                 switch (parent_ty.zigTypeTag(mod)) {
                     .Union => {
-                        if (parent_ty.containerLayout() == .Packed) {
+                        if (parent_ty.containerLayout(mod) == .Packed) {
                             return parent_llvm_ptr;
                         }
 
@@ -4065,14 +4063,14 @@ pub const DeclGen = struct {
                         return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                     },
                     .Struct => {
-                        if (parent_ty.containerLayout() == .Packed) {
+                        if (parent_ty.containerLayout(mod) == .Packed) {
                             if (!byte_aligned) return parent_llvm_ptr;
                             const llvm_usize = dg.context.intType(target.ptrBitWidth());
                             const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
                             // count bits of fields before this one
                             const prev_bits = b: {
                                 var b: usize = 0;
-                                for (parent_ty.structFields().values()[0..field_index]) |field| {
+                                for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
                                     if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
                                     b += @intCast(usize, field.ty.bitSize(mod));
                                 }
@@ -5983,7 +5981,7 @@ pub const FuncGen = struct {
         const struct_ty = self.typeOf(struct_field.struct_operand);
         const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
         const field_index = struct_field.field_index;
-        const field_ty = struct_ty.structFieldType(field_index);
+        const field_ty = struct_ty.structFieldType(field_index, mod);
         if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return null;
         }
@@ -5991,9 +5989,9 @@ pub const FuncGen = struct {
         if (!isByRef(struct_ty, mod)) {
             assert(!isByRef(field_ty, mod));
             switch (struct_ty.zigTypeTag(mod)) {
-                .Struct => switch (struct_ty.containerLayout()) {
+                .Struct => switch (struct_ty.containerLayout(mod)) {
                     .Packed => {
-                        const struct_obj = struct_ty.castTag(.@"struct").?.data;
+                        const struct_obj = mod.typeToStruct(struct_ty).?;
                         const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
                         const containing_int = struct_llvm_val;
                         const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
@@ -6019,7 +6017,7 @@ pub const FuncGen = struct {
                     },
                 },
                 .Union => {
-                    assert(struct_ty.containerLayout() == .Packed);
+                    assert(struct_ty.containerLayout(mod) == .Packed);
                     const containing_int = struct_llvm_val;
                     const elem_llvm_ty = try self.dg.lowerType(field_ty);
                     if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
@@ -6041,7 +6039,7 @@ pub const FuncGen = struct {
 
         switch (struct_ty.zigTypeTag(mod)) {
             .Struct => {
-                assert(struct_ty.containerLayout() != .Packed);
+                assert(struct_ty.containerLayout(mod) != .Packed);
                 var ptr_ty_buf: Type.Payload.Pointer = undefined;
                 const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                 const struct_llvm_ty = try self.dg.lowerType(struct_ty);
@@ -9289,8 +9287,8 @@ pub const FuncGen = struct {
                 return vector;
             },
             .Struct => {
-                if (result_ty.containerLayout() == .Packed) {
-                    const struct_obj = result_ty.castTag(.@"struct").?.data;
+                if (result_ty.containerLayout(mod) == .Packed) {
+                    const struct_obj = mod.typeToStruct(result_ty).?;
                     assert(struct_obj.haveLayout());
                     const big_bits = struct_obj.backing_int_ty.bitSize(mod);
                     const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
@@ -9795,7 +9793,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const struct_ty = struct_ptr_ty.childType(mod);
         switch (struct_ty.zigTypeTag(mod)) {
-            .Struct => switch (struct_ty.containerLayout()) {
+            .Struct => switch (struct_ty.containerLayout(mod)) {
                 .Packed => {
                     const result_ty = self.typeOfIndex(inst);
                     const result_ty_info = result_ty.ptrInfo(mod);
@@ -9838,7 +9836,7 @@ pub const FuncGen = struct {
             },
             .Union => {
                 const layout = struct_ty.unionGetLayout(mod);
-                if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr;
+                if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
                 const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
                 const union_llvm_ty = try self.dg.lowerType(struct_ty);
                 const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
@@ -10530,11 +10528,11 @@ fn llvmFieldIndex(
         }
         return null;
     }
-    const layout = ty.containerLayout();
+    const layout = ty.containerLayout(mod);
     assert(layout != .Packed);
 
     var llvm_field_index: c_uint = 0;
-    var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod);
+    var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
     while (it.next()) |field_and_index| {
         const field = field_and_index.field;
         const field_align = field.alignment(mod, layout);
@@ -11113,7 +11111,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
         .Array, .Frame => return ty.hasRuntimeBits(mod),
         .Struct => {
             // Packed structs are represented to LLVM as integers.
-            if (ty.containerLayout() == .Packed) return false;
+            if (ty.containerLayout(mod) == .Packed) return false;
             if (ty.isSimpleTupleOrAnonStruct()) {
                 const tuple = ty.tupleFields();
                 var count: usize = 0;
@@ -11127,7 +11125,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
                 return false;
             }
             var count: usize = 0;
-            const fields = ty.structFields();
+            const fields = ty.structFields(mod);
             for (fields.values()) |field| {
                 if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
 
@@ -11137,7 +11135,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
             }
             return false;
         },
-        .Union => switch (ty.containerLayout()) {
+        .Union => switch (ty.containerLayout(mod)) {
             .Packed => return false,
             else => return ty.hasRuntimeBits(mod),
         },
@@ -11176,8 +11174,8 @@ fn isScalar(mod: *Module, ty: Type) bool {
         .Vector,
         => true,
 
-        .Struct => ty.containerLayout() == .Packed,
-        .Union => ty.containerLayout() == .Packed,
+        .Struct => ty.containerLayout(mod) == .Packed,
+        .Union => ty.containerLayout(mod) == .Packed,
         else => false,
     };
 }
src/codegen/spirv.zig
@@ -685,7 +685,7 @@ pub const DeclGen = struct {
                     if (ty.isSimpleTupleOrAnonStruct()) {
                         unreachable; // TODO
                     } else {
-                        const struct_ty = ty.castTag(.@"struct").?.data;
+                        const struct_ty = mod.typeToStruct(ty).?;
 
                         if (struct_ty.layout == .Packed) {
                             return dg.todo("packed struct constants", .{});
@@ -1306,7 +1306,7 @@ pub const DeclGen = struct {
                     } });
                 }
 
-                const struct_ty = ty.castTag(.@"struct").?.data;
+                const struct_ty = mod.typeToStruct(ty).?;
 
                 if (struct_ty.layout == .Packed) {
                     return try self.resolveType(struct_ty.backing_int_ty, .direct);
@@ -2576,7 +2576,7 @@ pub const DeclGen = struct {
         const struct_ty = self.typeOf(struct_field.struct_operand);
         const object_id = try self.resolve(struct_field.struct_operand);
         const field_index = struct_field.field_index;
-        const field_ty = struct_ty.structFieldType(field_index);
+        const field_ty = struct_ty.structFieldType(field_index, mod);
 
         if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
 
@@ -2595,7 +2595,7 @@ pub const DeclGen = struct {
         const mod = self.module;
         const object_ty = object_ptr_ty.childType(mod);
         switch (object_ty.zigTypeTag(mod)) {
-            .Struct => switch (object_ty.containerLayout()) {
+            .Struct => switch (object_ty.containerLayout(mod)) {
                 .Packed => unreachable, // TODO
                 else => {
                     const field_index_ty_ref = try self.intType(.unsigned, 32);
src/link/Dwarf.zig
@@ -360,13 +360,13 @@ pub const DeclState = struct {
                         dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
                         dbg_info_buffer.appendAssumeCapacity(0);
 
-                        const struct_obj = ty.castTag(.@"struct").?.data;
+                        const struct_obj = mod.typeToStruct(ty).?;
                         if (struct_obj.layout == .Packed) {
                             log.debug("TODO implement .debug_info for packed structs", .{});
                             break :blk;
                         }
 
-                        const fields = ty.structFields();
+                        const fields = ty.structFields(mod);
                         for (fields.keys(), 0..) |field_name, field_index| {
                             const field = fields.get(field_name).?;
                             if (!field.ty.hasRuntimeBits(mod)) continue;
src/codegen.zig
@@ -503,8 +503,8 @@ pub fn generateSymbol(
             return Result.ok;
         },
         .Struct => {
-            if (typed_value.ty.containerLayout() == .Packed) {
-                const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
+            if (typed_value.ty.containerLayout(mod) == .Packed) {
+                const struct_obj = mod.typeToStruct(typed_value.ty).?;
                 const fields = struct_obj.fields.values();
                 const field_vals = typed_value.val.castTag(.aggregate).?.data;
                 const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
@@ -539,7 +539,7 @@ pub fn generateSymbol(
             const struct_begin = code.items.len;
             const field_vals = typed_value.val.castTag(.aggregate).?.data;
             for (field_vals, 0..) |field_val, index| {
-                const field_ty = typed_value.ty.structFieldType(index);
+                const field_ty = typed_value.ty.structFieldType(index, mod);
                 if (!field_ty.hasRuntimeBits(mod)) continue;
 
                 switch (try generateSymbol(bin_file, src_loc, .{
src/InternPool.zig
@@ -1,5 +1,10 @@
 //! All interned objects have both a value and a type.
+//! This data structure is self-contained, with the following exceptions:
+//! * type_struct via Module.Struct.Index
+//! * type_opaque via Module.Namespace.Index and Module.Decl.Index
 
+/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
+/// constructed lazily.
 map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
 items: std.MultiArrayList(Item) = .{},
 extra: std.ArrayListUnmanaged(u32) = .{},
@@ -9,6 +14,13 @@ extra: std.ArrayListUnmanaged(u32) = .{},
 /// violate the above mechanism.
 limbs: std.ArrayListUnmanaged(u64) = .{},
 
+/// Struct objects are stored in this data structure because:
+/// * They contain pointers such as the field maps.
+/// * They need to be mutated after creation.
+allocated_structs: std.SegmentedList(Module.Struct, 0) = .{},
+/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack.
+structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{},
+
 const std = @import("std");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -17,8 +29,7 @@ const BigIntMutable = std.math.big.int.Mutable;
 const Limb = std.math.big.Limb;
 
 const InternPool = @This();
-const DeclIndex = @import("Module.zig").Decl.Index;
-const NamespaceIndex = @import("Module.zig").Namespace.Index;
+const Module = @import("Module.zig");
 
 const KeyAdapter = struct {
     intern_pool: *const InternPool,
@@ -45,11 +56,20 @@ pub const Key = union(enum) {
         payload_type: Index,
     },
     simple_type: SimpleType,
+    /// If `empty_struct_type` is handled separately, then this value may be
+    /// safely assumed to never be `none`.
+    struct_type: StructType,
+    union_type: struct {
+        fields_len: u32,
+        // TODO move Module.Union data to InternPool
+    },
+    opaque_type: OpaqueType,
+
     simple_value: SimpleValue,
     extern_func: struct {
         ty: Index,
         /// The Decl that corresponds to the function itself.
-        decl: DeclIndex,
+        decl: Module.Decl.Index,
         /// Library name if specified.
         /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
         /// Index into the string table bytes.
@@ -62,13 +82,11 @@ pub const Key = union(enum) {
         ty: Index,
         tag: BigIntConst,
     },
-    struct_type: StructType,
-    opaque_type: OpaqueType,
-
-    union_type: struct {
-        fields_len: u32,
-        // TODO move Module.Union data to InternPool
-    },
+    /// An instance of a struct, array, or vector.
+    /// Each element/field stored as an `Index`.
+    /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
+    /// so the slice length will be one more than the type's array length.
+    aggregate: Aggregate,
 
     pub const IntType = std.builtin.Type.Int;
 
@@ -113,16 +131,27 @@ pub const Key = union(enum) {
         child: Index,
     };
 
-    pub const StructType = struct {
-        fields_len: u32,
-        // TODO move Module.Struct data to InternPool
-    };
-
     pub const OpaqueType = struct {
         /// The Decl that corresponds to the opaque itself.
-        decl: DeclIndex,
+        decl: Module.Decl.Index,
         /// Represents the declarations inside this opaque.
-        namespace: NamespaceIndex,
+        namespace: Module.Namespace.Index,
+    };
+
+    /// There are three possibilities here:
+    /// * `@TypeOf(.{})` (untyped empty struct literal)
+    ///   - namespace == .none, index == .none
+    /// * A struct which has a namespace, but no fields.
+    ///   - index == .none
+    /// * A struct which has fields as well as a namespace.
+    pub const StructType = struct {
+        /// This will be `none` only in the case of `@TypeOf(.{})`
+        /// (`Index.empty_struct_type`).
+        namespace: Module.Namespace.OptionalIndex,
+        /// The `none` tag is used to represent two cases:
+        /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`.
+        /// * A struct with no fields, in which case `namespace` will be populated.
+        index: Module.Struct.OptionalIndex,
     };
 
     pub const Int = struct {
@@ -156,18 +185,24 @@ pub const Key = union(enum) {
         addr: Addr,
 
         pub const Addr = union(enum) {
-            decl: DeclIndex,
+            decl: Module.Decl.Index,
             int: Index,
         };
     };
 
     /// `null` is represented by the `val` field being `none`.
     pub const Opt = struct {
+        /// This is the optional type; not the payload type.
         ty: Index,
         /// This could be `none`, indicating the optional is `null`.
         val: Index,
     };
 
+    pub const Aggregate = struct {
+        ty: Index,
+        fields: []const Index,
+    };
+
     pub fn hash32(key: Key) u32 {
         return @truncate(u32, key.hash64());
     }
@@ -193,8 +228,15 @@ pub const Key = union(enum) {
             .simple_value,
             .extern_func,
             .opt,
+            .struct_type,
             => |info| std.hash.autoHash(hasher, info),
 
+            .union_type => |union_type| {
+                _ = union_type;
+                @panic("TODO");
+            },
+            .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
+
             .int => |int| {
                 // Canonicalize all integers by converting them to BigIntConst.
                 var buffer: Key.Int.Storage.BigIntSpace = undefined;
@@ -221,16 +263,10 @@ pub const Key = union(enum) {
                 for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb);
             },
 
-            .struct_type => |struct_type| {
-                if (struct_type.fields_len != 0) {
-                    @panic("TODO");
-                }
-            },
-            .union_type => |union_type| {
-                _ = union_type;
-                @panic("TODO");
+            .aggregate => |aggregate| {
+                std.hash.autoHash(hasher, aggregate.ty);
+                for (aggregate.fields) |field| std.hash.autoHash(hasher, field);
             },
-            .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
         }
     }
 
@@ -280,6 +316,10 @@ pub const Key = union(enum) {
                 const b_info = b.opt;
                 return std.meta.eql(a_info, b_info);
             },
+            .struct_type => |a_info| {
+                const b_info = b.struct_type;
+                return std.meta.eql(a_info, b_info);
+            },
 
             .ptr => |a_info| {
                 const b_info = b.ptr;
@@ -331,16 +371,6 @@ pub const Key = union(enum) {
                 @panic("TODO");
             },
 
-            .struct_type => |a_info| {
-                const b_info = b.struct_type;
-
-                // TODO: remove this special case for empty_struct
-                if (a_info.fields_len == 0 and b_info.fields_len == 0)
-                    return true;
-
-                @panic("TODO");
-            },
-
             .union_type => |a_info| {
                 const b_info = b.union_type;
 
@@ -353,6 +383,11 @@ pub const Key = union(enum) {
                 const b_info = b.opaque_type;
                 return a_info.decl == b_info.decl;
             },
+            .aggregate => |a_info| {
+                const b_info = b.aggregate;
+                if (a_info.ty != b_info.ty) return false;
+                return std.mem.eql(Index, a_info.fields, b_info.fields);
+            },
         }
     }
 
@@ -375,6 +410,7 @@ pub const Key = union(enum) {
             .opt,
             .extern_func,
             .enum_tag,
+            .aggregate,
             => |x| return x.ty,
 
             .simple_value => |s| switch (s) {
@@ -471,6 +507,7 @@ pub const Index = enum(u32) {
     anyerror_void_error_union_type,
     generic_poison_type,
     var_args_param_type,
+    /// `@TypeOf(.{})`
     empty_struct_type,
 
     /// `undefined` (untyped)
@@ -691,7 +728,8 @@ pub const static_keys = [_]Key{
 
     // empty_struct_type
     .{ .struct_type = .{
-        .fields_len = 0,
+        .namespace = .none,
+        .index = .none,
     } },
 
     .{ .simple_value = .undefined },
@@ -792,16 +830,18 @@ pub const Tag = enum(u8) {
     /// An opaque type.
     /// data is index of Key.OpaqueType in extra.
     type_opaque,
+    /// A struct type.
+    /// data is Module.Struct.OptionalIndex
+    /// The `none` tag is used to represent `@TypeOf(.{})`.
+    type_struct,
+    /// A struct type that has only a namespace; no fields, and there is no
+    /// Module.Struct object allocated for it.
+    /// data is Module.Namespace.Index.
+    type_struct_ns,
 
     /// A value that can be represented with only an enum tag.
     /// data is SimpleValue enum value.
     simple_value,
-    /// The SimpleType and SimpleValue enums are exposed via the InternPool API using
-    /// SimpleType and SimpleValue as the Key data themselves.
-    /// This tag is for miscellaneous types and values that can be represented with
-    /// only an enum tag, but will be presented via the API with a different Key.
-    /// data is SimpleInternal enum value.
-    simple_internal,
     /// A pointer to an integer value.
     /// data is extra index of PtrInt, which contains the type and address.
     /// Only pointer types are allowed to have this encoding. Optional types must use
@@ -809,6 +849,8 @@ pub const Tag = enum(u8) {
     ptr_int,
     /// An optional value that is non-null.
     /// data is Index of the payload value.
+    /// In order to use this encoding, one must ensure that the `InternPool`
+    /// already contains the optional type corresponding to this payload.
     opt_payload,
     /// An optional value that is null.
     /// data is Index of the payload type.
@@ -859,6 +901,13 @@ pub const Tag = enum(u8) {
     extern_func,
     /// A regular function.
     func,
+    /// This represents the only possible value for *some* types which have
+    /// only one possible value. Not all only-possible-values are encoded this way;
+    /// for example structs which have all comptime fields are not encoded this way.
+    /// The set of values that are encoded this way is:
+    /// * A struct which has 0 fields.
+    /// data is Index of the type, which is known to be zero bits at runtime.
+    only_possible_value,
 };
 
 /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
@@ -912,9 +961,12 @@ pub const SimpleType = enum(u32) {
 };
 
 pub const SimpleValue = enum(u32) {
+    /// This is untyped `undefined`.
     undefined,
     void,
+    /// This is untyped `null`.
     null,
+    /// This is the untyped empty struct literal: `.{}`
     empty_struct,
     true,
     false,
@@ -923,12 +975,6 @@ pub const SimpleValue = enum(u32) {
     generic_poison,
 };
 
-pub const SimpleInternal = enum(u32) {
-    /// This is the empty struct type. Note that empty_struct value is exposed
-    /// via SimpleValue.
-    type_empty_struct,
-};
-
 pub const Pointer = struct {
     child: Index,
     sentinel: Index,
@@ -1005,7 +1051,7 @@ pub const ErrorUnion = struct {
 /// 0. field name: null-terminated string index for each fields_len; declaration order
 pub const EnumSimple = struct {
     /// The Decl that corresponds to the enum itself.
-    decl: DeclIndex,
+    decl: Module.Decl.Index,
     /// An integer type which is used for the numerical value of the enum. This
     /// is inferred by Zig to be the smallest power of two unsigned int that
     /// fits the number of fields. It is stored here to avoid unnecessary
@@ -1091,6 +1137,10 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
     ip.items.deinit(gpa);
     ip.extra.deinit(gpa);
     ip.limbs.deinit(gpa);
+
+    ip.structs_free_list.deinit(gpa);
+    ip.allocated_structs.deinit(gpa);
+
     ip.* = undefined;
 }
 
@@ -1167,20 +1217,38 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .type_enum_simple => @panic("TODO"),
 
         .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
-
-        .simple_internal => switch (@intToEnum(SimpleInternal, data)) {
-            .type_empty_struct => .{ .struct_type = .{
-                .fields_len = 0,
-            } },
+        .type_struct => {
+            const struct_index = @intToEnum(Module.Struct.OptionalIndex, data);
+            const namespace = if (struct_index.unwrap()) |i|
+                ip.structPtrConst(i).namespace.toOptional()
+            else
+                .none;
+            return .{ .struct_type = .{
+                .index = struct_index,
+                .namespace = namespace,
+            } };
         },
+        .type_struct_ns => .{ .struct_type = .{
+            .index = .none,
+            .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(),
+        } },
+
         .opt_null => .{ .opt = .{
             .ty = @intToEnum(Index, data),
             .val = .none,
         } },
-        .opt_payload => .{ .opt = .{
-            .ty = indexToKey(ip, @intToEnum(Index, data)).typeOf(),
-            .val = @intToEnum(Index, data),
-        } },
+        .opt_payload => {
+            const payload_val = @intToEnum(Index, data);
+            // The existence of `opt_payload` guarantees that the optional type will be
+            // stored in the `InternPool`.
+            const opt_ty = ip.getAssumeExists(.{
+                .opt_type = indexToKey(ip, payload_val).typeOf(),
+            });
+            return .{ .opt = .{
+                .ty = opt_ty,
+                .val = payload_val,
+            } };
+        },
         .ptr_int => {
             const info = ip.extraData(PtrInt, data);
             return .{ .ptr = .{
@@ -1225,6 +1293,16 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .float_f128 => @panic("TODO"),
         .extern_func => @panic("TODO"),
         .func => @panic("TODO"),
+        .only_possible_value => {
+            const ty = @intToEnum(Index, data);
+            return switch (ip.indexToKey(ty)) {
+                .struct_type => .{ .aggregate = .{
+                    .ty = ty,
+                    .fields = &.{},
+                } },
+                else => unreachable,
+            };
+        },
     };
 }
 
@@ -1359,12 +1437,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
         },
 
         .struct_type => |struct_type| {
-            if (struct_type.fields_len != 0) {
-                @panic("TODO"); // handle structs other than empty_struct
-            }
-            ip.items.appendAssumeCapacity(.{
-                .tag = .simple_internal,
-                .data = @enumToInt(SimpleInternal.type_empty_struct),
+            ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{
+                .tag = .type_struct,
+                .data = @enumToInt(i),
+            } else if (struct_type.namespace.unwrap()) |i| .{
+                .tag = .type_struct_ns,
+                .data = @enumToInt(i),
+            } else .{
+                .tag = .type_struct,
+                .data = @enumToInt(Module.Struct.OptionalIndex.none),
             });
         },
 
@@ -1398,6 +1479,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
 
         .opt => |opt| {
             assert(opt.ty != .none);
+            assert(ip.isOptionalType(opt.ty));
             ip.items.appendAssumeCapacity(if (opt.val == .none) .{
                 .tag = .opt_null,
                 .data = @enumToInt(opt.ty),
@@ -1549,10 +1631,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative;
             try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs);
         },
+
+        .aggregate => |aggregate| {
+            if (aggregate.fields.len == 0) {
+                ip.items.appendAssumeCapacity(.{
+                    .tag = .only_possible_value,
+                    .data = @enumToInt(aggregate.ty),
+                });
+                return @intToEnum(Index, ip.items.len - 1);
+            }
+            @panic("TODO");
+        },
     }
     return @intToEnum(Index, ip.items.len - 1);
 }
 
+pub fn getAssumeExists(ip: InternPool, key: Key) Index {
+    const adapter: KeyAdapter = .{ .intern_pool = &ip };
+    const index = ip.map.getIndexAdapted(key, adapter).?;
+    return @intToEnum(Index, index);
+}
+
+/// This operation only happens under compile error conditions.
+/// Leak the index until the next garbage collection.
+pub fn remove(ip: *InternPool, index: Index) void {
+    _ = ip;
+    _ = index;
+    @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?");
+}
+
 fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void {
     const limbs_len = @intCast(u32, limbs.len);
     try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len);
@@ -1578,8 +1685,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
         ip.extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
             Index => @enumToInt(@field(extra, field.name)),
-            DeclIndex => @enumToInt(@field(extra, field.name)),
-            NamespaceIndex => @enumToInt(@field(extra, field.name)),
+            Module.Decl.Index => @enumToInt(@field(extra, field.name)),
+            Module.Namespace.Index => @enumToInt(@field(extra, field.name)),
             i32 => @bitCast(u32, @field(extra, field.name)),
             Pointer.Flags => @bitCast(u32, @field(extra, field.name)),
             Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
@@ -1635,8 +1742,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T {
         @field(result, field.name) = switch (field.type) {
             u32 => int32,
             Index => @intToEnum(Index, int32),
-            DeclIndex => @intToEnum(DeclIndex, int32),
-            NamespaceIndex => @intToEnum(NamespaceIndex, int32),
+            Module.Decl.Index => @intToEnum(Module.Decl.Index, int32),
+            Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32),
             i32 => @bitCast(i32, int32),
             Pointer.Flags => @bitCast(Pointer.Flags, int32),
             Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
@@ -1808,6 +1915,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
     }
 }
 
+pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex {
+    const tags = ip.items.items(.tag);
+    if (val == .none) return .none;
+    if (tags[@enumToInt(val)] != .type_struct) return .none;
+    const datas = ip.items.items(.data);
+    return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional();
+}
+
+pub fn isOptionalType(ip: InternPool, ty: Index) bool {
+    const tags = ip.items.items(.tag);
+    if (ty == .none) return false;
+    return tags[@enumToInt(ty)] == .type_optional;
+}
+
 pub fn dump(ip: InternPool) void {
     dumpFallible(ip, std.heap.page_allocator) catch return;
 }
@@ -1859,9 +1980,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .type_error_union => @sizeOf(ErrorUnion),
             .type_enum_simple => @sizeOf(EnumSimple),
             .type_opaque => @sizeOf(Key.OpaqueType),
+            .type_struct => 0,
+            .type_struct_ns => 0,
             .simple_type => 0,
             .simple_value => 0,
-            .simple_internal => 0,
             .ptr_int => @sizeOf(PtrInt),
             .opt_null => 0,
             .opt_payload => 0,
@@ -1887,6 +2009,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .float_f128 => @sizeOf(Float128),
             .extern_func => @panic("TODO"),
             .func => @panic("TODO"),
+            .only_possible_value => 0,
         });
     }
     const SortContext = struct {
@@ -1905,3 +2028,34 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
         });
     }
 }
+
+pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct {
+    return ip.allocated_structs.at(@enumToInt(index));
+}
+
+pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct {
+    return ip.allocated_structs.at(@enumToInt(index));
+}
+
+pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct {
+    return structPtrConst(ip, index.unwrap() orelse return null);
+}
+
+pub fn createStruct(
+    ip: *InternPool,
+    gpa: Allocator,
+    initialization: Module.Struct,
+) Allocator.Error!Module.Struct.Index {
+    // Recycled slots were set to `undefined` by `destroyStruct`; they must be
+    // re-initialized before being handed back to the caller.
+    if (ip.structs_free_list.popOrNull()) |index| {
+        ip.structPtr(index).* = initialization;
+        return index;
+    }
+    const ptr = try ip.allocated_structs.addOne(gpa);
+    ptr.* = initialization;
+    return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1);
+}
+
+pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void {
+    ip.structPtr(index).* = undefined;
+    ip.structs_free_list.append(gpa, index) catch {
+        // In order to keep `destroyStruct` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Struct until garbage collection.
+    };
+}
src/Module.zig
@@ -839,11 +839,14 @@ pub const Decl = struct {
 
     /// If the Decl has a value and it is a struct, return it,
     /// otherwise null.
-    pub fn getStruct(decl: *Decl) ?*Struct {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
-        return struct_obj;
+    pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct {
+        return mod.structPtrUnwrap(getStructIndex(decl, mod));
+    }
+
+    pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
+        if (!decl.owns_tv) return .none;
+        const ty = (decl.val.castTag(.ty) orelse return .none).data;
+        return mod.intern_pool.indexToStruct(ty.ip_index);
     }
 
     /// If the Decl has a value and it is a union, return it,
@@ -884,32 +887,29 @@ pub const Decl = struct {
     /// Only returns it if the Decl is the owner.
     pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex {
         if (!decl.owns_tv) return .none;
-        if (decl.val.ip_index == .none) {
-            const ty = (decl.val.castTag(.ty) orelse return .none).data;
-            switch (ty.tag()) {
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.namespace.toOptional();
-                },
-                .enum_full, .enum_nonexhaustive => {
-                    const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
-                    return enum_obj.namespace.toOptional();
-                },
-                .empty_struct => {
-                    @panic("TODO");
-                },
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Type.Payload.Union).?.data;
-                    return union_obj.namespace.toOptional();
-                },
+        switch (decl.val.ip_index) {
+            .empty_struct_type => return .none,
+            .none => {
+                const ty = (decl.val.castTag(.ty) orelse return .none).data;
+                switch (ty.tag()) {
+                    .enum_full, .enum_nonexhaustive => {
+                        const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
+                        return enum_obj.namespace.toOptional();
+                    },
+                    .@"union", .union_safety_tagged, .union_tagged => {
+                        const union_obj = ty.cast(Type.Payload.Union).?.data;
+                        return union_obj.namespace.toOptional();
+                    },
 
-                else => return .none,
-            }
+                    else => return .none,
+                }
+            },
+            else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+                .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+                .struct_type => |struct_type| struct_type.namespace,
+                else => .none,
+            },
         }
-        return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
-            .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
-            else => .none,
-        };
     }
 
     /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer.
@@ -1046,6 +1046,28 @@ pub const Struct = struct {
     is_tuple: bool,
     assumed_runtime_bits: bool = false,
 
+    pub const Index = enum(u32) {
+        _,
+
+        pub fn toOptional(i: Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(i));
+        }
+    };
+
+    pub const OptionalIndex = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn init(oi: ?Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+        }
+
+        pub fn unwrap(oi: OptionalIndex) ?Index {
+            if (oi == .none) return null;
+            return @intToEnum(Index, @enumToInt(oi));
+        }
+    };
+
     pub const Fields = std.StringArrayHashMapUnmanaged(Field);
 
     /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
@@ -1111,12 +1133,7 @@ pub const Struct = struct {
     }
 
     pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(s.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(mod),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
+        return mod.declPtr(s.owner_decl).srcLoc(mod);
     }
 
     pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc {
@@ -3622,6 +3639,16 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
     return mod.allocated_namespaces.at(@enumToInt(index));
 }
 
+pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
+    return mod.intern_pool.structPtr(index);
+}
+
+/// This one accepts an index from the InternPool and asserts that it is not
+/// the anonymous empty struct type.
+pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
+    return structPtr(mod, index.unwrap() orelse return null);
+}
+
 /// Returns true if and only if the Decl is the top level struct associated with a File.
 pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
     const decl = mod.declPtr(decl_index);
@@ -4078,7 +4105,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
 
         if (!decl.owns_tv) continue;
 
-        if (decl.getStruct()) |struct_obj| {
+        if (decl.getStruct(mod)) |struct_obj| {
             struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
                 try file.deleted_decls.append(gpa, decl_index);
                 continue;
@@ -4597,36 +4624,50 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
     errdefer new_decl_arena.deinit();
     const new_decl_arena_allocator = new_decl_arena.allocator();
 
-    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
-    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
-    const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
-    const ty_ty = comptime Type.type;
-    struct_obj.* = .{
-        .owner_decl = undefined, // set below
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the struct type gains an
+    // InternPool index.
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = .none,
+        .ty = undefined,
+        .file_scope = file,
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null);
+    const new_decl = mod.declPtr(new_decl_index);
+    errdefer @panic("TODO error handling");
+
+    const struct_index = try mod.createStruct(.{
+        .owner_decl = new_decl_index,
         .fields = .{},
         .zir_index = undefined, // set below
         .layout = .Auto,
         .status = .none,
         .known_non_opv = undefined,
         .is_tuple = undefined, // set below
-        .namespace = try mod.createNamespace(.{
-            .parent = .none,
-            .ty = struct_ty,
-            .file_scope = file,
-        }),
-    };
-    const new_decl_index = try mod.allocateNewDecl(struct_obj.namespace, 0, null);
-    const new_decl = mod.declPtr(new_decl_index);
+        .namespace = new_namespace_index,
+    });
+    errdefer mod.destroyStruct(struct_index);
+
+    const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
+        .index = struct_index.toOptional(),
+        .namespace = new_namespace_index.toOptional(),
+    } });
+    errdefer mod.intern_pool.remove(struct_ty);
+
+    new_namespace.ty = struct_ty.toType();
     file.root_decl = new_decl_index.toOptional();
-    struct_obj.owner_decl = new_decl_index;
+
     new_decl.name = try file.fullyQualifiedNameZ(gpa);
     new_decl.src_line = 0;
     new_decl.is_pub = true;
     new_decl.is_exported = false;
     new_decl.has_align = false;
     new_decl.has_linksection_or_addrspace = false;
-    new_decl.ty = ty_ty;
-    new_decl.val = struct_val;
+    new_decl.ty = Type.type;
+    new_decl.val = struct_ty.toValue();
     new_decl.@"align" = 0;
     new_decl.@"linksection" = null;
     new_decl.has_tv = true;
@@ -4639,6 +4680,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
     if (file.status == .success_zir) {
         assert(file.zir_loaded);
         const main_struct_inst = Zir.main_struct_inst;
+        const struct_obj = mod.structPtr(struct_index);
         struct_obj.zir_index = main_struct_inst;
         const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
         const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
@@ -4665,7 +4707,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
         var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null);
         defer wip_captures.deinit();
 
-        if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| {
+        if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
             try wip_captures.finalize();
             new_decl.analysis = .complete;
         } else |err| switch (err) {
@@ -4761,11 +4803,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     if (mod.declIsRoot(decl_index)) {
         log.debug("semaDecl root {*} ({s})", .{ decl, decl.name });
         const main_struct_inst = Zir.main_struct_inst;
-        const struct_obj = decl.getStruct().?;
+        const struct_index = decl.getStructIndex(mod).unwrap().?;
+        const struct_obj = mod.structPtr(struct_index);
         // This might not have gotten set in `semaFile` if the first time had
         // a ZIR failure, so we set it here in case.
         struct_obj.zir_index = main_struct_inst;
-        try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj);
+        try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
         decl.analysis = .complete;
         decl.generation = mod.generation;
         return false;
@@ -5970,6 +6013,14 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
     };
 }
 
+pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
+    return mod.intern_pool.createStruct(mod.gpa, initialization);
+}
+
+pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
+    return mod.intern_pool.destroyStruct(mod.gpa, index);
+}
+
 pub fn allocateNewDecl(
     mod: *Module,
     namespace: Namespace.Index,
@@ -7202,12 +7253,7 @@ pub fn atomicPtrAlignment(
 }
 
 pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc {
-    const owner_decl = mod.declPtr(opaque_type.decl);
-    return .{
-        .file_scope = owner_decl.getFileScope(mod),
-        .parent_decl_node = owner_decl.src_node,
-        .lazy = LazySrcLoc.nodeOffset(0),
-    };
+    return mod.declPtr(opaque_type.decl).srcLoc(mod);
 }
 
 pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 {
@@ -7221,3 +7267,12 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
 pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index {
     return mod.namespacePtr(namespace_index).getDeclIndex(mod);
 }
+
+/// Returns null in the following cases:
+/// * `@TypeOf(.{})`
+/// * A struct which has no fields (`struct {}`).
+/// * Not a struct.
+pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
+    const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null;
+    return mod.structPtr(struct_index);
+}
src/Sema.zig
@@ -2090,16 +2090,17 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty:
 }
 
 fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
+    const mod = sema.mod;
     const msg = msg: {
         const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{});
         errdefer msg.destroy(sema.gpa);
 
-        const struct_ty = container_ty.castTag(.@"struct") orelse break :msg msg;
-        const default_value_src = struct_ty.data.fieldSrcLoc(sema.mod, .{
+        const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg;
+        const default_value_src = struct_ty.fieldSrcLoc(mod, .{
             .index = field_index,
             .range = .value,
         });
-        try sema.mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{});
+        try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{});
         break :msg msg;
     };
     return sema.failWithOwnedErrorMsg(msg);
@@ -2632,8 +2633,10 @@ pub fn analyzeStructDecl(
     sema: *Sema,
     new_decl: *Decl,
     inst: Zir.Inst.Index,
-    struct_obj: *Module.Struct,
+    struct_index: Module.Struct.Index,
 ) SemaError!void {
+    const mod = sema.mod;
+    const struct_obj = mod.structPtr(struct_index);
     const extended = sema.code.instructions.items(.data)[inst].extended;
     assert(extended.opcode == .struct_decl);
     const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
@@ -2662,7 +2665,7 @@ pub fn analyzeStructDecl(
         }
     }
 
-    _ = try sema.mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl);
+    _ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl);
 }
 
 fn zirStructDecl(
@@ -2671,28 +2674,38 @@ fn zirStructDecl(
     extended: Zir.Inst.Extended.InstData,
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
+    const gpa = sema.gpa;
     const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
     const src: LazySrcLoc = if (small.has_src_node) blk: {
         const node_offset = @bitCast(i32, sema.code.extra[extended.operand]);
         break :blk LazySrcLoc.nodeOffset(node_offset);
     } else sema.src;
 
-    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
+    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
     errdefer new_decl_arena.deinit();
-    const new_decl_arena_allocator = new_decl_arena.allocator();
 
-    const mod = sema.mod;
-    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
-    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
-    const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the struct type gains an
+    // InternPool index.
+
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = struct_val,
+        .val = undefined,
     }, small.name_strategy, "struct", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer mod.abortAnonDecl(new_decl_index);
-    struct_obj.* = .{
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const struct_index = try mod.createStruct(.{
         .owner_decl = new_decl_index,
         .fields = .{},
         .zir_index = inst,
@@ -2700,13 +2713,20 @@ fn zirStructDecl(
         .status = .none,
         .known_non_opv = undefined,
         .is_tuple = small.is_tuple,
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = struct_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
-    try sema.analyzeStructDecl(new_decl, inst, struct_obj);
+        .namespace = new_namespace_index,
+    });
+    errdefer mod.destroyStruct(struct_index);
+
+    const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
+        .index = struct_index.toOptional(),
+        .namespace = new_namespace_index.toOptional(),
+    } });
+    errdefer mod.intern_pool.remove(struct_ty);
+
+    new_decl.val = struct_ty.toValue();
+    new_namespace.ty = struct_ty.toType();
+
+    try sema.analyzeStructDecl(new_decl, inst, struct_index);
     try new_decl.finalizeNewArena(&new_decl_arena);
     return sema.analyzeDeclVal(block, src, new_decl_index);
 }
@@ -2721,6 +2741,7 @@ fn createAnonymousDeclTypeNamed(
     inst: ?Zir.Inst.Index,
 ) !Decl.Index {
     const mod = sema.mod;
+    const gpa = sema.gpa;
     const namespace = block.namespace;
     const src_scope = block.wip_capture_scope;
     const src_decl = mod.declPtr(block.src_decl);
@@ -2736,16 +2757,16 @@ fn createAnonymousDeclTypeNamed(
             // semantically analyzed.
             // This name is also used as the key in the parent namespace so it cannot be
             // renamed.
-            const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{
+            const name = try std.fmt.allocPrintZ(gpa, "{s}__{s}_{d}", .{
                 src_decl.name, anon_prefix, @enumToInt(new_decl_index),
             });
-            errdefer sema.gpa.free(name);
+            errdefer gpa.free(name);
             try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
             return new_decl_index;
         },
         .parent => {
-            const name = try sema.gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
-            errdefer sema.gpa.free(name);
+            const name = try gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
+            errdefer gpa.free(name);
             try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
             return new_decl_index;
         },
@@ -2753,7 +2774,7 @@ fn createAnonymousDeclTypeNamed(
             const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst);
             const zir_tags = sema.code.instructions.items(.tag);
 
-            var buf = std.ArrayList(u8).init(sema.gpa);
+            var buf = std.ArrayList(u8).init(gpa);
             defer buf.deinit();
             try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
             try buf.appendSlice("(");
@@ -2781,7 +2802,7 @@ fn createAnonymousDeclTypeNamed(
 
             try buf.appendSlice(")");
             const name = try buf.toOwnedSliceSentinel(0);
-            errdefer sema.gpa.free(name);
+            errdefer gpa.free(name);
             try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
             return new_decl_index;
         },
@@ -2794,10 +2815,10 @@ fn createAnonymousDeclTypeNamed(
                 .dbg_var_ptr, .dbg_var_val => {
                     if (zir_data[i].str_op.operand != ref) continue;
 
-                    const name = try std.fmt.allocPrintZ(sema.gpa, "{s}.{s}", .{
+                    const name = try std.fmt.allocPrintZ(gpa, "{s}.{s}", .{
                         src_decl.name, zir_data[i].str_op.getStr(sema.code),
                     });
-                    errdefer sema.gpa.free(name);
+                    errdefer gpa.free(name);
 
                     try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
                     return new_decl_index;
@@ -3216,13 +3237,13 @@ fn zirOpaqueDecl(
         .file_scope = block.getFileScope(mod),
     });
     const new_namespace = mod.namespacePtr(new_namespace_index);
-    errdefer @panic("TODO error handling");
+    errdefer mod.destroyNamespace(new_namespace_index);
 
     const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{
         .decl = new_decl_index,
         .namespace = new_namespace_index,
     } });
-    errdefer @panic("TODO error handling");
+    errdefer mod.intern_pool.remove(opaque_ty);
 
     new_decl.val = opaque_ty.toValue();
     new_namespace.ty = opaque_ty.toType();
@@ -3960,7 +3981,7 @@ fn zirArrayBasePtr(
     const elem_ty = sema.typeOf(base_ptr).childType(mod);
     switch (elem_ty.zigTypeTag(mod)) {
         .Array, .Vector => return base_ptr,
-        .Struct => if (elem_ty.isTuple()) {
+        .Struct => if (elem_ty.isTuple(mod)) {
             // TODO validate element count
             return base_ptr;
         },
@@ -4150,7 +4171,7 @@ fn validateArrayInitTy(
             }
             return;
         },
-        .Struct => if (ty.isTuple()) {
+        .Struct => if (ty.isTuple(mod)) {
             _ = try sema.resolveTypeFields(ty);
             const array_len = ty.arrayLen(mod);
             if (extra.init_count > array_len) {
@@ -4358,7 +4379,7 @@ fn validateStructInit(
     const gpa = sema.gpa;
 
     // Maps field index to field_ptr index of where it was already initialized.
-    const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount());
+    const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod));
     defer gpa.free(found_fields);
     @memset(found_fields, 0);
 
@@ -4370,7 +4391,7 @@ fn validateStructInit(
         const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
         struct_ptr_zir_ref = field_ptr_extra.lhs;
         const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
-        const field_index = if (struct_ty.isTuple())
+        const field_index = if (struct_ty.isTuple(mod))
             try sema.tupleFieldIndex(block, struct_ty, field_name, field_src)
         else
             try sema.structFieldIndex(block, struct_ty, field_name, field_src);
@@ -4403,9 +4424,9 @@ fn validateStructInit(
         for (found_fields, 0..) |field_ptr, i| {
             if (field_ptr != 0) continue;
 
-            const default_val = struct_ty.structFieldDefaultValue(i);
+            const default_val = struct_ty.structFieldDefaultValue(i, mod);
             if (default_val.ip_index == .unreachable_value) {
-                if (struct_ty.isTuple()) {
+                if (struct_ty.isTuple(mod)) {
                     const template = "missing tuple field with index {d}";
                     if (root_msg) |msg| {
                         try sema.errNote(block, init_src, msg, template, .{i});
@@ -4414,7 +4435,7 @@ fn validateStructInit(
                     }
                     continue;
                 }
-                const field_name = struct_ty.structFieldName(i);
+                const field_name = struct_ty.structFieldName(i, mod);
                 const template = "missing struct field: {s}";
                 const args = .{field_name};
                 if (root_msg) |msg| {
@@ -4426,7 +4447,7 @@ fn validateStructInit(
             }
 
             const field_src = init_src; // TODO better source location
-            const default_field_ptr = if (struct_ty.isTuple())
+            const default_field_ptr = if (struct_ty.isTuple(mod))
                 try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
             else
                 try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
@@ -4436,11 +4457,11 @@ fn validateStructInit(
         }
 
         if (root_msg) |msg| {
-            if (struct_ty.castTag(.@"struct")) |struct_obj| {
-                const fqn = try struct_obj.data.getFullyQualifiedName(mod);
+            if (mod.typeToStruct(struct_ty)) |struct_obj| {
+                const fqn = try struct_obj.getFullyQualifiedName(mod);
                 defer gpa.free(fqn);
                 try mod.errNoteNonLazy(
-                    struct_obj.data.srcLoc(mod),
+                    struct_obj.srcLoc(mod),
                     msg,
                     "struct '{s}' declared here",
                     .{fqn},
@@ -4463,12 +4484,12 @@ fn validateStructInit(
 
     // We collect the comptime field values in case the struct initialization
     // ends up being comptime-known.
-    const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount());
+    const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod));
 
     field: for (found_fields, 0..) |field_ptr, i| {
         if (field_ptr != 0) {
             // Determine whether the value stored to this pointer is comptime-known.
-            const field_ty = struct_ty.structFieldType(i);
+            const field_ty = struct_ty.structFieldType(i, mod);
             if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
                 field_values[i] = opv;
                 continue;
@@ -4548,9 +4569,9 @@ fn validateStructInit(
             continue :field;
         }
 
-        const default_val = struct_ty.structFieldDefaultValue(i);
+        const default_val = struct_ty.structFieldDefaultValue(i, mod);
         if (default_val.ip_index == .unreachable_value) {
-            if (struct_ty.isTuple()) {
+            if (struct_ty.isTuple(mod)) {
                 const template = "missing tuple field with index {d}";
                 if (root_msg) |msg| {
                     try sema.errNote(block, init_src, msg, template, .{i});
@@ -4559,7 +4580,7 @@ fn validateStructInit(
                 }
                 continue;
             }
-            const field_name = struct_ty.structFieldName(i);
+            const field_name = struct_ty.structFieldName(i, mod);
             const template = "missing struct field: {s}";
             const args = .{field_name};
             if (root_msg) |msg| {
@@ -4573,11 +4594,11 @@ fn validateStructInit(
     }
 
     if (root_msg) |msg| {
-        if (struct_ty.castTag(.@"struct")) |struct_obj| {
-            const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod);
+        if (mod.typeToStruct(struct_ty)) |struct_obj| {
+            const fqn = try struct_obj.getFullyQualifiedName(sema.mod);
             defer gpa.free(fqn);
             try sema.mod.errNoteNonLazy(
-                struct_obj.data.srcLoc(sema.mod),
+                struct_obj.srcLoc(sema.mod),
                 msg,
                 "struct '{s}' declared here",
                 .{fqn},
@@ -4605,7 +4626,7 @@ fn validateStructInit(
         if (field_ptr != 0) continue;
 
         const field_src = init_src; // TODO better source location
-        const default_field_ptr = if (struct_ty.isTuple())
+        const default_field_ptr = if (struct_ty.isTuple(mod))
             try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
         else
             try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
@@ -4638,7 +4659,7 @@ fn zirValidateArrayInit(
 
             var i = instrs.len;
             while (i < array_len) : (i += 1) {
-                const default_val = array_ty.structFieldDefaultValue(i);
+                const default_val = array_ty.structFieldDefaultValue(i, mod);
                 if (default_val.ip_index == .unreachable_value) {
                     const template = "missing tuple field with index {d}";
                     if (root_msg) |msg| {
@@ -4698,7 +4719,7 @@ fn zirValidateArrayInit(
     outer: for (instrs, 0..) |elem_ptr, i| {
         // Determine whether the value stored to this pointer is comptime-known.
 
-        if (array_ty.isTuple()) {
+        if (array_ty.isTuple(mod)) {
             if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
                 element_vals[i] = opv;
                 continue;
@@ -7950,7 +7971,7 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
     const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs);
     assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
     if (indexable_ty.zigTypeTag(mod) == .Struct) {
-        const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs));
+        const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs), mod);
         return sema.addType(elem_type);
     } else {
         const elem_type = indexable_ty.elemType2(mod);
@@ -9822,7 +9843,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             };
             return sema.failWithOwnedErrorMsg(msg);
         },
-        .Struct, .Union => if (dest_ty.containerLayout() == .Auto) {
+        .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) {
             const container = switch (dest_ty.zigTypeTag(mod)) {
                 .Struct => "struct",
                 .Union => "union",
@@ -9885,7 +9906,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             };
             return sema.failWithOwnedErrorMsg(msg);
         },
-        .Struct, .Union => if (operand_ty.containerLayout() == .Auto) {
+        .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) {
             const container = switch (operand_ty.zigTypeTag(mod)) {
                 .Struct => "struct",
                 .Union => "union",
@@ -12041,12 +12062,12 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 if (mem.eql(u8, name, field_name)) break true;
             } else false;
         }
-        if (ty.isTuple()) {
+        if (ty.isTuple(mod)) {
             const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
-            break :hf field_index < ty.structFieldCount();
+            break :hf field_index < ty.structFieldCount(mod);
         }
         break :hf switch (ty.zigTypeTag(mod)) {
-            .Struct => ty.structFields().contains(field_name),
+            .Struct => ty.structFields(mod).contains(field_name),
             .Union => ty.unionFields().contains(field_name),
             .Enum => ty.enumFields().contains(field_name),
             .Array => mem.eql(u8, field_name, "len"),
@@ -12601,14 +12622,15 @@ fn analyzeTupleCat(
     lhs: Air.Inst.Ref,
     rhs: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     const src = LazySrcLoc.nodeOffset(src_node);
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
 
-    const lhs_len = lhs_ty.structFieldCount();
-    const rhs_len = rhs_ty.structFieldCount();
+    const lhs_len = lhs_ty.structFieldCount(mod);
+    const rhs_len = rhs_ty.structFieldCount(mod);
     const dest_fields = lhs_len + rhs_len;
 
     if (dest_fields == 0) {
@@ -12629,8 +12651,8 @@ fn analyzeTupleCat(
         var runtime_src: ?LazySrcLoc = null;
         var i: u32 = 0;
         while (i < lhs_len) : (i += 1) {
-            types[i] = lhs_ty.structFieldType(i);
-            const default_val = lhs_ty.structFieldDefaultValue(i);
+            types[i] = lhs_ty.structFieldType(i, mod);
+            const default_val = lhs_ty.structFieldDefaultValue(i, mod);
             values[i] = default_val;
             const operand_src = lhs_src; // TODO better source location
             if (default_val.ip_index == .unreachable_value) {
@@ -12639,8 +12661,8 @@ fn analyzeTupleCat(
         }
         i = 0;
         while (i < rhs_len) : (i += 1) {
-            types[i + lhs_len] = rhs_ty.structFieldType(i);
-            const default_val = rhs_ty.structFieldDefaultValue(i);
+            types[i + lhs_len] = rhs_ty.structFieldType(i, mod);
+            const default_val = rhs_ty.structFieldDefaultValue(i, mod);
             values[i + lhs_len] = default_val;
             const operand_src = rhs_src; // TODO better source location
             if (default_val.ip_index == .unreachable_value) {
@@ -12691,8 +12713,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs_ty = sema.typeOf(rhs);
     const src = inst_data.src();
 
-    const lhs_is_tuple = lhs_ty.isTuple();
-    const rhs_is_tuple = rhs_ty.isTuple();
+    const lhs_is_tuple = lhs_ty.isTuple(mod);
+    const rhs_is_tuple = rhs_ty.isTuple(mod);
     if (lhs_is_tuple and rhs_is_tuple) {
         return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
     }
@@ -12800,8 +12822,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             var elem_i: usize = 0;
             while (elem_i < lhs_len) : (elem_i += 1) {
                 const lhs_elem_i = elem_i;
-                const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type;
-                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable";
+                const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type;
+                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
                 const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
@@ -12810,8 +12832,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             }
             while (elem_i < result_len) : (elem_i += 1) {
                 const rhs_elem_i = elem_i - lhs_len;
-                const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type;
-                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable";
+                const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type;
+                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
                 const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
@@ -12909,8 +12931,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
             }
         },
         .Struct => {
-            if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) {
-                assert(!peer_ty.isTuple());
+            if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) {
+                assert(!peer_ty.isTuple(mod));
                 return .{
                     .elem_type = peer_ty.elemType2(mod),
                     .sentinel = null,
@@ -12930,12 +12952,13 @@ fn analyzeTupleMul(
     operand: Air.Inst.Ref,
     factor: u64,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
     const src = LazySrcLoc.nodeOffset(src_node);
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
 
-    const tuple_len = operand_ty.structFieldCount();
+    const tuple_len = operand_ty.structFieldCount(mod);
     const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch
         return sema.fail(block, rhs_src, "operation results in overflow", .{});
 
@@ -12951,8 +12974,8 @@ fn analyzeTupleMul(
         var runtime_src: ?LazySrcLoc = null;
         var i: u32 = 0;
         while (i < tuple_len) : (i += 1) {
-            types[i] = operand_ty.structFieldType(i);
-            values[i] = operand_ty.structFieldDefaultValue(i);
+            types[i] = operand_ty.structFieldType(i, mod);
+            values[i] = operand_ty.structFieldDefaultValue(i, mod);
             const operand_src = lhs_src; // TODO better source location
             if (values[i].ip_index == .unreachable_value) {
                 runtime_src = operand_src;
@@ -13006,7 +13029,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
 
-    if (lhs_ty.isTuple()) {
+    if (lhs_ty.isTuple(mod)) {
         // In `**` rhs must be comptime-known, but lhs can be runtime-known
         const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");
         return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor);
@@ -14502,7 +14525,7 @@ fn zirOverflowArithmetic(
 
     const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2);
     element_refs[0] = result.inst;
-    element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1), result.overflow_bit);
+    element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1, mod), result.overflow_bit);
     return block.addAggregateInit(tuple_ty, element_refs);
 }
 
@@ -16378,7 +16401,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const union_ty = try sema.resolveTypeFields(ty);
             try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
-            const layout = union_ty.containerLayout();
+            const layout = union_ty.containerLayout(mod);
 
             const union_fields = union_ty.unionFields();
             const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());
@@ -16484,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
             const struct_ty = try sema.resolveTypeFields(ty);
             try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
-            const layout = struct_ty.containerLayout();
+            const layout = struct_ty.containerLayout(mod);
 
             const struct_field_vals = fv: {
                 if (struct_ty.isSimpleTupleOrAnonStruct()) {
@@ -16532,7 +16555,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     }
                     break :fv struct_field_vals;
                 }
-                const struct_fields = struct_ty.structFields();
+                const struct_fields = struct_ty.structFields(mod);
                 const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count());
 
                 for (struct_field_vals, 0..) |*field_val, i| {
@@ -16600,7 +16623,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const backing_integer_val = blk: {
                 if (layout == .Packed) {
-                    const struct_obj = struct_ty.castTag(.@"struct").?.data;
+                    const struct_obj = mod.typeToStruct(struct_ty).?;
                     assert(struct_obj.haveLayout());
                     assert(struct_obj.backing_int_ty.isInt(mod));
                     const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
@@ -16624,7 +16647,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // decls: []const Declaration,
                 decls_val,
                 // is_tuple: bool,
-                Value.makeBool(struct_ty.isTuple()),
+                Value.makeBool(struct_ty.isTuple(mod)),
             };
 
             return sema.addConstant(
@@ -17801,12 +17824,13 @@ fn structInitEmpty(
     dest_src: LazySrcLoc,
     init_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const gpa = sema.gpa;
     // This logic must be synchronized with that in `zirStructInit`.
     const struct_ty = try sema.resolveTypeFields(obj_ty);
 
     // The init values to use for the struct instance.
-    const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount());
+    const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod));
     defer gpa.free(field_inits);
     @memset(field_inits, .none);
 
@@ -17897,18 +17921,18 @@ fn zirStructInit(
 
         // Maps field index to field_type index of where it was already initialized.
         // For making sure all fields are accounted for and no fields are duplicated.
-        const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount());
+        const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod));
         defer gpa.free(found_fields);
 
         // The init values to use for the struct instance.
-        const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount());
+        const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod));
         defer gpa.free(field_inits);
         @memset(field_inits, .none);
 
         var field_i: u32 = 0;
         var extra_index = extra.end;
 
-        const is_packed = resolved_ty.containerLayout() == .Packed;
+        const is_packed = resolved_ty.containerLayout(mod) == .Packed;
         while (field_i < extra.data.fields_len) : (field_i += 1) {
             const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
             extra_index = item.end;
@@ -17917,7 +17941,7 @@ fn zirStructInit(
             const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
             const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
             const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
-            const field_index = if (resolved_ty.isTuple())
+            const field_index = if (resolved_ty.isTuple(mod))
                 try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src)
             else
                 try sema.structFieldIndex(block, resolved_ty, field_name, field_src);
@@ -17940,7 +17964,7 @@ fn zirStructInit(
                     return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
                 };
 
-                if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index), sema.mod)) {
+                if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), sema.mod)) {
                     return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index);
                 }
             };
@@ -18029,13 +18053,13 @@ fn finishStructInit(
                 field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val);
             }
         }
-    } else if (struct_ty.isTuple()) {
+    } else if (struct_ty.isTuple(mod)) {
         var i: u32 = 0;
-        const len = struct_ty.structFieldCount();
+        const len = struct_ty.structFieldCount(mod);
         while (i < len) : (i += 1) {
             if (field_inits[i] != .none) continue;
 
-            const default_val = struct_ty.structFieldDefaultValue(i);
+            const default_val = struct_ty.structFieldDefaultValue(i, mod);
             if (default_val.ip_index == .unreachable_value) {
                 const template = "missing tuple field with index {d}";
                 if (root_msg) |msg| {
@@ -18044,11 +18068,11 @@ fn finishStructInit(
                     root_msg = try sema.errMsg(block, init_src, template, .{i});
                 }
             } else {
-                field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i), default_val);
+                field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i, mod), default_val);
             }
         }
     } else {
-        const struct_obj = struct_ty.castTag(.@"struct").?.data;
+        const struct_obj = mod.typeToStruct(struct_ty).?;
         for (struct_obj.fields.values(), 0..) |field, i| {
             if (field_inits[i] != .none) continue;
 
@@ -18068,11 +18092,11 @@ fn finishStructInit(
     }
 
     if (root_msg) |msg| {
-        if (struct_ty.castTag(.@"struct")) |struct_obj| {
-            const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod);
+        if (mod.typeToStruct(struct_ty)) |struct_obj| {
+            const fqn = try struct_obj.getFullyQualifiedName(sema.mod);
             defer gpa.free(fqn);
             try sema.mod.errNoteNonLazy(
-                struct_obj.data.srcLoc(sema.mod),
+                struct_obj.srcLoc(sema.mod),
                 msg,
                 "struct '{s}' declared here",
                 .{fqn},
@@ -18277,7 +18301,7 @@ fn zirArrayInit(
     for (args[1..], 0..) |arg, i| {
         const resolved_arg = try sema.resolveInst(arg);
         const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct)
-            array_ty.structFieldType(i)
+            array_ty.structFieldType(i, mod)
         else
             array_ty.elemType2(mod);
         resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) {
@@ -18331,12 +18355,12 @@ fn zirArrayInit(
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
 
-        if (array_ty.isTuple()) {
+        if (array_ty.isTuple(mod)) {
             for (resolved_args, 0..) |arg, i| {
                 const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .mutable = true,
                     .@"addrspace" = target_util.defaultAddressSpace(target, .local),
-                    .pointee_type = array_ty.structFieldType(i),
+                    .pointee_type = array_ty.structFieldType(i, mod),
                 });
                 const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
 
@@ -18514,7 +18538,7 @@ fn fieldType(
                     const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
                     return sema.addType(cur_ty.tupleFields().types[field_index]);
                 }
-                const struct_obj = cur_ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(cur_ty).?;
                 const field = struct_obj.fields.get(field_name) orelse
                     return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
                 return sema.addType(field.ty);
@@ -19185,13 +19209,13 @@ fn zirReify(
                 .file_scope = block.getFileScope(mod),
             });
             const new_namespace = mod.namespacePtr(new_namespace_index);
-            errdefer @panic("TODO error handling");
+            errdefer mod.destroyNamespace(new_namespace_index);
 
             const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{
                 .decl = new_decl_index,
                 .namespace = new_namespace_index,
             } });
-            errdefer @panic("TODO error handling");
+            errdefer mod.intern_pool.remove(opaque_ty);
 
             new_decl.val = opaque_ty.toValue();
             new_namespace.ty = opaque_ty.toType();
@@ -19493,22 +19517,34 @@ fn reifyStruct(
     name_strategy: Zir.Inst.NameStrategy,
     is_tuple: bool,
 ) CompileError!Air.Inst.Ref {
-    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
+    const mod = sema.mod;
+    const gpa = sema.gpa;
+
+    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
     errdefer new_decl_arena.deinit();
     const new_decl_arena_allocator = new_decl_arena.allocator();
 
-    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
-    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
-    const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
-    const mod = sema.mod;
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the struct type gains an
+    // InternPool index.
+
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = new_struct_val,
+        .val = undefined,
     }, name_strategy, "struct", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer mod.abortAnonDecl(new_decl_index);
-    struct_obj.* = .{
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const struct_index = try mod.createStruct(.{
         .owner_decl = new_decl_index,
         .fields = .{},
         .zir_index = inst,
@@ -19516,12 +19552,19 @@ fn reifyStruct(
         .status = .have_field_types,
         .known_non_opv = false,
         .is_tuple = is_tuple,
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = struct_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
+        .namespace = new_namespace_index,
+    });
+    const struct_obj = mod.structPtr(struct_index);
+    errdefer mod.destroyStruct(struct_index);
+
+    const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
+        .index = struct_index.toOptional(),
+        .namespace = new_namespace_index.toOptional(),
+    } });
+    errdefer mod.intern_pool.remove(struct_ty);
+
+    new_decl.val = struct_ty.toValue();
+    new_namespace.ty = struct_ty.toType();
 
     // Fields
     const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
@@ -19609,7 +19652,7 @@ fn reifyStruct(
         if (field_ty.zigTypeTag(mod) == .Opaque) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
@@ -19619,7 +19662,7 @@ fn reifyStruct(
         if (field_ty.zigTypeTag(mod) == .NoReturn) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
@@ -19629,7 +19672,7 @@ fn reifyStruct(
         if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 const src_decl = sema.mod.declPtr(block.src_decl);
                 try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field);
@@ -19641,7 +19684,7 @@ fn reifyStruct(
         } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 const src_decl = sema.mod.declPtr(block.src_decl);
                 try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);
@@ -19660,7 +19703,7 @@ fn reifyStruct(
             sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
                 error.AnalysisFail => {
                     const msg = sema.err orelse return err;
-                    try sema.addFieldErrNote(struct_ty, index, msg, "while checking this field", .{});
+                    try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
                     return err;
                 },
                 else => return err,
@@ -20558,21 +20601,21 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
         },
     }
 
-    const field_index = if (ty.isTuple()) blk: {
+    const field_index = if (ty.isTuple(mod)) blk: {
         if (mem.eql(u8, field_name, "len")) {
             return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
         }
         break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
     } else try sema.structFieldIndex(block, ty, field_name, rhs_src);
 
-    if (ty.structFieldIsComptime(field_index)) {
+    if (ty.structFieldIsComptime(field_index, mod)) {
         return sema.fail(block, src, "no offset available for comptime field", .{});
     }
 
-    switch (ty.containerLayout()) {
+    switch (ty.containerLayout(mod)) {
         .Packed => {
             var bit_sum: u64 = 0;
-            const fields = ty.structFields();
+            const fields = ty.structFields(mod);
             for (fields.values(), 0..) |field, i| {
                 if (i == field_index) {
                     return bit_sum;
@@ -21810,6 +21853,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -21869,11 +21913,11 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const args = try sema.resolveInst(extra.args);
 
     const args_ty = sema.typeOf(args);
-    if (!args_ty.isTuple() and args_ty.ip_index != .empty_struct_type) {
+    if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) {
         return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)});
     }
 
-    var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount());
+    var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
     for (resolved_args, 0..) |*resolved, i| {
         resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
     }
@@ -21905,7 +21949,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
 
     const field_index = switch (parent_ty.zigTypeTag(mod)) {
         .Struct => blk: {
-            if (parent_ty.isTuple()) {
+            if (parent_ty.isTuple(mod)) {
                 if (mem.eql(u8, field_name, "len")) {
                     return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
                 }
@@ -21918,7 +21962,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         else => unreachable,
     };
 
-    if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) {
+    if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) {
         return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
     }
 
@@ -21926,17 +21970,17 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);
 
     var ptr_ty_data: Type.Payload.Pointer.Data = .{
-        .pointee_type = parent_ty.structFieldType(field_index),
+        .pointee_type = parent_ty.structFieldType(field_index, mod),
         .mutable = field_ptr_ty_info.mutable,
         .@"addrspace" = field_ptr_ty_info.@"addrspace",
     };
 
-    if (parent_ty.containerLayout() == .Packed) {
+    if (parent_ty.containerLayout(mod) == .Packed) {
         return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
     } else {
         ptr_ty_data.@"align" = blk: {
-            if (parent_ty.castTag(.@"struct")) |struct_obj| {
-                break :blk struct_obj.data.fields.values()[field_index].abi_align;
+            if (mod.typeToStruct(parent_ty)) |struct_obj| {
+                break :blk struct_obj.fields.values()[field_index].abi_align;
             } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| {
                 break :blk union_obj.data.fields.values()[field_index].abi_align;
             } else {
@@ -23380,8 +23424,7 @@ fn explainWhyTypeIsComptimeInner(
         .Struct => {
             if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
 
-            if (ty.castTag(.@"struct")) |payload| {
-                const struct_obj = payload.data;
+            if (mod.typeToStruct(ty)) |struct_obj| {
                 for (struct_obj.fields.values(), 0..) |field, i| {
                     const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{
                         .index = i,
@@ -23472,7 +23515,7 @@ fn validateExternType(
         .Enum => {
             return sema.validateExternType(try ty.intTagType(mod), position);
         },
-        .Struct, .Union => switch (ty.containerLayout()) {
+        .Struct, .Union => switch (ty.containerLayout(mod)) {
             .Extern => return true,
             .Packed => {
                 const bit_size = try ty.bitSizeAdvanced(mod, sema);
@@ -23569,7 +23612,7 @@ fn explainWhyTypeIsNotExtern(
 
 /// Returns true if `ty` is allowed in packed types.
 /// Does *NOT* require `ty` to be resolved in any way.
-fn validatePackedType(ty: Type, mod: *const Module) bool {
+fn validatePackedType(ty: Type, mod: *Module) bool {
     switch (ty.zigTypeTag(mod)) {
         .Type,
         .ComptimeFloat,
@@ -23595,7 +23638,7 @@ fn validatePackedType(ty: Type, mod: *const Module) bool {
         .Enum,
         => return true,
         .Pointer => return !ty.isSlice(mod),
-        .Struct, .Union => return ty.containerLayout() == .Packed,
+        .Struct, .Union => return ty.containerLayout(mod) == .Packed,
     }
 }
 
@@ -24419,27 +24462,27 @@ fn fieldCallBind(
         switch (concrete_ty.zigTypeTag(mod)) {
             .Struct => {
                 const struct_ty = try sema.resolveTypeFields(concrete_ty);
-                if (struct_ty.castTag(.@"struct")) |struct_obj| {
-                    const field_index_usize = struct_obj.data.fields.getIndex(field_name) orelse
+                if (mod.typeToStruct(struct_ty)) |struct_obj| {
+                    const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
                         break :find_field;
                     const field_index = @intCast(u32, field_index_usize);
-                    const field = struct_obj.data.fields.values()[field_index];
+                    const field = struct_obj.fields.values()[field_index];
 
                     return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
-                } else if (struct_ty.isTuple()) {
+                } else if (struct_ty.isTuple(mod)) {
                     if (mem.eql(u8, field_name, "len")) {
-                        return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()) };
+                        return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) };
                     }
                     if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| {
-                        if (field_index >= struct_ty.structFieldCount()) break :find_field;
-                        return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index), field_index, object_ptr);
+                        if (field_index >= struct_ty.structFieldCount(mod)) break :find_field;
+                        return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr);
                     } else |_| {}
                 } else {
-                    const max = struct_ty.structFieldCount();
+                    const max = struct_ty.structFieldCount(mod);
                     var i: u32 = 0;
                     while (i < max) : (i += 1) {
-                        if (mem.eql(u8, struct_ty.structFieldName(i), field_name)) {
-                            return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i), i, object_ptr);
+                        if (mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)) {
+                            return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr);
                         }
                     }
                 }
@@ -24651,9 +24694,9 @@ fn structFieldPtr(
     const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
     try sema.resolveStructLayout(struct_ty);
 
-    if (struct_ty.isTuple()) {
+    if (struct_ty.isTuple(mod)) {
         if (mem.eql(u8, field_name, "len")) {
-            const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount());
+            const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod));
             return sema.analyzeRef(block, src, len_inst);
         }
         const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
@@ -24663,7 +24706,7 @@ fn structFieldPtr(
         return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
     }
 
-    const struct_obj = struct_ty.castTag(.@"struct").?.data;
+    const struct_obj = mod.typeToStruct(struct_ty).?;
 
     const field_index_big = struct_obj.fields.getIndex(field_name) orelse
         return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
@@ -24687,7 +24730,7 @@ fn structFieldPtrByIndex(
     }
 
     const mod = sema.mod;
-    const struct_obj = struct_ty.castTag(.@"struct").?.data;
+    const struct_obj = mod.typeToStruct(struct_ty).?;
     const field = struct_obj.fields.values()[field_index];
     const struct_ptr_ty = sema.typeOf(struct_ptr);
     const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
@@ -24799,8 +24842,11 @@ fn structFieldVal(
                 const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
                 return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty);
             },
-            .@"struct" => {
-                const struct_obj = struct_ty.castTag(.@"struct").?.data;
+            else => unreachable,
+        },
+        else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
 
                 const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
@@ -24827,7 +24873,6 @@ fn structFieldVal(
             },
             else => unreachable,
         },
-        else => unreachable,
     }
 }
 
@@ -24840,8 +24885,9 @@ fn tupleFieldVal(
     field_name_src: LazySrcLoc,
     tuple_ty: Type,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     if (mem.eql(u8, field_name, "len")) {
-        return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount());
+        return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod));
     }
     const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
     return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty);
@@ -24858,7 +24904,7 @@ fn tupleFieldIndex(
     const mod = sema.mod;
     assert(!std.mem.eql(u8, field_name, "len"));
     if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| {
-        if (field_index < tuple_ty.structFieldCount()) return field_index;
+        if (field_index < tuple_ty.structFieldCount(mod)) return field_index;
         return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{
             field_name, tuple_ty.fmt(mod),
         });
@@ -24878,7 +24924,7 @@ fn tupleFieldValByIndex(
     tuple_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const field_ty = tuple_ty.structFieldType(field_index);
+    const field_ty = tuple_ty.structFieldType(field_index, mod);
 
     if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
         return sema.addConstant(field_ty, default_value);
@@ -25251,7 +25297,7 @@ fn tupleFieldPtr(
     const tuple_ptr_ty = sema.typeOf(tuple_ptr);
     const tuple_ty = tuple_ptr_ty.childType(mod);
     _ = try sema.resolveTypeFields(tuple_ty);
-    const field_count = tuple_ty.structFieldCount();
+    const field_count = tuple_ty.structFieldCount(mod);
 
     if (field_count == 0) {
         return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{});
@@ -25263,7 +25309,7 @@ fn tupleFieldPtr(
         });
     }
 
-    const field_ty = tuple_ty.structFieldType(field_index);
+    const field_ty = tuple_ty.structFieldType(field_index, mod);
     const ptr_field_ty = try Type.ptr(sema.arena, mod, .{
         .pointee_type = field_ty,
         .mutable = tuple_ptr_ty.ptrIsMutable(mod),
@@ -25308,7 +25354,7 @@ fn tupleField(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple));
-    const field_count = tuple_ty.structFieldCount();
+    const field_count = tuple_ty.structFieldCount(mod);
 
     if (field_count == 0) {
         return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{});
@@ -25320,7 +25366,7 @@ fn tupleField(
         });
     }
 
-    const field_ty = tuple_ty.structFieldType(field_index);
+    const field_ty = tuple_ty.structFieldType(field_index, mod);
 
     if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
         return sema.addConstant(field_ty, default_value); // comptime field
@@ -25919,7 +25965,7 @@ fn coerceExtra(
                     .Array => {
                         // pointer to tuple to pointer to array
                         if (inst_ty.isSinglePointer(mod) and
-                            inst_ty.childType(mod).isTuple() and
+                            inst_ty.childType(mod).isTuple(mod) and
                             sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                         {
                             return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25939,11 +25985,11 @@ fn coerceExtra(
 
                     if (!inst_ty.isSinglePointer(mod)) break :to_slice;
                     const inst_child_ty = inst_ty.childType(mod);
-                    if (!inst_child_ty.isTuple()) break :to_slice;
+                    if (!inst_child_ty.isTuple(mod)) break :to_slice;
 
                     // empty tuple to zero-length slice
                     // note that this allows coercing to a mutable slice.
-                    if (inst_child_ty.structFieldCount() == 0) {
+                    if (inst_child_ty.structFieldCount(mod) == 0) {
                         // Optional slice is represented with a null pointer so
                         // we use a dummy pointer value with the required alignment.
                         const slice_val = try Value.Tag.slice.create(sema.arena, .{
@@ -26213,7 +26259,7 @@ fn coerceExtra(
                 if (inst == .empty_struct) {
                     return sema.arrayInitEmpty(block, inst_src, dest_ty);
                 }
-                if (inst_ty.isTuple()) {
+                if (inst_ty.isTuple(mod)) {
                     return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                 }
             },
@@ -26225,7 +26271,7 @@ fn coerceExtra(
         .Vector => switch (inst_ty.zigTypeTag(mod)) {
             .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
             .Struct => {
-                if (inst_ty.isTuple()) {
+                if (inst_ty.isTuple(mod)) {
                     return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                 }
             },
@@ -26238,7 +26284,7 @@ fn coerceExtra(
             if (inst == .empty_struct) {
                 return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
             }
-            if (inst_ty.isTupleOrAnonStruct()) {
+            if (inst_ty.isTupleOrAnonStruct(mod)) {
                 return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) {
                     error.NotCoercible => break :blk,
                     else => |e| return e,
@@ -27304,8 +27350,8 @@ fn storePtr2(
     // this code does not handle tuple-to-struct coercion which requires dealing with missing
     // fields.
     const operand_ty = sema.typeOf(uncasted_operand);
-    if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) {
-        const field_count = operand_ty.structFieldCount();
+    if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) {
+        const field_count = operand_ty.structFieldCount(mod);
         var i: u32 = 0;
         while (i < field_count) : (i += 1) {
             const elem_src = operand_src; // TODO better source location
@@ -27804,7 +27850,7 @@ fn beginComptimePtrMutation(
 
                         switch (parent.ty.zigTypeTag(mod)) {
                             .Struct => {
-                                const fields = try arena.alloc(Value, parent.ty.structFieldCount());
+                                const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
                                 @memset(fields, Value.undef);
 
                                 val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
@@ -27813,7 +27859,7 @@ fn beginComptimePtrMutation(
                                     sema,
                                     block,
                                     src,
-                                    parent.ty.structFieldType(field_index),
+                                    parent.ty.structFieldType(field_index, mod),
                                     &fields[field_index],
                                     ptr_elem_ty,
                                     parent.decl_ref_mut,
@@ -27832,7 +27878,7 @@ fn beginComptimePtrMutation(
                                     sema,
                                     block,
                                     src,
-                                    parent.ty.structFieldType(field_index),
+                                    parent.ty.structFieldType(field_index, mod),
                                     &payload.data.val,
                                     ptr_elem_ty,
                                     parent.decl_ref_mut,
@@ -27878,7 +27924,7 @@ fn beginComptimePtrMutation(
                             sema,
                             block,
                             src,
-                            parent.ty.structFieldType(field_index),
+                            parent.ty.structFieldType(field_index, mod),
                             duped,
                             ptr_elem_ty,
                             parent.decl_ref_mut,
@@ -27889,7 +27935,7 @@ fn beginComptimePtrMutation(
                             sema,
                             block,
                             src,
-                            parent.ty.structFieldType(field_index),
+                            parent.ty.structFieldType(field_index, mod),
                             &val_ptr.castTag(.aggregate).?.data[field_index],
                             ptr_elem_ty,
                             parent.decl_ref_mut,
@@ -27907,7 +27953,7 @@ fn beginComptimePtrMutation(
                                 sema,
                                 block,
                                 src,
-                                parent.ty.structFieldType(field_index),
+                                parent.ty.structFieldType(field_index, mod),
                                 &payload.val,
                                 ptr_elem_ty,
                                 parent.decl_ref_mut,
@@ -28269,8 +28315,8 @@ fn beginComptimePtrLoad(
                 var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty);
 
                 if (field_ptr.container_ty.hasWellDefinedLayout(mod)) {
-                    const struct_ty = field_ptr.container_ty.castTag(.@"struct");
-                    if (struct_ty != null and struct_ty.?.data.layout == .Packed) {
+                    const struct_obj = mod.typeToStruct(field_ptr.container_ty);
+                    if (struct_obj != null and struct_obj.?.layout == .Packed) {
                         // packed structs are not byte addressable
                         deref.parent = null;
                     } else if (deref.parent) |*parent| {
@@ -28310,7 +28356,7 @@ fn beginComptimePtrLoad(
                         else => unreachable,
                     };
                 } else {
-                    const field_ty = field_ptr.container_ty.structFieldType(field_index);
+                    const field_ty = field_ptr.container_ty.structFieldType(field_index, mod);
                     deref.pointee = TypedValue{
                         .ty = field_ty,
                         .val = try tv.val.fieldValue(tv.ty, mod, field_index),
@@ -28483,7 +28529,7 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
     const inst_info = inst_ty.ptrInfo(mod);
     const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or
         (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
-        (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0);
+        (inst_info.pointee_type.isTuple(mod) and inst_info.pointee_type.structFieldCount(mod) == 0);
 
     const ok_cv_qualifiers =
         ((inst_info.mutable or !dest_info.mutable) or len0) and
@@ -28714,8 +28760,9 @@ fn coerceAnonStructToUnion(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_ty = sema.typeOf(inst);
-    const field_count = inst_ty.structFieldCount();
+    const field_count = inst_ty.structFieldCount(mod);
     if (field_count != 1) {
         const msg = msg: {
             const msg = if (field_count > 1) try sema.errMsg(
@@ -28927,7 +28974,7 @@ fn coerceTupleToSlicePtrs(
     const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
     const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
     const slice_info = slice_ty.ptrInfo(mod);
-    const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod);
+    const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(mod), slice_info.sentinel, slice_info.pointee_type, sema.mod);
     const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
     if (slice_info.@"align" != 0) {
         return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{});
@@ -28966,20 +29013,21 @@ fn coerceTupleToStruct(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
     const struct_ty = try sema.resolveTypeFields(dest_ty);
 
-    if (struct_ty.isTupleOrAnonStruct()) {
+    if (struct_ty.isTupleOrAnonStruct(mod)) {
         return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
     }
 
-    const fields = struct_ty.structFields();
+    const fields = struct_ty.structFields(mod);
     const field_vals = try sema.arena.alloc(Value, fields.count());
     const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
     @memset(field_refs, .none);
 
     const inst_ty = sema.typeOf(inst);
     var runtime_src: ?LazySrcLoc = null;
-    const field_count = inst_ty.structFieldCount();
+    const field_count = inst_ty.structFieldCount(mod);
     var field_i: u32 = 0;
     while (field_i < field_count) : (field_i += 1) {
         const field_src = inst_src; // TODO better source location
@@ -29061,13 +29109,14 @@ fn coerceTupleToTuple(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
-    const dest_field_count = tuple_ty.structFieldCount();
+    const mod = sema.mod;
+    const dest_field_count = tuple_ty.structFieldCount(mod);
     const field_vals = try sema.arena.alloc(Value, dest_field_count);
     const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
     @memset(field_refs, .none);
 
     const inst_ty = sema.typeOf(inst);
-    const inst_field_count = inst_ty.structFieldCount();
+    const inst_field_count = inst_ty.structFieldCount(mod);
     if (inst_field_count > dest_field_count) return error.NotCoercible;
 
     var runtime_src: ?LazySrcLoc = null;
@@ -29085,8 +29134,8 @@ fn coerceTupleToTuple(
 
         const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);
 
-        const field_ty = tuple_ty.structFieldType(field_i);
-        const default_val = tuple_ty.structFieldDefaultValue(field_i);
+        const field_ty = tuple_ty.structFieldType(field_i, mod);
+        const default_val = tuple_ty.structFieldDefaultValue(field_i, mod);
         const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
         const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
         field_refs[field_index] = coerced;
@@ -29115,12 +29164,12 @@ fn coerceTupleToTuple(
     for (field_refs, 0..) |*field_ref, i| {
         if (field_ref.* != .none) continue;
 
-        const default_val = tuple_ty.structFieldDefaultValue(i);
-        const field_ty = tuple_ty.structFieldType(i);
+        const default_val = tuple_ty.structFieldDefaultValue(i, mod);
+        const field_ty = tuple_ty.structFieldType(i, mod);
 
         const field_src = inst_src; // TODO better source location
         if (default_val.ip_index == .unreachable_value) {
-            if (tuple_ty.isTuple()) {
+            if (tuple_ty.isTuple(mod)) {
                 const template = "missing tuple field: {d}";
                 if (root_msg) |msg| {
                     try sema.errNote(block, field_src, msg, template, .{i});
@@ -29130,7 +29179,7 @@ fn coerceTupleToTuple(
                 continue;
             }
             const template = "missing struct field: {s}";
-            const args = .{tuple_ty.structFieldName(i)};
+            const args = .{tuple_ty.structFieldName(i, mod)};
             if (root_msg) |msg| {
                 try sema.errNote(block, field_src, msg, template, args);
             } else {
@@ -31222,17 +31271,17 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
 }
 
 fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
+    const mod = sema.mod;
     const resolved_ty = try sema.resolveTypeFields(ty);
-    if (resolved_ty.castTag(.@"struct")) |payload| {
-        const struct_obj = payload.data;
+    if (mod.typeToStruct(resolved_ty)) |struct_obj| {
         switch (struct_obj.status) {
             .none, .have_field_types => {},
             .field_types_wip, .layout_wip => {
                 const msg = try Module.ErrorMsg.create(
                     sema.gpa,
-                    struct_obj.srcLoc(sema.mod),
+                    struct_obj.srcLoc(mod),
                     "struct '{}' depends on itself",
-                    .{ty.fmt(sema.mod)},
+                    .{ty.fmt(mod)},
                 );
                 return sema.failWithOwnedErrorMsg(msg);
             },
@@ -31256,7 +31305,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
         }
 
         if (struct_obj.layout == .Packed) {
-            try semaBackingIntType(sema.mod, struct_obj);
+            try semaBackingIntType(mod, struct_obj);
         }
 
         struct_obj.status = .have_layout;
@@ -31265,20 +31314,20 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
         if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
             const msg = try Module.ErrorMsg.create(
                 sema.gpa,
-                struct_obj.srcLoc(sema.mod),
+                struct_obj.srcLoc(mod),
                 "struct layout depends on it having runtime bits",
                 .{},
             );
             return sema.failWithOwnedErrorMsg(msg);
         }
 
-        if (struct_obj.layout == .Auto and sema.mod.backendSupportsFeature(.field_reordering)) {
+        if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) {
             const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index)
                 try sema.perm_arena.alloc(u32, struct_obj.fields.count())
             else blk: {
-                const decl = sema.mod.declPtr(struct_obj.owner_decl);
+                const decl = mod.declPtr(struct_obj.owner_decl);
                 var decl_arena: std.heap.ArenaAllocator = undefined;
-                const decl_arena_allocator = decl.value_arena.?.acquire(sema.mod.gpa, &decl_arena);
+                const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena);
                 defer decl.value_arena.?.release(&decl_arena);
                 break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count());
             };
@@ -31528,7 +31577,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
     return switch (ty.ip_index) {
         .empty_struct_type => false,
         .none => switch (ty.tag()) {
-            .empty_struct,
             .error_set,
             .error_set_single,
             .error_set_inferred,
@@ -31569,27 +31617,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 return false;
             },
 
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                switch (struct_obj.requires_comptime) {
-                    .no, .wip => return false,
-                    .yes => return true,
-                    .unknown => {
-                        var requires_comptime = false;
-                        struct_obj.requires_comptime = .wip;
-                        for (struct_obj.fields.values()) |field| {
-                            if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
-                        }
-                        if (requires_comptime) {
-                            struct_obj.requires_comptime = .yes;
-                        } else {
-                            struct_obj.requires_comptime = .no;
-                        }
-                        return requires_comptime;
-                    },
-                }
-            },
-
             .@"union", .union_safety_tagged, .union_tagged => {
                 const union_obj = ty.cast(Type.Payload.Union).?.data;
                 switch (union_obj.requires_comptime) {
@@ -31686,7 +31713,27 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 .type_info,
                 => true,
             },
-            .struct_type => @panic("TODO"),
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                switch (struct_obj.requires_comptime) {
+                    .no, .wip => return false,
+                    .yes => return true,
+                    .unknown => {
+                        var requires_comptime = false;
+                        struct_obj.requires_comptime = .wip;
+                        for (struct_obj.fields.values()) |field| {
+                            if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
+                        }
+                        if (requires_comptime) {
+                            struct_obj.requires_comptime = .yes;
+                        } else {
+                            struct_obj.requires_comptime = .no;
+                        }
+                        return requires_comptime;
+                    },
+                }
+            },
+
             .union_type => @panic("TODO"),
             .opaque_type => false,
 
@@ -31697,6 +31744,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             .ptr => unreachable,
             .opt => unreachable,
             .enum_tag => unreachable,
+            .aggregate => unreachable,
         },
     };
 }
@@ -31710,16 +31758,21 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
             const child_ty = try sema.resolveTypeFields(ty.childType(mod));
             return sema.resolveTypeFully(child_ty);
         },
-        .Struct => switch (ty.tag()) {
-            .@"struct" => return sema.resolveStructFully(ty),
-            .tuple, .anon_struct => {
-                const tuple = ty.tupleFields();
+        .Struct => switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .tuple, .anon_struct => {
+                    const tuple = ty.tupleFields();
 
-                for (tuple.types) |field_ty| {
-                    try sema.resolveTypeFully(field_ty);
-                }
+                    for (tuple.types) |field_ty| {
+                        try sema.resolveTypeFully(field_ty);
+                    }
+                },
+                else => {},
+            },
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => return sema.resolveStructFully(ty),
+                else => {},
             },
-            else => {},
         },
         .Union => return sema.resolveUnionFully(ty),
         .Array => return sema.resolveTypeFully(ty.childType(mod)),
@@ -31746,9 +31799,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
 fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
     try sema.resolveStructLayout(ty);
 
+    const mod = sema.mod;
     const resolved_ty = try sema.resolveTypeFields(ty);
-    const payload = resolved_ty.castTag(.@"struct").?;
-    const struct_obj = payload.data;
+    const struct_obj = mod.typeToStruct(resolved_ty).?;
 
     switch (struct_obj.status) {
         .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
@@ -31806,11 +31859,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
 
     switch (ty.ip_index) {
         .none => switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                try sema.resolveTypeFieldsStruct(ty, struct_obj);
-                return ty;
-            },
             .@"union", .union_safety_tagged, .union_tagged => {
                 const union_obj = ty.cast(Type.Payload.Union).?.data;
                 try sema.resolveTypeFieldsUnion(ty, union_obj);
@@ -31904,7 +31952,11 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
         .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"),
 
         _ => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-            .struct_type => @panic("TODO"),
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty;
+                try sema.resolveTypeFieldsStruct(ty, struct_obj);
+                return ty;
+            },
             .union_type => @panic("TODO"),
             else => return ty,
         },
@@ -33010,28 +33062,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 }
             },
 
-            .@"struct" => {
-                const resolved_ty = try sema.resolveTypeFields(ty);
-                const s = resolved_ty.castTag(.@"struct").?.data;
-                for (s.fields.values(), 0..) |field, i| {
-                    if (field.is_comptime) continue;
-                    if (field.ty.eql(resolved_ty, sema.mod)) {
-                        const msg = try Module.ErrorMsg.create(
-                            sema.gpa,
-                            s.srcLoc(sema.mod),
-                            "struct '{}' depends on itself",
-                            .{ty.fmt(sema.mod)},
-                        );
-                        try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{});
-                        return sema.failWithOwnedErrorMsg(msg);
-                    }
-                    if ((try sema.typeHasOnePossibleValue(field.ty)) == null) {
-                        return null;
-                    }
-                }
-                return Value.empty_struct;
-            },
-
             .tuple, .anon_struct => {
                 const tuple = ty.tupleFields();
                 for (tuple.values, 0..) |val, i| {
@@ -33120,8 +33150,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 });
             },
 
-            .empty_struct => return Value.empty_struct,
-
             .array => {
                 if (ty.arrayLen(mod) == 0)
                     return Value.initTag(.empty_array);
@@ -33212,7 +33240,34 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 .generic_poison => return error.GenericPoison,
                 .var_args_param => unreachable,
             },
-            .struct_type => @panic("TODO"),
+            .struct_type => |struct_type| {
+                const resolved_ty = try sema.resolveTypeFields(ty);
+                if (mod.structPtrUnwrap(struct_type.index)) |s| {
+                    for (s.fields.values(), 0..) |field, i| {
+                        if (field.is_comptime) continue;
+                        if (field.ty.eql(resolved_ty, sema.mod)) {
+                            const msg = try Module.ErrorMsg.create(
+                                sema.gpa,
+                                s.srcLoc(sema.mod),
+                                "struct '{}' depends on itself",
+                                .{ty.fmt(sema.mod)},
+                            );
+                            try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{});
+                            return sema.failWithOwnedErrorMsg(msg);
+                        }
+                        if ((try sema.typeHasOnePossibleValue(field.ty)) == null) {
+                            return null;
+                        }
+                    }
+                }
+                // In this case the struct has no fields and therefore has one possible value.
+                const empty = try mod.intern(.{ .aggregate = .{
+                    .ty = ty.ip_index,
+                    .fields = &.{},
+                } });
+                return empty.toValue();
+            },
+
             .union_type => @panic("TODO"),
             .opaque_type => null,
 
@@ -33223,6 +33278,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             .ptr => unreachable,
             .opt => unreachable,
             .enum_tag => unreachable,
+            .aggregate => unreachable,
         },
     }
 }
@@ -33614,7 +33670,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .empty_struct_type => false,
 
         .none => switch (ty.tag()) {
-            .empty_struct,
             .error_set,
             .error_set_single,
             .error_set_inferred,
@@ -33655,31 +33710,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 return false;
             },
 
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                switch (struct_obj.requires_comptime) {
-                    .no, .wip => return false,
-                    .yes => return true,
-                    .unknown => {
-                        if (struct_obj.status == .field_types_wip)
-                            return false;
-
-                        try sema.resolveTypeFieldsStruct(ty, struct_obj);
-
-                        struct_obj.requires_comptime = .wip;
-                        for (struct_obj.fields.values()) |field| {
-                            if (field.is_comptime) continue;
-                            if (try sema.typeRequiresComptime(field.ty)) {
-                                struct_obj.requires_comptime = .yes;
-                                return true;
-                            }
-                        }
-                        struct_obj.requires_comptime = .no;
-                        return false;
-                    },
-                }
-            },
-
             .@"union", .union_safety_tagged, .union_tagged => {
                 const union_obj = ty.cast(Type.Payload.Union).?.data;
                 switch (union_obj.requires_comptime) {
@@ -33782,7 +33812,31 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
 
                 .var_args_param => unreachable,
             },
-            .struct_type => @panic("TODO"),
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                switch (struct_obj.requires_comptime) {
+                    .no, .wip => return false,
+                    .yes => return true,
+                    .unknown => {
+                        if (struct_obj.status == .field_types_wip)
+                            return false;
+
+                        try sema.resolveTypeFieldsStruct(ty, struct_obj);
+
+                        struct_obj.requires_comptime = .wip;
+                        for (struct_obj.fields.values()) |field| {
+                            if (field.is_comptime) continue;
+                            if (try sema.typeRequiresComptime(field.ty)) {
+                                struct_obj.requires_comptime = .yes;
+                                return true;
+                            }
+                        }
+                        struct_obj.requires_comptime = .no;
+                        return false;
+                    },
+                }
+            },
+
             .union_type => @panic("TODO"),
             .opaque_type => false,
 
@@ -33793,6 +33847,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             .ptr => unreachable,
             .opt => unreachable,
             .enum_tag => unreachable,
+            .aggregate => unreachable,
         },
     };
 }
@@ -33864,11 +33919,12 @@ fn structFieldIndex(
     field_name: []const u8,
     field_src: LazySrcLoc,
 ) !u32 {
+    const mod = sema.mod;
     const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
     if (struct_ty.isAnonStruct()) {
         return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
     } else {
-        const struct_obj = struct_ty.castTag(.@"struct").?.data;
+        const struct_obj = mod.typeToStruct(struct_ty).?;
         const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
             return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
         return @intCast(u32, field_index_usize);
src/type.zig
@@ -59,8 +59,6 @@ pub const Type = struct {
 
                 .anyframe_T => return .AnyFrame,
 
-                .empty_struct,
-                .@"struct",
                 .tuple,
                 .anon_struct,
                 => return .Struct,
@@ -148,6 +146,7 @@ pub const Type = struct {
                 .opt => unreachable,
                 .enum_tag => unreachable,
                 .simple_value => unreachable,
+                .aggregate => unreachable,
             },
         }
     }
@@ -501,16 +500,6 @@ pub const Type = struct {
                 return a.elemType2(mod).eql(b.elemType2(mod), mod);
             },
 
-            .empty_struct => {
-                const a_namespace = a.castTag(.empty_struct).?.data;
-                const b_namespace = (b.castTag(.empty_struct) orelse return false).data;
-                return a_namespace == b_namespace;
-            },
-            .@"struct" => {
-                const a_struct_obj = a.castTag(.@"struct").?.data;
-                const b_struct_obj = (b.castTag(.@"struct") orelse return false).data;
-                return a_struct_obj == b_struct_obj;
-            },
             .tuple => {
                 if (!b.isSimpleTuple()) return false;
 
@@ -720,15 +709,6 @@ pub const Type = struct {
                 hashWithHasher(ty.childType(mod), hasher, mod);
             },
 
-            .empty_struct => {
-                std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
-                const namespace: *const Module.Namespace = ty.castTag(.empty_struct).?.data;
-                std.hash.autoHash(hasher, namespace);
-            },
-            .@"struct" => {
-                const struct_obj: *const Module.Struct = ty.castTag(.@"struct").?.data;
-                std.hash.autoHash(hasher, struct_obj);
-            },
             .tuple => {
                 std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
 
@@ -955,8 +935,6 @@ pub const Type = struct {
             .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet),
             .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred),
             .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name),
-            .empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope),
-            .@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct),
             .@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union),
             .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple),
             .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered),
@@ -1033,14 +1011,6 @@ pub const Type = struct {
         while (true) {
             const t = ty.tag();
             switch (t) {
-                .empty_struct => return writer.writeAll("struct {}"),
-
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return writer.print("({s} decl={d})", .{
-                        @tagName(t), struct_obj.owner_decl,
-                    });
-                },
                 .@"union", .union_safety_tagged, .union_tagged => {
                     const union_obj = ty.cast(Payload.Union).?.data;
                     return writer.print("({s} decl={d})", .{
@@ -1247,22 +1217,10 @@ pub const Type = struct {
     /// Prints a name suitable for `@typeName`.
     pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
         switch (ty.ip_index) {
-            .empty_struct_type => try writer.writeAll("@TypeOf(.{})"),
-
             .none => switch (ty.tag()) {
                 .inferred_alloc_const => unreachable,
                 .inferred_alloc_mut => unreachable,
 
-                .empty_struct => {
-                    const namespace = ty.castTag(.empty_struct).?.data;
-                    try namespace.renderFullyQualifiedName(mod, "", writer);
-                },
-
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    const decl = mod.declPtr(struct_obj.owner_decl);
-                    try decl.renderFullyQualifiedName(mod, writer);
-                },
                 .@"union", .union_safety_tagged, .union_tagged => {
                     const union_obj = ty.cast(Payload.Union).?.data;
                     const decl = mod.declPtr(union_obj.owner_decl);
@@ -1548,7 +1506,18 @@ pub const Type = struct {
                     return;
                 },
                 .simple_type => |s| return writer.writeAll(@tagName(s)),
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
+                        const decl = mod.declPtr(struct_obj.owner_decl);
+                        try decl.renderFullyQualifiedName(mod, writer);
+                    } else if (struct_type.namespace.unwrap()) |namespace_index| {
+                        const namespace = mod.namespacePtr(namespace_index);
+                        try namespace.renderFullyQualifiedName(mod, "", writer);
+                    } else {
+                        try writer.writeAll("@TypeOf(.{})");
+                    }
+                },
+
                 .union_type => @panic("TODO"),
                 .opaque_type => |opaque_type| {
                     const decl = mod.declPtr(opaque_type.decl);
@@ -1562,6 +1531,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         }
     }
@@ -1624,12 +1594,10 @@ pub const Type = struct {
                 },
 
                 // These are false because they are comptime-only types.
-                .empty_struct,
                 // These are function *bodies*, not pointers.
                 // Special exceptions have to be made when emitting functions due to
                 // this returning false.
-                .function,
-                => return false,
+                .function => return false,
 
                 .optional => {
                     const child_ty = ty.optionalChild(mod);
@@ -1646,28 +1614,6 @@ pub const Type = struct {
                     }
                 },
 
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    if (struct_obj.status == .field_types_wip) {
-                        // In this case, we guess that hasRuntimeBits() for this type is true,
-                        // and then later if our guess was incorrect, we emit a compile error.
-                        struct_obj.assumed_runtime_bits = true;
-                        return true;
-                    }
-                    switch (strat) {
-                        .sema => |sema| _ = try sema.resolveTypeFields(ty),
-                        .eager => assert(struct_obj.haveFieldTypes()),
-                        .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy,
-                    }
-                    for (struct_obj.fields.values()) |field| {
-                        if (field.is_comptime) continue;
-                        if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
-                            return true;
-                    } else {
-                        return false;
-                    }
-                },
-
                 .enum_full => {
                     const enum_full = ty.castTag(.enum_full).?.data;
                     return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
@@ -1824,7 +1770,31 @@ pub const Type = struct {
                     .generic_poison => unreachable,
                     .var_args_param => unreachable,
                 },
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
+                        // This struct has no fields.
+                        return false;
+                    };
+                    if (struct_obj.status == .field_types_wip) {
+                        // In this case, we guess that hasRuntimeBits() for this type is true,
+                        // and then later if our guess was incorrect, we emit a compile error.
+                        struct_obj.assumed_runtime_bits = true;
+                        return true;
+                    }
+                    switch (strat) {
+                        .sema => |sema| _ = try sema.resolveTypeFields(ty),
+                        .eager => assert(struct_obj.haveFieldTypes()),
+                        .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy,
+                    }
+                    for (struct_obj.fields.values()) |field| {
+                        if (field.is_comptime) continue;
+                        if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+                            return true;
+                    } else {
+                        return false;
+                    }
+                },
+
                 .union_type => @panic("TODO"),
                 .opaque_type => true,
 
@@ -1835,6 +1805,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         }
     }
@@ -1862,7 +1833,6 @@ pub const Type = struct {
                 .anyframe_T,
                 .tuple,
                 .anon_struct,
-                .empty_struct,
                 => false,
 
                 .enum_full,
@@ -1877,7 +1847,6 @@ pub const Type = struct {
                 => ty.childType(mod).hasWellDefinedLayout(mod),
 
                 .optional => ty.isPtrLikeOptional(mod),
-                .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto,
                 .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto,
                 .union_tagged => false,
             },
@@ -1936,7 +1905,13 @@ pub const Type = struct {
 
                     .var_args_param => unreachable,
                 },
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
+                        // Struct with no fields has a well-defined layout of no bits.
+                        return true;
+                    };
+                    return struct_obj.layout != .Auto;
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => false,
 
@@ -1947,6 +1922,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         };
     }
@@ -2146,68 +2122,6 @@ pub const Type = struct {
                 .optional => return abiAlignmentAdvancedOptional(ty, mod, strat),
                 .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat),
 
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    if (opt_sema) |sema| {
-                        if (struct_obj.status == .field_types_wip) {
-                            // We'll guess "pointer-aligned", if the struct has an
-                            // underaligned pointer field then some allocations
-                            // might require explicit alignment.
-                            return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
-                        }
-                        _ = try sema.resolveTypeFields(ty);
-                    }
-                    if (!struct_obj.haveFieldTypes()) switch (strat) {
-                        .eager => unreachable, // struct layout not resolved
-                        .sema => unreachable, // handled above
-                        .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
-                    };
-                    if (struct_obj.layout == .Packed) {
-                        switch (strat) {
-                            .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => |arena| {
-                                if (!struct_obj.haveLayout()) {
-                                    return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
-                                }
-                            },
-                            .eager => {},
-                        }
-                        assert(struct_obj.haveLayout());
-                        return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
-                    }
-
-                    const fields = ty.structFields();
-                    var big_align: u32 = 0;
-                    for (fields.values()) |field| {
-                        if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
-                            error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
-                            else => |e| return e,
-                        })) continue;
-
-                        const field_align = if (field.abi_align != 0)
-                            field.abi_align
-                        else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
-                            .scalar => |a| a,
-                            .val => switch (strat) {
-                                .eager => unreachable, // struct layout not resolved
-                                .sema => unreachable, // handled above
-                                .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
-                            },
-                        };
-                        big_align = @max(big_align, field_align);
-
-                        // This logic is duplicated in Module.Struct.Field.alignment.
-                        if (struct_obj.layout == .Extern or target.ofmt == .c) {
-                            if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
-                                // The C ABI requires 128 bit integer fields of structs
-                                // to be 16-bytes aligned.
-                                big_align = @max(big_align, 16);
-                            }
-                        }
-                    }
-                    return AbiAlignmentAdvanced{ .scalar = big_align };
-                },
-
                 .tuple, .anon_struct => {
                     const tuple = ty.tupleFields();
                     var big_align: u32 = 0;
@@ -2241,8 +2155,6 @@ pub const Type = struct {
                     return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true);
                 },
 
-                .empty_struct => return AbiAlignmentAdvanced{ .scalar = 0 },
-
                 .inferred_alloc_const,
                 .inferred_alloc_mut,
                 => unreachable,
@@ -2337,7 +2249,69 @@ pub const Type = struct {
                     .generic_poison => unreachable,
                     .var_args_param => unreachable,
                 },
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
+                        return AbiAlignmentAdvanced{ .scalar = 0 };
+
+                    if (opt_sema) |sema| {
+                        if (struct_obj.status == .field_types_wip) {
+                            // We'll guess "pointer-aligned", if the struct has an
+                            // underaligned pointer field then some allocations
+                            // might require explicit alignment.
+                            return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
+                        }
+                        _ = try sema.resolveTypeFields(ty);
+                    }
+                    if (!struct_obj.haveFieldTypes()) switch (strat) {
+                        .eager => unreachable, // struct layout not resolved
+                        .sema => unreachable, // handled above
+                        .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+                    };
+                    if (struct_obj.layout == .Packed) {
+                        switch (strat) {
+                            .sema => |sema| try sema.resolveTypeLayout(ty),
+                            .lazy => |arena| {
+                                if (!struct_obj.haveLayout()) {
+                                    return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
+                                }
+                            },
+                            .eager => {},
+                        }
+                        assert(struct_obj.haveLayout());
+                        return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
+                    }
+
+                    const fields = ty.structFields(mod);
+                    var big_align: u32 = 0;
+                    for (fields.values()) |field| {
+                        if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
+                            error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+                            else => |e| return e,
+                        })) continue;
+
+                        const field_align = if (field.abi_align != 0)
+                            field.abi_align
+                        else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
+                            .scalar => |a| a,
+                            .val => switch (strat) {
+                                .eager => unreachable, // struct layout not resolved
+                                .sema => unreachable, // handled above
+                                .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+                            },
+                        };
+                        big_align = @max(big_align, field_align);
+
+                        // This logic is duplicated in Module.Struct.Field.alignment.
+                        if (struct_obj.layout == .Extern or target.ofmt == .c) {
+                            if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
+                                // The C ABI requires 128 bit integer fields of structs
+                                // to be 16-bytes aligned.
+                                big_align = @max(big_align, 16);
+                            }
+                        }
+                    }
+                    return AbiAlignmentAdvanced{ .scalar = big_align };
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 },
 
@@ -2348,6 +2322,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         }
     }
@@ -2517,42 +2492,16 @@ pub const Type = struct {
                 .inferred_alloc_const => unreachable,
                 .inferred_alloc_mut => unreachable,
 
-                .empty_struct => return AbiSizeAdvanced{ .scalar = 0 },
-
-                .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) {
-                    .Packed => {
-                        const struct_obj = ty.castTag(.@"struct").?.data;
-                        switch (strat) {
-                            .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => |arena| {
-                                if (!struct_obj.haveLayout()) {
-                                    return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
-                                }
-                            },
-                            .eager => {},
-                        }
-                        assert(struct_obj.haveLayout());
-                        return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) };
-                    },
-                    else => {
-                        switch (strat) {
-                            .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => |arena| {
-                                if (ty.castTag(.@"struct")) |payload| {
-                                    const struct_obj = payload.data;
-                                    if (!struct_obj.haveLayout()) {
-                                        return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
-                                    }
-                                }
-                            },
-                            .eager => {},
-                        }
-                        const field_count = ty.structFieldCount();
-                        if (field_count == 0) {
-                            return AbiSizeAdvanced{ .scalar = 0 };
-                        }
-                        return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
-                    },
+                .tuple, .anon_struct => {
+                    switch (strat) {
+                        .sema => |sema| try sema.resolveTypeLayout(ty),
+                        .lazy, .eager => {},
+                    }
+                    const field_count = ty.structFieldCount(mod);
+                    if (field_count == 0) {
+                        return AbiSizeAdvanced{ .scalar = 0 };
+                    }
+                    return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
                 },
 
                 .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
@@ -2752,7 +2701,42 @@ pub const Type = struct {
                     .generic_poison => unreachable,
                     .var_args_param => unreachable,
                 },
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| switch (ty.containerLayout(mod)) {
+                    .Packed => {
+                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
+                            return AbiSizeAdvanced{ .scalar = 0 };
+
+                        switch (strat) {
+                            .sema => |sema| try sema.resolveTypeLayout(ty),
+                            .lazy => |arena| {
+                                if (!struct_obj.haveLayout()) {
+                                    return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
+                                }
+                            },
+                            .eager => {},
+                        }
+                        assert(struct_obj.haveLayout());
+                        return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) };
+                    },
+                    else => {
+                        switch (strat) {
+                            .sema => |sema| try sema.resolveTypeLayout(ty),
+                            .lazy => |arena| {
+                                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
+                                    return AbiSizeAdvanced{ .scalar = 0 };
+                                if (!struct_obj.haveLayout()) {
+                                    return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
+                                }
+                            },
+                            .eager => {},
+                        }
+                        const field_count = ty.structFieldCount(mod);
+                        if (field_count == 0) {
+                            return AbiSizeAdvanced{ .scalar = 0 };
+                        }
+                        return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
+                    },
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => unreachable, // no size available
 
@@ -2763,6 +2747,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         }
     }
@@ -2850,189 +2835,189 @@ pub const Type = struct {
     ) Module.CompileError!u64 {
         const target = mod.getTarget();
 
-        if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-            .int_type => |int_type| return int_type.bits,
-            .ptr_type => |ptr_type| switch (ptr_type.size) {
-                .Slice => return target.ptrBitWidth() * 2,
-                else => return target.ptrBitWidth() * 2,
-            },
-            .array_type => |array_type| {
-                const len = array_type.len + @boolToInt(array_type.sentinel != .none);
-                if (len == 0) return 0;
-                const elem_ty = array_type.child.toType();
-                const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
-                if (elem_size == 0) return 0;
-                const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
-                return (len - 1) * 8 * elem_size + elem_bit_size;
-            },
-            .vector_type => |vector_type| {
-                const child_ty = vector_type.child.toType();
-                const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema);
-                return elem_bit_size * vector_type.len;
-            },
-            .opt_type => @panic("TODO"),
-            .error_union_type => @panic("TODO"),
-            .simple_type => |t| switch (t) {
-                .f16 => return 16,
-                .f32 => return 32,
-                .f64 => return 64,
-                .f80 => return 80,
-                .f128 => return 128,
-
-                .usize,
-                .isize,
-                .@"anyframe",
-                => return target.ptrBitWidth(),
-
-                .c_char => return target.c_type_bit_size(.char),
-                .c_short => return target.c_type_bit_size(.short),
-                .c_ushort => return target.c_type_bit_size(.ushort),
-                .c_int => return target.c_type_bit_size(.int),
-                .c_uint => return target.c_type_bit_size(.uint),
-                .c_long => return target.c_type_bit_size(.long),
-                .c_ulong => return target.c_type_bit_size(.ulong),
-                .c_longlong => return target.c_type_bit_size(.longlong),
-                .c_ulonglong => return target.c_type_bit_size(.ulonglong),
-                .c_longdouble => return target.c_type_bit_size(.longdouble),
-
-                .bool => return 1,
-                .void => return 0,
-
-                // TODO revisit this when we have the concept of the error tag type
-                .anyerror => return 16,
-
-                .anyopaque => unreachable,
-                .type => unreachable,
-                .comptime_int => unreachable,
-                .comptime_float => unreachable,
-                .noreturn => unreachable,
-                .null => unreachable,
-                .undefined => unreachable,
-                .enum_literal => unreachable,
-                .generic_poison => unreachable,
-                .var_args_param => unreachable,
-
-                .atomic_order => unreachable, // missing call to resolveTypeFields
-                .atomic_rmw_op => unreachable, // missing call to resolveTypeFields
-                .calling_convention => unreachable, // missing call to resolveTypeFields
-                .address_space => unreachable, // missing call to resolveTypeFields
-                .float_mode => unreachable, // missing call to resolveTypeFields
-                .reduce_op => unreachable, // missing call to resolveTypeFields
-                .call_modifier => unreachable, // missing call to resolveTypeFields
-                .prefetch_options => unreachable, // missing call to resolveTypeFields
-                .export_options => unreachable, // missing call to resolveTypeFields
-                .extern_options => unreachable, // missing call to resolveTypeFields
-                .type_info => unreachable, // missing call to resolveTypeFields
-            },
-            .struct_type => @panic("TODO"),
-            .union_type => @panic("TODO"),
-            .opaque_type => unreachable,
-
-            // values, not types
-            .simple_value => unreachable,
-            .extern_func => unreachable,
-            .int => unreachable,
-            .ptr => unreachable,
-            .opt => unreachable,
-            .enum_tag => unreachable,
-        };
-
         const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
 
-        switch (ty.tag()) {
-            .function => unreachable, // represents machine code; not a pointer
-            .empty_struct => unreachable,
-            .inferred_alloc_const => unreachable,
-            .inferred_alloc_mut => unreachable,
-
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                if (struct_obj.layout != .Packed) {
-                    return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
-                }
-                if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
-                assert(struct_obj.haveLayout());
-                return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
-            },
-
-            .tuple, .anon_struct => {
-                if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
-                if (ty.containerLayout() != .Packed) {
-                    return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
-                }
-                var total: u64 = 0;
-                for (ty.tupleFields().types) |field_ty| {
-                    total += try bitSizeAdvanced(field_ty, mod, opt_sema);
-                }
-                return total;
-            },
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .function => unreachable, // represents machine code; not a pointer
+                .inferred_alloc_const => unreachable,
+                .inferred_alloc_mut => unreachable,
 
-            .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
-                const int_tag_ty = try ty.intTagType(mod);
-                return try bitSizeAdvanced(int_tag_ty, mod, opt_sema);
-            },
+                .tuple, .anon_struct => {
+                    if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
+                    if (ty.containerLayout(mod) != .Packed) {
+                        return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                    }
+                    var total: u64 = 0;
+                    for (ty.tupleFields().types) |field_ty| {
+                        total += try bitSizeAdvanced(field_ty, mod, opt_sema);
+                    }
+                    return total;
+                },
 
-            .@"union", .union_safety_tagged, .union_tagged => {
-                if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
-                if (ty.containerLayout() != .Packed) {
-                    return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
-                }
-                const union_obj = ty.cast(Payload.Union).?.data;
-                assert(union_obj.haveFieldTypes());
+                .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
+                    const int_tag_ty = try ty.intTagType(mod);
+                    return try bitSizeAdvanced(int_tag_ty, mod, opt_sema);
+                },
 
-                var size: u64 = 0;
-                for (union_obj.fields.values()) |field| {
-                    size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema));
-                }
-                return size;
-            },
+                .@"union", .union_safety_tagged, .union_tagged => {
+                    if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
+                    if (ty.containerLayout(mod) != .Packed) {
+                        return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                    }
+                    const union_obj = ty.cast(Payload.Union).?.data;
+                    assert(union_obj.haveFieldTypes());
 
-            .array => {
-                const payload = ty.castTag(.array).?.data;
-                const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod));
-                if (elem_size == 0 or payload.len == 0)
-                    return @as(u64, 0);
-                const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
-                return (payload.len - 1) * 8 * elem_size + elem_bit_size;
-            },
-            .array_sentinel => {
-                const payload = ty.castTag(.array_sentinel).?.data;
-                const elem_size = std.math.max(
-                    payload.elem_type.abiAlignment(mod),
-                    payload.elem_type.abiSize(mod),
-                );
-                const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
-                return payload.len * 8 * elem_size + elem_bit_size;
-            },
+                    var size: u64 = 0;
+                    for (union_obj.fields.values()) |field| {
+                        size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema));
+                    }
+                    return size;
+                },
+
+                .array => {
+                    const payload = ty.castTag(.array).?.data;
+                    const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod));
+                    if (elem_size == 0 or payload.len == 0)
+                        return @as(u64, 0);
+                    const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
+                    return (payload.len - 1) * 8 * elem_size + elem_bit_size;
+                },
+                .array_sentinel => {
+                    const payload = ty.castTag(.array_sentinel).?.data;
+                    const elem_size = std.math.max(
+                        payload.elem_type.abiAlignment(mod),
+                        payload.elem_type.abiSize(mod),
+                    );
+                    const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
+                    return payload.len * 8 * elem_size + elem_bit_size;
+                },
 
-            .anyframe_T => return target.ptrBitWidth(),
+                .anyframe_T => return target.ptrBitWidth(),
 
-            .pointer => switch (ty.castTag(.pointer).?.data.size) {
-                .Slice => return target.ptrBitWidth() * 2,
-                else => return target.ptrBitWidth(),
+                .pointer => switch (ty.castTag(.pointer).?.data.size) {
+                    .Slice => return target.ptrBitWidth() * 2,
+                    else => return target.ptrBitWidth(),
+                },
+
+                .error_set,
+                .error_set_single,
+                .error_set_inferred,
+                .error_set_merged,
+                => return 16, // TODO revisit this when we have the concept of the error tag type
+
+                .optional, .error_union => {
+                    // Optionals and error unions are not packed so their bitsize
+                    // includes padding bits.
+                    return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8;
+                },
             },
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .int_type => |int_type| return int_type.bits,
+                .ptr_type => |ptr_type| switch (ptr_type.size) {
+                    .Slice => return target.ptrBitWidth() * 2,
+                    else => return target.ptrBitWidth(),
+                },
+                .array_type => |array_type| {
+                    const len = array_type.len + @boolToInt(array_type.sentinel != .none);
+                    if (len == 0) return 0;
+                    const elem_ty = array_type.child.toType();
+                    const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
+                    if (elem_size == 0) return 0;
+                    const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
+                    return (len - 1) * 8 * elem_size + elem_bit_size;
+                },
+                .vector_type => |vector_type| {
+                    const child_ty = vector_type.child.toType();
+                    const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema);
+                    return elem_bit_size * vector_type.len;
+                },
+                .opt_type => @panic("TODO"),
+                .error_union_type => @panic("TODO"),
+                .simple_type => |t| switch (t) {
+                    .f16 => return 16,
+                    .f32 => return 32,
+                    .f64 => return 64,
+                    .f80 => return 80,
+                    .f128 => return 128,
 
-            .error_set,
-            .error_set_single,
-            .error_set_inferred,
-            .error_set_merged,
-            => return 16, // TODO revisit this when we have the concept of the error tag type
+                    .usize,
+                    .isize,
+                    .@"anyframe",
+                    => return target.ptrBitWidth(),
+
+                    .c_char => return target.c_type_bit_size(.char),
+                    .c_short => return target.c_type_bit_size(.short),
+                    .c_ushort => return target.c_type_bit_size(.ushort),
+                    .c_int => return target.c_type_bit_size(.int),
+                    .c_uint => return target.c_type_bit_size(.uint),
+                    .c_long => return target.c_type_bit_size(.long),
+                    .c_ulong => return target.c_type_bit_size(.ulong),
+                    .c_longlong => return target.c_type_bit_size(.longlong),
+                    .c_ulonglong => return target.c_type_bit_size(.ulonglong),
+                    .c_longdouble => return target.c_type_bit_size(.longdouble),
+
+                    .bool => return 1,
+                    .void => return 0,
+
+                    // TODO revisit this when we have the concept of the error tag type
+                    .anyerror => return 16,
+
+                    .anyopaque => unreachable,
+                    .type => unreachable,
+                    .comptime_int => unreachable,
+                    .comptime_float => unreachable,
+                    .noreturn => unreachable,
+                    .null => unreachable,
+                    .undefined => unreachable,
+                    .enum_literal => unreachable,
+                    .generic_poison => unreachable,
+                    .var_args_param => unreachable,
+
+                    .atomic_order => unreachable, // missing call to resolveTypeFields
+                    .atomic_rmw_op => unreachable, // missing call to resolveTypeFields
+                    .calling_convention => unreachable, // missing call to resolveTypeFields
+                    .address_space => unreachable, // missing call to resolveTypeFields
+                    .float_mode => unreachable, // missing call to resolveTypeFields
+                    .reduce_op => unreachable, // missing call to resolveTypeFields
+                    .call_modifier => unreachable, // missing call to resolveTypeFields
+                    .prefetch_options => unreachable, // missing call to resolveTypeFields
+                    .export_options => unreachable, // missing call to resolveTypeFields
+                    .extern_options => unreachable, // missing call to resolveTypeFields
+                    .type_info => unreachable, // missing call to resolveTypeFields
+                },
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
+                    if (struct_obj.layout != .Packed) {
+                        return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                    }
+                    if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
+                    assert(struct_obj.haveLayout());
+                    return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
+                },
+
+                .union_type => @panic("TODO"),
+                .opaque_type => unreachable,
 
-            .optional, .error_union => {
-                // Optionals and error unions are not packed so their bitsize
-                // includes padding bits.
-                return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8;
+                // values, not types
+                .simple_value => unreachable,
+                .extern_func => unreachable,
+                .int => unreachable,
+                .ptr => unreachable,
+                .opt => unreachable,
+                .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         }
     }
 
     /// Returns true if the type's layout is already resolved and it is safe
     /// to use `abiSize`, `abiAlignment` and `bitSize` on it.
-    pub fn layoutIsResolved(ty: Type, mod: *const Module) bool {
+    pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
         switch (ty.zigTypeTag(mod)) {
             .Struct => {
-                if (ty.castTag(.@"struct")) |struct_ty| {
-                    return struct_ty.data.haveLayout();
+                if (mod.typeToStruct(ty)) |struct_obj| {
+                    return struct_obj.haveLayout();
                 }
                 return true;
             },
@@ -3500,18 +3485,23 @@ pub const Type = struct {
         }
     }
 
-    pub fn containerLayout(ty: Type) std.builtin.Type.ContainerLayout {
+    pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
         return switch (ty.ip_index) {
             .empty_struct_type => .Auto,
             .none => switch (ty.tag()) {
                 .tuple, .anon_struct => .Auto,
-                .@"struct" => ty.castTag(.@"struct").?.data.layout,
                 .@"union" => ty.castTag(.@"union").?.data.layout,
                 .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout,
                 .union_tagged => ty.castTag(.union_tagged).?.data.layout,
                 else => unreachable,
             },
-            else => unreachable,
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
+                    return struct_obj.layout;
+                },
+                else => unreachable,
+            },
         };
     }
 
@@ -3631,14 +3621,16 @@ pub const Type = struct {
                 .array_sentinel => ty.castTag(.array_sentinel).?.data.len,
                 .tuple => ty.castTag(.tuple).?.data.types.len,
                 .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
-                .@"struct" => ty.castTag(.@"struct").?.data.fields.count(),
-                .empty_struct => 0,
 
                 else => unreachable,
             },
             else => switch (ip.indexToKey(ty.ip_index)) {
                 .vector_type => |vector_type| vector_type.len,
                 .array_type => |array_type| array_type.len,
+                .struct_type => |struct_type| {
+                    const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0;
+                    return struct_obj.fields.count();
+                },
                 else => unreachable,
             },
         };
@@ -3665,11 +3657,9 @@ pub const Type = struct {
     /// Asserts the type is an array, pointer or vector.
     pub fn sentinel(ty: Type, mod: *const Module) ?Value {
         return switch (ty.ip_index) {
-            .empty_struct_type => null,
             .none => switch (ty.tag()) {
                 .array,
                 .tuple,
-                .@"struct",
                 => null,
 
                 .pointer => ty.castTag(.pointer).?.data.sentinel,
@@ -3721,16 +3711,16 @@ pub const Type = struct {
 
     /// Returns true for integers, enums, error sets, and packed structs.
     /// If this function returns true, then intInfo() can be called on the type.
-    pub fn isAbiInt(ty: Type, mod: *const Module) bool {
+    pub fn isAbiInt(ty: Type, mod: *Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Int, .Enum, .ErrorSet => true,
-            .Struct => ty.containerLayout() == .Packed,
+            .Struct => ty.containerLayout(mod) == .Packed,
             else => false,
         };
     }
 
     /// Asserts the type is an integer, enum, error set, or vector of one of them.
-    pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType {
+    pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
         const target = mod.getTarget();
         var ty = starting_ty;
 
@@ -3750,12 +3740,6 @@ pub const Type = struct {
                     return .{ .signedness = .unsigned, .bits = 16 };
                 },
 
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    assert(struct_obj.layout == .Packed);
-                    ty = struct_obj.backing_int_ty;
-                },
-
                 else => unreachable,
             },
             .anyerror_type => {
@@ -3775,6 +3759,12 @@ pub const Type = struct {
             .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .int_type => |int_type| return int_type,
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    assert(struct_obj.layout == .Packed);
+                    ty = struct_obj.backing_int_ty;
+                },
+
                 .ptr_type => unreachable,
                 .array_type => unreachable,
                 .vector_type => |vector_type| ty = vector_type.child.toType(),
@@ -3782,7 +3772,7 @@ pub const Type = struct {
                 .opt_type => unreachable,
                 .error_union_type => unreachable,
                 .simple_type => unreachable, // handled via Index enum tag above
-                .struct_type => @panic("TODO"),
+
                 .union_type => unreachable,
                 .opaque_type => unreachable,
 
@@ -3793,6 +3783,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         };
     }
@@ -3996,17 +3987,6 @@ pub const Type = struct {
                     }
                 },
 
-                .@"struct" => {
-                    const s = ty.castTag(.@"struct").?.data;
-                    assert(s.haveFieldTypes());
-                    for (s.fields.values()) |field| {
-                        if (field.is_comptime) continue;
-                        if ((try field.ty.onePossibleValue(mod)) != null) continue;
-                        return null;
-                    }
-                    return Value.empty_struct;
-                },
-
                 .tuple, .anon_struct => {
                     const tuple = ty.tupleFields();
                     for (tuple.values, 0..) |val, i| {
@@ -4069,8 +4049,6 @@ pub const Type = struct {
                     return Value.empty_struct;
                 },
 
-                .empty_struct => return Value.empty_struct,
-
                 .array => {
                     if (ty.arrayLen(mod) == 0)
                         return Value.initTag(.empty_array);
@@ -4158,7 +4136,23 @@ pub const Type = struct {
                     .generic_poison => unreachable,
                     .var_args_param => unreachable,
                 },
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    if (mod.structPtrUnwrap(struct_type.index)) |s| {
+                        assert(s.haveFieldTypes());
+                        for (s.fields.values()) |field| {
+                            if (field.is_comptime) continue;
+                            if ((try field.ty.onePossibleValue(mod)) != null) continue;
+                            return null;
+                        }
+                    }
+                    // Reached when the struct has no fields, or when every
+                    // non-comptime field itself has exactly one possible value;
+                    // either way the struct has one possible value.
+                    const empty = try mod.intern(.{ .aggregate = .{
+                        .ty = ty.ip_index,
+                        .fields = &.{},
+                    } });
+                    return empty.toValue();
+                },
+
                 .union_type => @panic("TODO"),
                 .opaque_type => return null,
 
@@ -4169,6 +4163,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         };
     }
@@ -4177,12 +4172,11 @@ pub const Type = struct {
     /// resolves field types rather than asserting they are already resolved.
     /// TODO merge these implementations together with the "advanced" pattern seen
     /// elsewhere in this file.
-    pub fn comptimeOnly(ty: Type, mod: *const Module) bool {
+    pub fn comptimeOnly(ty: Type, mod: *Module) bool {
         return switch (ty.ip_index) {
             .empty_struct_type => false,
 
             .none => switch (ty.tag()) {
-                .empty_struct,
                 .error_set,
                 .error_set_single,
                 .error_set_inferred,
@@ -4222,20 +4216,6 @@ pub const Type = struct {
                     return false;
                 },
 
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    switch (struct_obj.requires_comptime) {
-                        .wip, .unknown => {
-                            // Return false to avoid incorrect dependency loops.
-                            // This will be handled correctly once merged with
-                            // `Sema.typeRequiresComptime`.
-                            return false;
-                        },
-                        .no => return false,
-                        .yes => return true,
-                    }
-                },
-
                 .@"union", .union_safety_tagged, .union_tagged => {
                     const union_obj = ty.cast(Type.Payload.Union).?.data;
                     switch (union_obj.requires_comptime) {
@@ -4326,7 +4306,21 @@ pub const Type = struct {
 
                     .var_args_param => unreachable,
                 },
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    // A struct with no fields is not comptime-only.
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                    switch (struct_obj.requires_comptime) {
+                        .wip, .unknown => {
+                            // Return false to avoid incorrect dependency loops.
+                            // This will be handled correctly once merged with
+                            // `Sema.typeRequiresComptime`.
+                            return false;
+                        },
+                        .no => return false,
+                        .yes => return true,
+                    }
+                },
+
                 .union_type => @panic("TODO"),
                 .opaque_type => false,
 
@@ -4337,6 +4331,7 @@ pub const Type = struct {
                 .ptr => unreachable,
                 .opt => unreachable,
                 .enum_tag => unreachable,
+                .aggregate => unreachable,
             },
         };
     }
@@ -4352,19 +4347,19 @@ pub const Type = struct {
         };
     }
 
-    pub fn isIndexable(ty: Type, mod: *const Module) bool {
+    pub fn isIndexable(ty: Type, mod: *Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Array, .Vector => true,
             .Pointer => switch (ty.ptrSize(mod)) {
                 .Slice, .Many, .C => true,
                 .One => ty.childType(mod).zigTypeTag(mod) == .Array,
             },
-            .Struct => ty.isTuple(),
+            .Struct => ty.isTuple(mod),
             else => false,
         };
     }
 
-    pub fn indexableHasLen(ty: Type, mod: *const Module) bool {
+    pub fn indexableHasLen(ty: Type, mod: *Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Array, .Vector => true,
             .Pointer => switch (ty.ptrSize(mod)) {
@@ -4372,7 +4367,7 @@ pub const Type = struct {
                 .Slice => true,
                 .One => ty.childType(mod).zigTypeTag(mod) == .Array,
             },
-            .Struct => ty.isTuple(),
+            .Struct => ty.isTuple(mod),
             else => false,
         };
     }
@@ -4381,10 +4376,8 @@ pub const Type = struct {
     pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex {
         return switch (ty.ip_index) {
             .none => switch (ty.tag()) {
-                .@"struct" => ty.castTag(.@"struct").?.data.namespace.toOptional(),
                 .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(),
                 .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(),
-                .empty_struct => @panic("TODO"),
                 .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(),
                 .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(),
                 .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(),
@@ -4393,6 +4386,7 @@ pub const Type = struct {
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+                .struct_type => |struct_type| struct_type.namespace,
                 else => .none,
             },
         };
@@ -4618,161 +4612,188 @@ pub const Type = struct {
         }
     }
 
-    pub fn structFields(ty: Type) Module.Struct.Fields {
-        return switch (ty.ip_index) {
-            .empty_struct_type => .{},
-            .none => switch (ty.tag()) {
-                .empty_struct => .{},
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    assert(struct_obj.haveFieldTypes());
-                    return struct_obj.fields;
-                },
-                else => unreachable,
+    pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
+        switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{};
+                assert(struct_obj.haveFieldTypes());
+                return struct_obj.fields;
             },
             else => unreachable,
-        };
+        }
     }
 
-    pub fn structFieldName(ty: Type, field_index: usize) []const u8 {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.keys()[field_index];
+    pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 {
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index],
+                else => unreachable,
+            },
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    assert(struct_obj.haveFieldTypes());
+                    return struct_obj.fields.keys()[field_index];
+                },
+                else => unreachable,
             },
-            .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index],
-            else => unreachable,
         }
     }
 
-    pub fn structFieldCount(ty: Type) usize {
+    pub fn structFieldCount(ty: Type, mod: *Module) usize {
         return switch (ty.ip_index) {
             .empty_struct_type => 0,
             .none => switch (ty.tag()) {
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
+                .tuple => ty.castTag(.tuple).?.data.types.len,
+                .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
+                else => unreachable,
+            },
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
                     assert(struct_obj.haveFieldTypes());
                     return struct_obj.fields.count();
                 },
-                .empty_struct => 0,
-                .tuple => ty.castTag(.tuple).?.data.types.len,
-                .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
                 else => unreachable,
             },
-            else => unreachable,
         };
     }
 
     /// Supports structs and unions.
-    pub fn structFieldType(ty: Type, index: usize) Type {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                return struct_obj.fields.values()[index].ty;
+    pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
+        return switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .@"union", .union_safety_tagged, .union_tagged => {
+                    const union_obj = ty.cast(Payload.Union).?.data;
+                    return union_obj.fields.values()[index].ty;
+                },
+                .tuple => return ty.castTag(.tuple).?.data.types[index],
+                .anon_struct => return ty.castTag(.anon_struct).?.data.types[index],
+                else => unreachable,
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return union_obj.fields.values()[index].ty;
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    return struct_obj.fields.values()[index].ty;
+                },
+                else => unreachable,
             },
-            .tuple => return ty.castTag(.tuple).?.data.types[index],
-            .anon_struct => return ty.castTag(.anon_struct).?.data.types[index],
-            else => unreachable,
-        }
+        };
     }
 
     pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                assert(struct_obj.layout != .Packed);
-                return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .@"union", .union_safety_tagged, .union_tagged => {
+                    const union_obj = ty.cast(Payload.Union).?.data;
+                    return union_obj.fields.values()[index].normalAlignment(mod);
+                },
+                .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod),
+                .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod),
+                else => unreachable,
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return union_obj.fields.values()[index].normalAlignment(mod);
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    assert(struct_obj.layout != .Packed);
+                    return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
+                },
+                else => unreachable,
             },
-            .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod),
-            .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod),
-            else => unreachable,
         }
     }
 
-    pub fn structFieldDefaultValue(ty: Type, index: usize) Value {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                return struct_obj.fields.values()[index].default_val;
-            },
-            .tuple => {
-                const tuple = ty.castTag(.tuple).?.data;
-                return tuple.values[index];
+    pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .tuple => {
+                    const tuple = ty.castTag(.tuple).?.data;
+                    return tuple.values[index];
+                },
+                .anon_struct => {
+                    const struct_obj = ty.castTag(.anon_struct).?.data;
+                    return struct_obj.values[index];
+                },
+                else => unreachable,
             },
-            .anon_struct => {
-                const struct_obj = ty.castTag(.anon_struct).?.data;
-                return struct_obj.values[index];
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    return struct_obj.fields.values()[index].default_val;
+                },
+                else => unreachable,
             },
-            else => unreachable,
         }
     }
 
     pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                const field = struct_obj.fields.values()[index];
-                if (field.is_comptime) {
-                    return field.default_val;
-                } else {
-                    return field.ty.onePossibleValue(mod);
-                }
-            },
-            .tuple => {
-                const tuple = ty.castTag(.tuple).?.data;
-                const val = tuple.values[index];
-                if (val.ip_index == .unreachable_value) {
-                    return tuple.types[index].onePossibleValue(mod);
-                } else {
-                    return val;
-                }
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .tuple => {
+                    const tuple = ty.castTag(.tuple).?.data;
+                    const val = tuple.values[index];
+                    if (val.ip_index == .unreachable_value) {
+                        return tuple.types[index].onePossibleValue(mod);
+                    } else {
+                        return val;
+                    }
+                },
+                .anon_struct => {
+                    const anon_struct = ty.castTag(.anon_struct).?.data;
+                    const val = anon_struct.values[index];
+                    if (val.ip_index == .unreachable_value) {
+                        return anon_struct.types[index].onePossibleValue(mod);
+                    } else {
+                        return val;
+                    }
+                },
+                else => unreachable,
             },
-            .anon_struct => {
-                const anon_struct = ty.castTag(.anon_struct).?.data;
-                const val = anon_struct.values[index];
-                if (val.ip_index == .unreachable_value) {
-                    return anon_struct.types[index].onePossibleValue(mod);
-                } else {
-                    return val;
-                }
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    const field = struct_obj.fields.values()[index];
+                    if (field.is_comptime) {
+                        return field.default_val;
+                    } else {
+                        return field.ty.onePossibleValue(mod);
+                    }
+                },
+                else => unreachable,
             },
-            else => unreachable,
         }
     }
 
-    pub fn structFieldIsComptime(ty: Type, index: usize) bool {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                if (struct_obj.layout == .Packed) return false;
-                const field = struct_obj.fields.values()[index];
-                return field.is_comptime;
-            },
-            .tuple => {
-                const tuple = ty.castTag(.tuple).?.data;
-                const val = tuple.values[index];
-                return val.ip_index != .unreachable_value;
+    pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .tuple => {
+                    const tuple = ty.castTag(.tuple).?.data;
+                    const val = tuple.values[index];
+                    return val.ip_index != .unreachable_value;
+                },
+                .anon_struct => {
+                    const anon_struct = ty.castTag(.anon_struct).?.data;
+                    const val = anon_struct.values[index];
+                    return val.ip_index != .unreachable_value;
+                },
+                else => unreachable,
             },
-            .anon_struct => {
-                const anon_struct = ty.castTag(.anon_struct).?.data;
-                const val = anon_struct.values[index];
-                return val.ip_index != .unreachable_value;
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    if (struct_obj.layout == .Packed) return false;
+                    const field = struct_obj.fields.values()[index];
+                    return field.is_comptime;
+                },
+                else => unreachable,
             },
-            else => unreachable,
         }
     }
 
     pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
-        const struct_obj = ty.castTag(.@"struct").?.data;
+        const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type;
+        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
         assert(struct_obj.layout == .Packed);
         comptime assert(Type.packed_struct_layout_version == 2);
 
@@ -4833,7 +4854,8 @@ pub const Type = struct {
    /// Get an iterator that iterates over all the struct fields, returning the
    /// field and offset of that field. Asserts that the type is a non-packed
    /// struct whose layout has already been resolved.
    pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
        const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type;
        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
        // Offsets are only meaningful once layout resolution has completed,
        // and packed structs have bit-level (not byte-offset) layout.
        assert(struct_obj.haveLayout());
        assert(struct_obj.layout != .Packed);
        return .{ .struct_obj = struct_obj, .module = mod };
    }
@@ -4841,57 +4863,62 @@ pub const Type = struct {
 
    /// Returns the byte offset of the field at `index`. Supports structs and
    /// unions. For an `index` equal to the field count, returns the total
    /// (alignment-padded) size — see the post-loop align-forward below.
    pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
        switch (ty.ip_index) {
            .none => switch (ty.tag()) {
                .tuple, .anon_struct => {
                    const tuple = ty.tupleFields();

                    var offset: u64 = 0;
                    var big_align: u32 = 0;

                    for (tuple.types, 0..) |field_ty, i| {
                        const field_val = tuple.values[i];
                        // A non-sentinel value marks a comptime field; zero-bit
                        // fields likewise occupy no storage. Both report the
                        // current offset without advancing it.
                        if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
                            // comptime field
                            if (i == index) return offset;
                            continue;
                        }

                        const field_align = field_ty.abiAlignment(mod);
                        big_align = @max(big_align, field_align);
                        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                        if (i == index) return offset;
                        offset += field_ty.abiSize(mod);
                    }
                    // `index` was past the last field: round up to the struct's
                    // overall alignment (at least 1) to yield the total size.
                    offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
                    return offset;
                },

                // Untagged unions: every payload starts at offset 0.
                .@"union" => return 0,
                .union_safety_tagged, .union_tagged => {
                    const union_obj = ty.cast(Payload.Union).?.data;
                    const layout = union_obj.getLayout(mod, true);
                    if (layout.tag_align >= layout.payload_align) {
                        // {Tag, Payload}: payload follows the tag, aligned up.
                        return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
                    } else {
                        // {Payload, Tag}: payload leads at offset 0.
                        return 0;
                    }
                },
                else => unreachable,
            },
            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                .struct_type => |struct_type| {
                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                    assert(struct_obj.haveLayout());
                    assert(struct_obj.layout != .Packed);
                    var it = ty.iterateStructOffsets(mod);
                    while (it.next()) |field_offset| {
                        if (index == field_offset.field)
                            return field_offset.offset;
                    }

                    // Out-of-range index: return the padded total size, mirroring
                    // the tuple branch above.
                    return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
                },

                else => unreachable,
            },
        }
    }
 
@@ -4901,6 +4928,7 @@ pub const Type = struct {
 
     pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
         switch (ty.ip_index) {
+            .empty_struct_type => return null,
             .none => switch (ty.tag()) {
                 .enum_full, .enum_nonexhaustive => {
                     const enum_full = ty.cast(Payload.EnumFull).?.data;
@@ -4914,10 +4942,6 @@ pub const Type = struct {
                     const enum_simple = ty.castTag(.enum_simple).?.data;
                     return enum_simple.srcLoc(mod);
                 },
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.srcLoc(mod);
-                },
                 .error_set => {
                     const error_set = ty.castTag(.error_set).?.data;
                     return error_set.srcLoc(mod);
@@ -4930,7 +4954,10 @@ pub const Type = struct {
                 else => return null,
             },
             else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    return struct_obj.srcLoc(mod);
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type),
                 else => null,
@@ -4954,10 +4981,6 @@ pub const Type = struct {
                     const enum_simple = ty.castTag(.enum_simple).?.data;
                     return enum_simple.owner_decl;
                 },
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.owner_decl;
-                },
                 .error_set => {
                     const error_set = ty.castTag(.error_set).?.data;
                     return error_set.owner_decl;
@@ -4970,7 +4993,10 @@ pub const Type = struct {
                 else => return null,
             },
             else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
+                    return struct_obj.owner_decl;
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => |opaque_type| opaque_type.decl,
                 else => null,
@@ -5013,8 +5039,6 @@ pub const Type = struct {
         /// The type is the inferred error set of a specific function.
         error_set_inferred,
         error_set_merged,
-        empty_struct,
-        @"struct",
         @"union",
         union_safety_tagged,
         union_tagged,
@@ -5046,12 +5070,10 @@ pub const Type = struct {
                 .function => Payload.Function,
                 .error_union => Payload.ErrorUnion,
                 .error_set_single => Payload.Name,
-                .@"struct" => Payload.Struct,
                 .@"union", .union_safety_tagged, .union_tagged => Payload.Union,
                 .enum_full, .enum_nonexhaustive => Payload.EnumFull,
                 .enum_simple => Payload.EnumSimple,
                 .enum_numbered => Payload.EnumNumbered,
-                .empty_struct => Payload.ContainerScope,
                 .tuple => Payload.Tuple,
                 .anon_struct => Payload.AnonStruct,
             };
@@ -5082,15 +5104,19 @@ pub const Type = struct {
         }
     };
 
-    pub fn isTuple(ty: Type) bool {
+    pub fn isTuple(ty: Type, mod: *Module) bool {
         return switch (ty.ip_index) {
-            .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple => true,
-                .@"struct" => ty.castTag(.@"struct").?.data.is_tuple,
                 else => false,
             },
-            else => false, // TODO struct
+            else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                    return struct_obj.is_tuple;
+                },
+                else => false,
+            },
         };
     }
 
@@ -5101,36 +5127,41 @@ pub const Type = struct {
                 .anon_struct => true,
                 else => false,
             },
-            else => false, // TODO struct
+            else => false,
         };
     }
 
-    pub fn isTupleOrAnonStruct(ty: Type) bool {
+    pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
         return switch (ty.ip_index) {
             .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple, .anon_struct => true,
-                .@"struct" => ty.castTag(.@"struct").?.data.is_tuple,
                 else => false,
             },
-            else => false, // TODO struct
+            else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                    return struct_obj.is_tuple;
+                },
+                else => false,
+            },
         };
     }
 
     pub fn isSimpleTuple(ty: Type) bool {
         return switch (ty.ip_index) {
-            .empty_struct => true,
+            .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple => true,
                 else => false,
             },
-            else => false, // TODO
+            else => false,
         };
     }
 
     pub fn isSimpleTupleOrAnonStruct(ty: Type) bool {
         return switch (ty.ip_index) {
-            .empty_struct => true,
+            .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple, .anon_struct => true,
                 else => false,
@@ -5142,7 +5173,7 @@ pub const Type = struct {
     // Only allowed for simple tuple types
     pub fn tupleFields(ty: Type) Payload.Tuple.Data {
         return switch (ty.ip_index) {
-            .empty_struct => .{ .types = &.{}, .values = &.{} },
+            .empty_struct_type => .{ .types = &.{}, .values = &.{} },
             .none => switch (ty.tag()) {
                 .tuple => ty.castTag(.tuple).?.data,
                 .anon_struct => .{
@@ -5319,18 +5350,6 @@ pub const Type = struct {
             data: []const u8,
         };
 
-        /// Mostly used for namespace like structs with zero fields.
-        /// Most commonly used for files.
-        pub const ContainerScope = struct {
-            base: Payload,
-            data: *Module.Namespace,
-        };
-
-        pub const Struct = struct {
-            base: Payload = .{ .tag = .@"struct" },
-            data: *Module.Struct,
-        };
-
         pub const Tuple = struct {
             base: Payload = .{ .tag = .tuple },
             data: Data,
src/TypedValue.zig
@@ -180,7 +180,7 @@ pub fn print(
                     switch (field_ptr.container_ty.tag()) {
                         .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}),
                         else => {
-                            const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index);
+                            const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
                             return writer.print(".{s}", .{field_name});
                         },
                     }
@@ -381,21 +381,27 @@ fn printAggregate(
     }
     if (ty.zigTypeTag(mod) == .Struct) {
         try writer.writeAll(".{");
-        const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items);
+        const max_len = std.math.min(ty.structFieldCount(mod), max_aggregate_items);
 
         var i: u32 = 0;
         while (i < max_len) : (i += 1) {
             if (i != 0) try writer.writeAll(", ");
-            switch (ty.tag()) {
-                .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}),
-                else => {},
+            switch (ty.ip_index) {
+                .none => switch (ty.tag()) {
+                    .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
+                    else => {},
+                },
+                else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                    .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
+                    else => {},
+                },
             }
             try print(.{
-                .ty = ty.structFieldType(i),
+                .ty = ty.structFieldType(i, mod),
                 .val = try val.fieldValue(ty, mod, i),
             }, writer, level - 1, mod);
         }
-        if (ty.structFieldCount() > max_aggregate_items) {
+        if (ty.structFieldCount(mod) > max_aggregate_items) {
             try writer.writeAll(", ...");
         }
         return writer.writeAll("}");
src/value.zig
@@ -996,10 +996,10 @@ pub const Value = struct {
                 const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => {
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = val.castTag(.aggregate).?.data;
                     for (fields, 0..) |field, i| {
                         const off = @intCast(usize, ty.structFieldOffset(i, mod));
@@ -1017,7 +1017,7 @@ pub const Value = struct {
                 const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
                 std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
             },
-            .Union => switch (ty.containerLayout()) {
+            .Union => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => return error.Unimplemented,
                 .Packed => {
@@ -1119,12 +1119,12 @@ pub const Value = struct {
                     bits += elem_bit_size;
                 }
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled in non-packed writeToMemory
                 .Packed => {
                     var bits: u16 = 0;
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = val.castTag(.aggregate).?.data;
                     for (fields, 0..) |field, i| {
                         const field_bits = @intCast(u16, field.ty.bitSize(mod));
@@ -1133,7 +1133,7 @@ pub const Value = struct {
                     }
                 },
             },
-            .Union => switch (ty.containerLayout()) {
+            .Union => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled in non-packed writeToMemory
                 .Packed => {
@@ -1236,14 +1236,14 @@ pub const Value = struct {
                 const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => {
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = try arena.alloc(Value, fields.len);
                     for (fields, 0..) |field, i| {
                         const off = @intCast(usize, ty.structFieldOffset(i, mod));
-                        const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod));
+                        const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod));
                         field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
                     }
                     return Tag.aggregate.create(arena, field_vals);
@@ -1346,12 +1346,12 @@ pub const Value = struct {
                 }
                 return Tag.aggregate.create(arena, elems);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled by non-packed readFromMemory
                 .Packed => {
                     var bits: u16 = 0;
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = try arena.alloc(Value, fields.len);
                     for (fields, 0..) |field, i| {
                         const field_bits = @intCast(u16, field.ty.bitSize(mod));
@@ -1996,7 +1996,7 @@ pub const Value = struct {
                 }
 
                 if (ty.zigTypeTag(mod) == .Struct) {
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     assert(fields.len == a_field_vals.len);
                     for (fields, 0..) |field, i| {
                         if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
@@ -2019,7 +2019,7 @@ pub const Value = struct {
             .@"union" => {
                 const a_union = a.castTag(.@"union").?.data;
                 const b_union = b.castTag(.@"union").?.data;
-                switch (ty.containerLayout()) {
+                switch (ty.containerLayout(mod)) {
                     .Packed, .Extern => {
                         const tag_ty = ty.unionTagTypeHypothetical();
                         if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
@@ -2252,7 +2252,7 @@ pub const Value = struct {
                     .aggregate => {
                         const field_values = val.castTag(.aggregate).?.data;
                         for (field_values, 0..) |field_val, i| {
-                            const field_ty = ty.structFieldType(i);
+                            const field_ty = ty.structFieldType(i, mod);
                             field_val.hash(field_ty, hasher, mod);
                         }
                     },
@@ -2623,7 +2623,7 @@ pub const Value = struct {
                     const data = val.castTag(.field_ptr).?.data;
                     if (data.container_ptr.pointerDecl()) |decl_index| {
                         const container_decl = mod.declPtr(decl_index);
-                        const field_type = data.container_ty.structFieldType(data.field_index);
+                        const field_type = data.container_ty.structFieldType(data.field_index, mod);
                         const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index);
                         return field_val.elemValue(mod, index);
                     } else unreachable;
@@ -2758,16 +2758,6 @@ pub const Value = struct {
     pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value {
         switch (val.ip_index) {
             .undef => return Value.undef,
-            .empty_struct => {
-                if (ty.isSimpleTupleOrAnonStruct()) {
-                    const tuple = ty.tupleFields();
-                    return tuple.values[index];
-                }
-                if (try ty.structFieldValueComptime(mod, index)) |some| {
-                    return some;
-                }
-                unreachable;
-            },
 
             .none => switch (val.tag()) {
                 .aggregate => {
@@ -2784,7 +2774,10 @@ pub const Value = struct {
 
                 else => unreachable,
             },
-            else => unreachable,
+            else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .aggregate => |aggregate| aggregate.fields[index].toValue(),
+                else => unreachable,
+            },
         }
     }