Commit accd5701c2

Andrew Kelley <andrew@ziglang.org>
2023-08-25 05:43:43
compiler: move struct types into InternPool proper
Structs were previously given indexes via `SegmentedList`, but were not actually backed by the InternPool arrays. After this change, the only remaining uses of `SegmentedList` in the compiler are `Module.Decl` and `Module.Namespace`. Once those last two are migrated to be backed by InternPool arrays as well, we can introduce state serialization by writing these arrays to disk all at once.

Unfortunately, a lot of source code locations touch the struct type API, so this commit is still a work in progress. Once it compiles and passes the test suite, I can provide some interesting data points, such as how it affected InternPool memory size, along with a performance comparison against the master branch.

I also couldn't resist migrating a bunch of the alignment API over to the log2 `Alignment` type rather than a mishmash of u32 and u64 byte units, with 0 implicitly meaning something different and special at every location. It turns out you can do all the math you need directly on the log2 representation of alignments.
1 parent 0345d78
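
To illustrate that last point, here is a minimal sketch of doing alignment math directly in log2 form. This is not the compiler's actual `InternPool.Alignment` implementation, only a toy mirroring the handful of operations the diff below relies on (`forward`, `max`, `compare`, `fromNonzeroByteUnits`):

const std = @import("std");

// Toy log2 alignment type: the enum value is the exponent, so .@"8" == 3.
const Alignment = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    none = std.math.maxInt(u6), // "no explicit alignment"; excluded from the math below
    _,

    fn fromNonzeroByteUnits(n: u64) Alignment {
        std.debug.assert(std.math.isPowerOfTwo(n));
        return @enumFromInt(std.math.log2_int(u64, n));
    }

    // Rounding an address up never needs the byte count:
    // forward(x, 2^n) == (x + 2^n - 1) & ~(2^n - 1).
    fn forward(a: Alignment, addr: u64) u64 {
        std.debug.assert(a != .none);
        const mask = (@as(u64, 1) << @intFromEnum(a)) - 1;
        return (addr + mask) & ~mask;
    }

    // max/compare of alignments reduce to max/compare of the exponents.
    fn max(a: Alignment, b: Alignment) Alignment {
        return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
    }

    fn compare(a: Alignment, op: std.math.CompareOperator, b: Alignment) bool {
        return std.math.compare(@intFromEnum(a), op, @intFromEnum(b));
    }
};

test "log2 alignment math" {
    try std.testing.expectEqual(@as(u64, 24), Alignment.@"8".forward(17));
    try std.testing.expect(Alignment.@"16".compare(.gt, .@"8"));
    try std.testing.expectEqual(Alignment.@"16", Alignment.@"4".max(.@"16"));
}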
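
And to make the serialization claim concrete: a `SegmentedList` stores its elements in non-contiguous shelves, whereas InternPool's array-backed storage is flat, so an eventual save-state step could dump each column with a single write. A hedged sketch only; the `Item` layout and `saveState` helper here are hypothetical, and no such serialization exists as of this commit:

const std = @import("std");

// Hypothetical pool item; stands in for InternPool's real tag/data columns.
const Item = struct { tag: u8, data: u32 };

fn saveState(items: *const std.MultiArrayList(Item), file: std.fs.File) !void {
    const slice = items.slice();
    // Each column is one contiguous slice, so each takes exactly one write.
    try file.writeAll(std.mem.sliceAsBytes(slice.items(.tag)));
    try file.writeAll(std.mem.sliceAsBytes(slice.items(.data)));
}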
src/arch/aarch64/CodeGen.zig
@@ -23,6 +23,7 @@ const DW = std.dwarf;
 const leb128 = std.leb;
 const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
+const Alignment = InternPool.Alignment;
 
 const CodeGenError = codegen.CodeGenError;
 const Result = codegen.Result;
@@ -506,11 +507,9 @@ fn gen(self: *Self) !void {
             // (or w0 when pointer size is 32 bits). As this register
             // might get overwritten along the way, save the address
             // to the stack.
-            const ptr_bits = self.target.ptrBitWidth();
-            const ptr_bytes = @divExact(ptr_bits, 8);
             const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
 
-            const stack_offset = try self.allocMem(ptr_bytes, ptr_bytes, null);
+            const stack_offset = try self.allocMem(8, .@"8", null);
 
             try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = ret_ptr_reg });
             self.ret_mcv = MCValue{ .stack_offset = stack_offset };
@@ -998,11 +997,11 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
 fn allocMem(
     self: *Self,
     abi_size: u32,
-    abi_align: u32,
+    abi_align: Alignment,
     maybe_inst: ?Air.Inst.Index,
 ) !u32 {
     assert(abi_size > 0);
-    assert(abi_align > 0);
+    assert(abi_align != .none);
 
     // In order to efficiently load and store stack items that fit
     // into registers, we bump up the alignment to the next power of
@@ -1010,10 +1009,10 @@ fn allocMem(
     const adjusted_align = if (abi_size > 8)
         abi_align
     else
-        std.math.ceilPowerOfTwoAssert(u32, abi_size);
+        Alignment.fromNonzeroByteUnits(std.math.ceilPowerOfTwoAssert(u64, abi_size));
 
     // TODO find a free slot instead of always appending
-    const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size;
+    const offset: u32 = @intCast(adjusted_align.forward(self.next_stack_offset) + abi_size);
     self.next_stack_offset = offset;
     self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
 
@@ -1515,12 +1514,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
         const len = try self.resolveInst(bin_op.rhs);
         const len_ty = self.typeOf(bin_op.rhs);
 
-        const ptr_bits = self.target.ptrBitWidth();
-        const ptr_bytes = @divExact(ptr_bits, 8);
-
-        const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
+        const stack_offset = try self.allocMem(16, .@"8", inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
-        try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
+        try self.genSetStack(len_ty, stack_offset - 8, len);
         break :result MCValue{ .stack_offset = stack_offset };
     };
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -3285,9 +3281,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             break :result MCValue{ .register = reg };
         }
 
-        const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
+        const optional_abi_size: u32 = @intCast(optional_ty.abiSize(mod));
         const optional_abi_align = optional_ty.abiAlignment(mod);
-        const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
+        const offset: u32 = @intCast(payload_ty.abiSize(mod));
 
         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
         try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3376,7 +3372,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.ptrBitWidth();
+        const ptr_bits = 64;
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -3400,7 +3396,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.ptrBitWidth();
+        const ptr_bits = 64;
         const ptr_bytes = @divExact(ptr_bits, 8);
         const mcv = try self.resolveInst(ty_op.operand);
         switch (mcv) {
@@ -4272,8 +4268,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
         const ret_ty = fn_ty.fnReturnType(mod);
-        const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
-        const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
+        const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
+        const ret_abi_align = ret_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -5939,11 +5935,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType(mod);
         const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
-
-        const ptr_bits = self.target.ptrBitWidth();
-        const ptr_bytes = @divExact(ptr_bits, 8);
-
-        const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
+        const ptr_bytes = 8;
+        const stack_offset = try self.allocMem(ptr_bytes * 2, .@"8", inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
         try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
         break :result MCValue{ .stack_offset = stack_offset };
@@ -6254,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
                 // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                 // values to spread across odd-numbered registers.
-                if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) {
+                if (ty.toType().abiAlignment(mod) == .@"16" and !self.target.isDarwin()) {
                     // Round up NCRN to the next even number
                     ncrn += ncrn % 2;
                 }
@@ -6272,7 +6265,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     ncrn = 8;
                     // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                     // that the entire stack space consumed by the arguments is 8-byte aligned.
-                    if (ty.toType().abiAlignment(mod) == 8) {
+                    if (ty.toType().abiAlignment(mod) == .@"8") {
                         if (nsaa % 8 != 0) {
                             nsaa += 8 - (nsaa % 8);
                         }
@@ -6312,10 +6305,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
                 if (ty.toType().abiSize(mod) > 0) {
-                    const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
+                    const param_size: u32 = @intCast(ty.toType().abiSize(mod));
                     const param_alignment = ty.toType().abiAlignment(mod);
 
-                    stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
+                    stack_offset = @intCast(param_alignment.forward(stack_offset));
                     result_arg.* = .{ .stack_argument_offset = stack_offset };
                     stack_offset += param_size;
                 } else {
src/arch/arm/abi.zig
@@ -47,7 +47,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
                 const field_ty = ty.structFieldType(i, mod);
                 const field_alignment = ty.structFieldAlign(i, mod);
                 const field_size = field_ty.bitSize(mod);
-                if (field_size > 32 or field_alignment > 32) {
+                if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
                     return Class.arrSize(bit_size, 64);
                 }
             }
@@ -66,7 +66,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
 
             for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
                 if (field_ty.toType().bitSize(mod) > 32 or
-                    mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)) > 32)
+                    mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
                 {
                     return Class.arrSize(bit_size, 64);
                 }
src/arch/arm/CodeGen.zig
@@ -23,6 +23,7 @@ const DW = std.dwarf;
 const leb128 = std.leb;
 const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
+const Alignment = InternPool.Alignment;
 
 const Result = codegen.Result;
 const CodeGenError = codegen.CodeGenError;
@@ -508,7 +509,7 @@ fn gen(self: *Self) !void {
             // The address of where to store the return value is in
             // r0. As this register might get overwritten along the
             // way, save the address to the stack.
-            const stack_offset = try self.allocMem(4, 4, null);
+            const stack_offset = try self.allocMem(4, .@"4", null);
 
             try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 });
             self.ret_mcv = MCValue{ .stack_offset = stack_offset };
@@ -986,14 +987,14 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
 fn allocMem(
     self: *Self,
     abi_size: u32,
-    abi_align: u32,
+    abi_align: Alignment,
     maybe_inst: ?Air.Inst.Index,
 ) !u32 {
     assert(abi_size > 0);
-    assert(abi_align > 0);
+    assert(abi_align != .none);
 
     // TODO find a free slot instead of always appending
-    const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
+    const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset) + abi_size);
     self.next_stack_offset = offset;
     self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
 
@@ -1490,7 +1491,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
         const len = try self.resolveInst(bin_op.rhs);
         const len_ty = self.typeOf(bin_op.rhs);
 
-        const stack_offset = try self.allocMem(8, 4, inst);
+        const stack_offset = try self.allocMem(8, .@"4", inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
         try self.genSetStack(len_ty, stack_offset - 4, len);
         break :result MCValue{ .stack_offset = stack_offset };
@@ -4251,8 +4252,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
         log.debug("airCall: return by reference", .{});
         const ret_ty = fn_ty.fnReturnType(mod);
-        const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
-        const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
+        const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
+        const ret_abi_align = ret_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ptr_ty = try mod.singleMutPtrType(ret_ty);
@@ -5896,7 +5897,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
         const array_ty = ptr_ty.childType(mod);
         const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
 
-        const stack_offset = try self.allocMem(8, 8, inst);
+        const stack_offset = try self.allocMem(8, .@"8", inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
         try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len });
         break :result MCValue{ .stack_offset = stack_offset };
@@ -6201,7 +6202,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
-                if (ty.toType().abiAlignment(mod) == 8)
+                if (ty.toType().abiAlignment(mod) == .@"8")
                     ncrn = std.mem.alignForward(usize, ncrn, 2);
 
                 const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
@@ -6216,7 +6217,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     return self.fail("TODO MCValues split between registers and stack", .{});
                 } else {
                     ncrn = 4;
-                    if (ty.toType().abiAlignment(mod) == 8)
+                    if (ty.toType().abiAlignment(mod) == .@"8")
                         nsaa = std.mem.alignForward(u32, nsaa, 8);
 
                     result_arg.* = .{ .stack_argument_offset = nsaa };
@@ -6252,10 +6253,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
             for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
                 if (ty.toType().abiSize(mod) > 0) {
-                    const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
+                    const param_size: u32 = @intCast(ty.toType().abiSize(mod));
                     const param_alignment = ty.toType().abiAlignment(mod);
 
-                    stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
+                    stack_offset = @intCast(param_alignment.forward(stack_offset));
                     result_arg.* = .{ .stack_argument_offset = stack_offset };
                     stack_offset += param_size;
                 } else {
src/arch/riscv64/CodeGen.zig
@@ -23,6 +23,7 @@ const leb128 = std.leb;
 const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
 const codegen = @import("../../codegen.zig");
+const Alignment = InternPool.Alignment;
 
 const CodeGenError = codegen.CodeGenError;
 const Result = codegen.Result;
@@ -53,7 +54,7 @@ ret_mcv: MCValue,
 fn_type: Type,
 arg_index: usize,
 src_loc: Module.SrcLoc,
-stack_align: u32,
+stack_align: Alignment,
 
 /// MIR Instructions
 mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
@@ -788,11 +789,10 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
     try table.ensureUnusedCapacity(self.gpa, additional_count);
 }
 
-fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
-    if (abi_align > self.stack_align)
-        self.stack_align = abi_align;
+fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 {
+    self.stack_align = self.stack_align.max(abi_align);
     // TODO find a free slot instead of always appending
-    const offset = mem.alignForward(u32, self.next_stack_offset, abi_align);
+    const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset));
     self.next_stack_offset = offset + abi_size;
     if (self.next_stack_offset > self.max_end_stack)
         self.max_end_stack = self.next_stack_offset;
@@ -822,8 +822,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     const abi_align = elem_ty.abiAlignment(mod);
-    if (abi_align > self.stack_align)
-        self.stack_align = abi_align;
+    self.stack_align = self.stack_align.max(abi_align);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -2602,7 +2601,7 @@ const CallMCValues = struct {
     args: []MCValue,
     return_value: MCValue,
     stack_byte_count: u32,
-    stack_align: u32,
+    stack_align: Alignment,
 
     fn deinit(self: *CallMCValues, func: *Self) void {
         func.gpa.free(self.args);
@@ -2632,7 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             assert(result.args.len == 0);
             result.return_value = .{ .unreach = {} };
             result.stack_byte_count = 0;
-            result.stack_align = 1;
+            result.stack_align = .@"1";
             return result;
         },
         .Unspecified, .C => {
@@ -2671,7 +2670,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             result.stack_byte_count = next_stack_offset;
-            result.stack_align = 16;
+            result.stack_align = .@"16";
         },
         else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}),
     }
src/arch/sparc64/CodeGen.zig
@@ -24,6 +24,7 @@ const CodeGenError = codegen.CodeGenError;
 const Result = @import("../../codegen.zig").Result;
 const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
 const Endian = std.builtin.Endian;
+const Alignment = InternPool.Alignment;
 
 const build_options = @import("build_options");
 
@@ -62,7 +63,7 @@ ret_mcv: MCValue,
 fn_type: Type,
 arg_index: usize,
 src_loc: Module.SrcLoc,
-stack_align: u32,
+stack_align: Alignment,
 
 /// MIR Instructions
 mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
@@ -227,7 +228,7 @@ const CallMCValues = struct {
     args: []MCValue,
     return_value: MCValue,
     stack_byte_count: u32,
-    stack_align: u32,
+    stack_align: Alignment,
 
     fn deinit(self: *CallMCValues, func: *Self) void {
         func.gpa.free(self.args);
@@ -424,7 +425,7 @@ fn gen(self: *Self) !void {
 
         // Backpatch stack offset
         const total_stack_size = self.max_end_stack + abi.stack_reserved_area;
-        const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align);
+        const stack_size = self.stack_align.forward(total_stack_size);
         if (math.cast(i13, stack_size)) |size| {
             self.mir_instructions.set(save_inst, .{
                 .tag = .save,
@@ -880,11 +881,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType(mod);
         const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
-
-        const ptr_bits = self.target.ptrBitWidth();
-        const ptr_bytes = @divExact(ptr_bits, 8);
-
-        const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
+        const ptr_bytes = 8;
+        const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
         try self.genSetStack(ptr_ty, stack_offset, ptr);
         try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
         break :result MCValue{ .stack_offset = stack_offset };
@@ -2438,11 +2436,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_ty = self.typeOf(bin_op.lhs);
         const len = try self.resolveInst(bin_op.rhs);
         const len_ty = self.typeOf(bin_op.rhs);
-
-        const ptr_bits = self.target.ptrBitWidth();
-        const ptr_bytes = @divExact(ptr_bits, 8);
-
-        const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
+        const ptr_bytes = 8;
+        const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
         try self.genSetStack(ptr_ty, stack_offset, ptr);
         try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
         break :result MCValue{ .stack_offset = stack_offset };
@@ -2782,11 +2777,10 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
     return result_index;
 }
 
-fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
-    if (abi_align > self.stack_align)
-        self.stack_align = abi_align;
+fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 {
+    self.stack_align = self.stack_align.max(abi_align);
     // TODO find a free slot instead of always appending
-    const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
+    const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset) + abi_size);
     self.next_stack_offset = offset;
     if (self.next_stack_offset > self.max_end_stack)
         self.max_end_stack = self.next_stack_offset;
@@ -2825,8 +2819,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     const abi_align = elem_ty.abiAlignment(mod);
-    if (abi_align > self.stack_align)
-        self.stack_align = abi_align;
+    self.stack_align = self.stack_align.max(abi_align);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -4479,7 +4472,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             assert(result.args.len == 0);
             result.return_value = .{ .unreach = {} };
             result.stack_byte_count = 0;
-            result.stack_align = 1;
+            result.stack_align = .@"1";
             return result;
         },
         .Unspecified, .C => {
@@ -4521,7 +4514,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             }
 
             result.stack_byte_count = next_stack_offset;
-            result.stack_align = 16;
+            result.stack_align = .@"16";
 
             if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
src/arch/wasm/abi.zig
@@ -32,16 +32,17 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
                 if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
-            // When the struct type is non-scalar
-            if (ty.structFieldCount(mod) > 1) return memory;
-            // When the struct's alignment is non-natural
-            const field = ty.structFields(mod).values()[0];
-            if (field.abi_align != .none) {
-                if (field.abi_align.toByteUnitsOptional().? > field.ty.abiAlignment(mod)) {
-                    return memory;
-                }
+            if (ty.structFieldCount(mod) > 1) {
+                // The struct type is non-scalar.
+                return memory;
+            }
+            const field_ty = ty.structFieldType(0, mod);
+            const resolved_align = ty.structFieldAlign(0, mod);
+            if (resolved_align.compare(.gt, field_ty.abiAlignment(mod))) {
+                // The struct's alignment is greater than natural alignment.
+                return memory;
             }
-            return classifyType(field.ty, mod);
+            return classifyType(field_ty, mod);
         },
         .Int, .Enum, .ErrorSet, .Vector => {
             const int_bits = ty.intInfo(mod).bits;
@@ -101,15 +102,11 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
     const ip = &mod.intern_pool;
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            switch (ty.containerLayout(mod)) {
-                .Packed => {
-                    const struct_obj = mod.typeToStruct(ty).?;
-                    return scalarType(struct_obj.backing_int_ty, mod);
-                },
-                else => {
-                    assert(ty.structFieldCount(mod) == 1);
-                    return scalarType(ty.structFieldType(0, mod), mod);
-                },
+            if (mod.typeToPackedStruct(ty)) |packed_struct| {
+                return scalarType(packed_struct.backingIntType(ip).toType(), mod);
+            } else {
+                assert(ty.structFieldCount(mod) == 1);
+                return scalarType(ty.structFieldType(0, mod), mod);
             }
         },
         .Union => {
src/arch/wasm/CodeGen.zig
@@ -25,6 +25,7 @@ const target_util = @import("../../target.zig");
 const Mir = @import("Mir.zig");
 const Emit = @import("Emit.zig");
 const abi = @import("abi.zig");
+const Alignment = InternPool.Alignment;
 const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
 const errUnionErrorOffset = codegen.errUnionErrorOffset;
 
@@ -709,7 +710,7 @@ stack_size: u32 = 0,
 /// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
 /// and also what the llvm backend will emit.
 /// However, local variables or the usage of `@setAlignStack` can overwrite this default.
-stack_alignment: u32 = 16,
+stack_alignment: Alignment = .@"16",
 
 // For each individual Wasm valtype we store a separate free list which
 // allows us to re-use locals that are no longer used. e.g. a temporary local.
@@ -991,6 +992,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
 /// Using a given `Type`, returns the corresponding type
 fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
     const target = mod.getTarget();
+    const ip = &mod.intern_pool;
     return switch (ty.zigTypeTag(mod)) {
         .Float => switch (ty.floatBits(target)) {
             16 => wasm.Valtype.i32, // stored/loaded as u16
@@ -1005,12 +1007,12 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
             if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
             break :blk wasm.Valtype.i32; // represented as pointer to stack
         },
-        .Struct => switch (ty.containerLayout(mod)) {
-            .Packed => {
-                const struct_obj = mod.typeToStruct(ty).?;
-                return typeToValtype(struct_obj.backing_int_ty, mod);
-            },
-            else => wasm.Valtype.i32,
+        .Struct => {
+            if (mod.typeToPackedStruct(ty)) |packed_struct| {
+                return typeToValtype(packed_struct.backingIntType(ip).toType(), mod);
+            } else {
+                return wasm.Valtype.i32;
+            }
         },
         .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
             .direct => wasm.Valtype.v128,
@@ -1285,12 +1287,12 @@ fn genFunc(func: *CodeGen) InnerError!void {
         // store stack pointer so we can restore it when we return from the function
         try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
         // get the total stack size
-        const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
-        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } });
-        // substract it from the current stack pointer
+        const aligned_stack = func.stack_alignment.forward(func.stack_size);
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } });
+        // subtract it from the current stack pointer
         try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
         // Get negative stack alignment
-        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } });
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
         // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
         try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
         // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -1438,7 +1440,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
                 });
                 try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                     .offset = value.offset(),
-                    .alignment = scalar_type.abiAlignment(mod),
+                    .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
                 });
             }
         },
@@ -1527,11 +1529,9 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
     };
     const abi_align = ty.abiAlignment(mod);
 
-    if (abi_align > func.stack_alignment) {
-        func.stack_alignment = abi_align;
-    }
+    func.stack_alignment = func.stack_alignment.max(abi_align);
 
-    const offset = std.mem.alignForward(u32, func.stack_size, abi_align);
+    const offset: u32 = @intCast(abi_align.forward(func.stack_size));
     defer func.stack_size = offset + abi_size;
 
     return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1560,11 +1560,9 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
             pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
         });
     };
-    if (abi_alignment > func.stack_alignment) {
-        func.stack_alignment = abi_alignment;
-    }
+    func.stack_alignment = func.stack_alignment.max(abi_alignment);
 
-    const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment);
+    const offset: u32 = @intCast(abi_alignment.forward(func.stack_size));
     defer func.stack_size = offset + abi_size;
 
     return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1749,10 +1747,8 @@ fn isByRef(ty: Type, mod: *Module) bool {
             return ty.hasRuntimeBitsIgnoreComptime(mod);
         },
         .Struct => {
-            if (mod.typeToStruct(ty)) |struct_obj| {
-                if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
-                    return isByRef(struct_obj.backing_int_ty, mod);
-                }
+            if (mod.typeToPackedStruct(ty)) |packed_struct| {
+                return isByRef(packed_struct.backingIntType(ip).toType(), mod);
             }
             return ty.hasRuntimeBitsIgnoreComptime(mod);
         },
@@ -2120,7 +2116,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 });
                 try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                     .offset = operand.offset(),
-                    .alignment = scalar_type.abiAlignment(mod),
+                    .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
                 });
             },
             else => try func.emitWValue(operand),
@@ -2385,19 +2381,19 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
         },
         .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
             .unrolled => {
-                const len = @as(u32, @intCast(abi_size));
+                const len: u32 = @intCast(abi_size);
                 return func.memcpy(lhs, rhs, .{ .imm32 = len });
             },
             .direct => {
                 try func.emitWValue(lhs);
                 try func.lowerToStack(rhs);
                 // TODO: Add helper functions for simd opcodes
-                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+                const extra_index: u32 = @intCast(func.mir_extra.items.len);
                 // stores as := opcode, offset, alignment (opcode::memarg)
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     std.wasm.simdOpcode(.v128_store),
                     offset + lhs.offset(),
-                    ty.abiAlignment(mod),
+                    @intCast(ty.abiAlignment(mod).toByteUnits(0)),
                 });
                 return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
             },
@@ -2451,7 +2447,10 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
     // store rhs value at stack pointer's location in memory
     try func.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
-        .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
+        .{
+            .offset = offset + lhs.offset(),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+        },
     );
 }
 
@@ -2510,7 +2509,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
         try func.mir_extra.appendSlice(func.gpa, &[_]u32{
             std.wasm.simdOpcode(.v128_load),
             offset + operand.offset(),
-            ty.abiAlignment(mod),
+            @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
         });
         try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
         return WValue{ .stack = {} };
@@ -2526,7 +2525,10 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
 
     try func.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
-        .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
+        .{
+            .offset = offset + operand.offset(),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+        },
     );
 
     return WValue{ .stack = {} };
@@ -3023,10 +3025,10 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
                     else => blk: {
                         const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
                         if (layout.payload_size == 0) break :blk 0;
-                        if (layout.payload_align > layout.tag_align) break :blk 0;
+                        if (layout.payload_align.compare(.gt, layout.tag_align)) break :blk 0;
 
                         // tag is stored first so calculate offset from where payload starts
-                        break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align)));
+                        break :blk layout.tag_align.forward(layout.tag_size);
                     },
                 },
                 .Pointer => switch (parent_ty.ptrSize(mod)) {
@@ -3103,8 +3105,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
     return @as(WantedT, @intCast(result));
 }
 
+/// This function is intended to assert that `isByRef` returns `false` for `ty`.
+/// However such an assertion fails on the behavior tests currently.
 fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
+    // TODO: enable this assertion
+    //assert(!isByRef(ty, mod));
     const ip = &mod.intern_pool;
     var val = arg_val;
     switch (ip.indexToKey(val.ip_index)) {
@@ -3235,16 +3241,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
                 val.writeToMemory(ty, mod, &buf) catch unreachable;
                 return func.storeSimdImmd(buf);
             },
-            .struct_type, .anon_struct_type => {
-                const struct_obj = mod.typeToStruct(ty).?;
-                assert(struct_obj.layout == .Packed);
+            .struct_type => |struct_type| {
+                // non-packed structs are not handled in this function because they
+                // are by-ref types.
+                assert(struct_type.layout == .Packed);
                 var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
-                val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
+                val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
+                const backing_int_ty = struct_type.backingIntType(ip).toType();
                 const int_val = try mod.intValue(
-                    struct_obj.backing_int_ty,
-                    std.mem.readIntLittle(u64, &buf),
+                    backing_int_ty,
+                    mem.readIntLittle(u64, &buf),
                 );
-                return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+                return func.lowerConstant(int_val, backing_int_ty);
             },
             else => unreachable,
         },
@@ -3269,6 +3277,7 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
 
 fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
+    const ip = &mod.intern_pool;
     switch (ty.zigTypeTag(mod)) {
         .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
         .Int, .Enum => switch (ty.intInfo(mod).bits) {
@@ -3298,9 +3307,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             return WValue{ .imm32 = 0xaaaaaaaa };
         },
         .Struct => {
-            const struct_obj = mod.typeToStruct(ty).?;
-            assert(struct_obj.layout == .Packed);
-            return func.emitUndefined(struct_obj.backing_int_ty);
+            const packed_struct = mod.typeToPackedStruct(ty).?;
+            return func.emitUndefined(packed_struct.backingIntType(ip).toType());
         },
         else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
     }
@@ -3340,7 +3348,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
         .i64 => |x| @as(i32, @intCast(x)),
         .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
         .big_int => unreachable,
-        .lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))),
+        .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiAlignment(mod).toByteUnits(0))))),
         .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
     };
 }
@@ -3757,6 +3765,7 @@ fn structFieldPtr(
 
 fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
+    const ip = &mod.intern_pool;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
 
@@ -3769,9 +3778,9 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result = switch (struct_ty.containerLayout(mod)) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => result: {
-                const struct_obj = mod.typeToStruct(struct_ty).?;
-                const offset = struct_obj.packedFieldBitOffset(mod, field_index);
-                const backing_ty = struct_obj.backing_int_ty;
+                const packed_struct = mod.typeToPackedStruct(struct_ty).?;
+                const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
+                const backing_ty = packed_struct.backingIntType(ip).toType();
                 const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
                     return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
                 };
@@ -3793,7 +3802,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     const truncated = try func.trunc(shifted_value, int_type, backing_ty);
                     const bitcasted = try func.bitcast(field_ty, int_type, truncated);
                     break :result try bitcasted.toLocal(func, field_ty);
-                } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
+                } else if (field_ty.isPtrAtRuntime(mod) and packed_struct.field_types.len == 1) {
                     // In this case we do not have to perform any transformations,
                     // we can simply reuse the operand.
                     break :result func.reuseOperand(struct_field.struct_operand, operand);
@@ -4053,7 +4062,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
         if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             try func.addMemArg(.i32_load16_u, .{
                 .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
-                .alignment = Type.anyerror.abiAlignment(mod),
+                .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
             });
         }
 
@@ -4141,7 +4150,10 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
         try func.emitWValue(err_union);
         try func.addImm32(0);
         const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
-        try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
+        try func.addMemArg(.i32_store16, .{
+            .offset = err_union.offset() + err_val_offset,
+            .alignment = 2,
+        });
         break :result err_union;
     };
     func.finishAir(inst, result, &.{ty_op.operand});
@@ -4977,7 +4989,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     opcode,
                     operand.offset(),
-                    elem_ty.abiAlignment(mod),
+                    @intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
                 });
                 try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
                 try func.addLabel(.local_set, result.local.value);
@@ -5065,7 +5077,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             std.wasm.simdOpcode(.i8x16_shuffle),
         } ++ [1]u32{undefined} ** 4;
 
-        var lanes = std.mem.asBytes(operands[1..]);
+        var lanes = mem.asBytes(operands[1..]);
         for (0..@as(usize, @intCast(mask_len))) |index| {
             const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
             const base_index = if (mask_elem >= 0)
@@ -5099,6 +5111,7 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
+    const ip = &mod.intern_pool;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const result_ty = func.typeOfIndex(inst);
     const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
@@ -5150,13 +5163,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     if (isByRef(result_ty, mod)) {
                         return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
-                    const struct_obj = mod.typeToStruct(result_ty).?;
-                    const fields = struct_obj.fields.values();
-                    const backing_type = struct_obj.backing_int_ty;
+                    const packed_struct = mod.typeToPackedStruct(result_ty).?;
+                    const field_types = packed_struct.field_types;
+                    const backing_type = packed_struct.backingIntType(ip).toType();
 
                     // ensure the result is zero'd
                     const result = try func.allocLocal(backing_type);
-                    if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
+                    if (backing_type.bitSize(mod) <= 32)
                         try func.addImm32(0)
                     else
                         try func.addImm64(0);
@@ -5164,22 +5177,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
                     var current_bit: u16 = 0;
                     for (elements, 0..) |elem, elem_index| {
-                        const field = fields[elem_index];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                        const field_ty = field_types.get(ip)[elem_index].toType();
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                        const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
+                        const shift_val = if (backing_type.bitSize(mod) <= 32)
                             WValue{ .imm32 = current_bit }
                         else
                             WValue{ .imm64 = current_bit };
 
                         const value = try func.resolveInst(elem);
-                        const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
+                        const value_bit_size: u16 = @intCast(field_ty.bitSize(mod));
                         const int_ty = try mod.intType(.unsigned, value_bit_size);
 
                         // load our current result on stack so we can perform all transformations
                         // using only stack values. Saving the cost of loads and stores.
                         try func.emitWValue(result);
-                        const bitcasted = try func.bitcast(int_ty, field.ty, value);
+                        const bitcasted = try func.bitcast(int_ty, field_ty, value);
                         const extended_val = try func.intcast(bitcasted, int_ty, backing_type);
                         // no need to shift any values when the current offset is 0
                         const shifted = if (current_bit != 0) shifted: {
@@ -5199,7 +5212,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                         if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
 
                         const elem_ty = result_ty.structFieldType(elem_index, mod);
-                        const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
+                        const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
                         const value = try func.resolveInst(elem);
                         try func.store(offset, value, elem_ty, 0);
 
@@ -5256,7 +5269,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         if (isByRef(union_ty, mod)) {
             const result_ptr = try func.allocStack(union_ty);
             const payload = try func.resolveInst(extra.init);
-            if (layout.tag_align >= layout.payload_align) {
+            if (layout.tag_align.compare(.gte, layout.payload_align)) {
                 if (isByRef(field_ty, mod)) {
                     const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
                     try func.store(payload_ptr, payload, field_ty, 0);
@@ -5420,9 +5433,9 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // when the tag alignment is smaller than the payload, the field will be stored
     // after the payload.
-    const offset = if (layout.tag_align < layout.payload_align) blk: {
-        break :blk @as(u32, @intCast(layout.payload_size));
-    } else @as(u32, 0);
+    const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
+        break :blk @intCast(layout.payload_size);
+    } else 0;
     try func.store(union_ptr, new_tag, tag_ty, offset);
     func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
@@ -5439,9 +5452,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     // when the tag alignment is smaller than the payload, the field will be stored
     // after the payload.
-    const offset = if (layout.tag_align < layout.payload_align) blk: {
-        break :blk @as(u32, @intCast(layout.payload_size));
-    } else @as(u32, 0);
+    const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
+        break :blk @intCast(layout.payload_size);
+    } else 0;
     const tag = try func.load(operand, tag_ty, offset);
     const result = try tag.toLocal(func, tag_ty);
     func.finishAir(inst, result, &.{ty_op.operand});
@@ -6366,7 +6379,7 @@ fn lowerTry(
             const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = Type.anyerror.abiAlignment(mod),
+                .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
             });
         }
         try func.addTag(.i32_eqz);
@@ -7287,7 +7300,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
         }, .{
             .offset = ptr_operand.offset(),
-            .alignment = ty.abiAlignment(mod),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
         });
         try func.addLabel(.local_tee, val_local.local.value);
         _ = try func.cmp(.stack, expected_val, ty, .eq);
@@ -7349,7 +7362,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = ty.abiAlignment(mod),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
         });
     } else {
         _ = try func.load(ptr, ty, 0);
@@ -7410,7 +7423,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     },
                     .{
                         .offset = ptr.offset(),
-                        .alignment = ty.abiAlignment(mod),
+                        .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
                     },
                 );
                 const select_res = try func.allocLocal(ty);
@@ -7470,7 +7483,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 };
                 try func.addAtomicMemArg(tag, .{
                     .offset = ptr.offset(),
-                    .alignment = ty.abiAlignment(mod),
+                    .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
                 });
                 const result = try WValue.toLocal(.stack, func, ty);
                 return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7566,7 +7579,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.lowerToStack(operand);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = ty.abiAlignment(mod),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
         });
     } else {
         try func.store(ptr, operand, ty, 0);
src/arch/x86_64/abi.zig
@@ -210,8 +210,9 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             // it contains unaligned fields, it has class MEMORY"
             // "If the size of the aggregate exceeds a single eightbyte, each is classified
             // separately.".
+            const struct_type = mod.typeToStruct(ty).?;
             const ty_size = ty.abiSize(mod);
-            if (ty.containerLayout(mod) == .Packed) {
+            if (struct_type.layout == .Packed) {
                 assert(ty_size <= 128);
                 result[0] = .integer;
                 if (ty_size > 64) result[1] = .integer;
@@ -222,15 +223,13 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
 
             var result_i: usize = 0; // out of 8
             var byte_i: usize = 0; // out of 8
-            const fields = ty.structFields(mod);
-            for (fields.values()) |field| {
-                if (field.abi_align != .none) {
-                    if (field.abi_align.toByteUnitsOptional().? < field.ty.abiAlignment(mod)) {
-                        return memory_class;
-                    }
-                }
-                const field_size = field.ty.abiSize(mod);
-                const field_class_array = classifySystemV(field.ty, mod, .other);
+            for (struct_type.field_types.get(ip), 0..) |field_ty_ip, i| {
+                const field_ty = field_ty_ip.toType();
+                const field_align = struct_type.fieldAlign(ip, i);
+                if (field_align != .none and field_align.compare(.lt, field_ty.abiAlignment(mod)))
+                    return memory_class;
+                const field_size = field_ty.abiSize(mod);
+                const field_class_array = classifySystemV(field_ty, mod, .other);
                 const field_class = std.mem.sliceTo(&field_class_array, .none);
                 if (byte_i + field_size <= 8) {
                     // Combine this field with the previous one.
@@ -341,10 +340,11 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
                 return memory_class;
 
             for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
-                if (union_obj.fieldAlign(ip, @intCast(field_index)).toByteUnitsOptional()) |a| {
-                    if (a < field_ty.toType().abiAlignment(mod)) {
-                        return memory_class;
-                    }
+                const field_align = union_obj.fieldAlign(ip, @intCast(field_index));
+                if (field_align != .none and
+                    field_align.compare(.lt, field_ty.toType().abiAlignment(mod)))
+                {
+                    return memory_class;
                 }
                 // Combine this field with the previous one.
                 const field_class = classifySystemV(field_ty.toType(), mod, .other);
@@ -533,13 +533,3 @@ const Register = @import("bits.zig").Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../type.zig").Type;
 const Value = @import("../../value.zig").Value;
-
-fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
-    return .{
-        .ty = Type.initTag(tag),
-        .default_val = Value.initTag(.unreachable_value),
-        .abi_align = 0,
-        .offset = offset,
-        .is_comptime = false,
-    };
-}
src/arch/x86_64/CodeGen.zig
@@ -27,6 +27,7 @@ const Lower = @import("Lower.zig");
 const Mir = @import("Mir.zig");
 const Module = @import("../../Module.zig");
 const InternPool = @import("../../InternPool.zig");
+const Alignment = InternPool.Alignment;
 const Target = std.Target;
 const Type = @import("../../type.zig").Type;
 const TypedValue = @import("../../TypedValue.zig");
@@ -607,19 +608,21 @@ const InstTracking = struct {
 
 const FrameAlloc = struct {
     abi_size: u31,
-    abi_align: u5,
+    abi_align: Alignment,
     ref_count: u16,
 
-    fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc {
-        assert(math.isPowerOfTwo(alloc_abi.alignment));
+    fn init(alloc_abi: struct { size: u64, alignment: Alignment }) FrameAlloc {
         return .{
             .abi_size = @intCast(alloc_abi.size),
-            .abi_align = math.log2_int(u32, alloc_abi.alignment),
+            .abi_align = alloc_abi.alignment,
             .ref_count = 0,
         };
     }
     fn initType(ty: Type, mod: *Module) FrameAlloc {
-        return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
+        return init(.{
+            .size = ty.abiSize(mod),
+            .alignment = ty.abiAlignment(mod),
+        });
     }
 };
 
@@ -702,12 +705,12 @@ pub fn generate(
         @intFromEnum(FrameIndex.stack_frame),
         FrameAlloc.init(.{
             .size = 0,
-            .alignment = @intCast(func.analysis(ip).stack_alignment.toByteUnitsOptional() orelse 1),
+            .alignment = func.analysis(ip).stack_alignment.max(.@"1"),
         }),
     );
     function.frame_allocs.set(
         @intFromEnum(FrameIndex.call_frame),
-        FrameAlloc.init(.{ .size = 0, .alignment = 1 }),
+        FrameAlloc.init(.{ .size = 0, .alignment = .@"1" }),
     );
 
     const fn_info = mod.typeToFunc(fn_type).?;
@@ -729,15 +732,21 @@ pub fn generate(
     function.ret_mcv = call_info.return_value;
     function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
         .size = Type.usize.abiSize(mod),
-        .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align),
+        .alignment = Type.usize.abiAlignment(mod).min(call_info.stack_align),
     }));
     function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
         .size = Type.usize.abiSize(mod),
-        .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align),
+        .alignment = Alignment.min(
+            call_info.stack_align,
+            Alignment.fromNonzeroByteUnits(bin_file.options.target.stackAlignment()),
+        ),
     }));
     function.frame_allocs.set(
         @intFromEnum(FrameIndex.args_frame),
-        FrameAlloc.init(.{ .size = call_info.stack_byte_count, .alignment = call_info.stack_align }),
+        FrameAlloc.init(.{
+            .size = call_info.stack_byte_count,
+            .alignment = call_info.stack_align,
+        }),
     );
 
     function.gen() catch |err| switch (err) {
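
The `@min`/`@max` builtins on byte units become `min`/`max` methods on the enum. The hunk replacing `toByteUnitsOptional() orelse 1` with `.max(.@"1")` implies that `.none` behaves as "absent" rather than as its raw maxInt tag value; a hedged sketch under that assumption:

    const std = @import("std");

    const Alignment = enum(u6) {
        @"1" = 0,
        @"8" = 3,
        @"16" = 4,
        none = std.math.maxInt(u6),
        _,

        // Assumed shape: since the tag is log2(bytes), integer max on the
        // tags is max in byte units, once .none is handled specially.
        fn max(lhs: Alignment, rhs: Alignment) Alignment {
            if (lhs == .none) return rhs;
            if (rhs == .none) return lhs;
            return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs)));
        }
    };

    test "none acts as an absent alignment" {
        try std.testing.expectEqual(Alignment.@"1", Alignment.max(.none, .@"1"));
        try std.testing.expectEqual(Alignment.@"16", Alignment.max(.@"8", .@"16"));
    }
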
@@ -2156,8 +2165,8 @@ fn setFrameLoc(
 ) void {
     const frame_i = @intFromEnum(frame_index);
     if (aligned) {
-        const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i];
-        offset.* = mem.alignForward(i32, offset.*, alignment);
+        const alignment = self.frame_allocs.items(.abi_align)[frame_i];
+        offset.* = @intCast(alignment.forward(@intCast(offset.*)));
     }
     self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
     offset.* += self.frame_allocs.items(.abi_size)[frame_i];
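
`forward` rounds an address up to the alignment without ever leaving the log2 representation, replacing `mem.alignForward` on byte units. A sketch of the presumed implementation plus a usage check:

    const std = @import("std");
    const assert = std.debug.assert;

    const Alignment = enum(u6) {
        @"4" = 2,
        none = std.math.maxInt(u6),
        _,

        // Classic align-forward: add the mask, then clear the low bits.
        fn forward(a: Alignment, addr: u64) u64 {
            assert(a != .none);
            const mask = (@as(u64, 1) << @intFromEnum(a)) - 1;
            return (addr + mask) & ~mask;
        }
    };

    test "round an offset up to the next aligned address" {
        try std.testing.expectEqual(@as(u64, 12), Alignment.@"4".forward(9));
        try std.testing.expectEqual(@as(u64, 12), Alignment.@"4".forward(12));
    }
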
@@ -2179,7 +2188,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
         const SortContext = struct {
             frame_align: @TypeOf(frame_align),
             pub fn lessThan(context: @This(), lhs: FrameIndex, rhs: FrameIndex) bool {
-                return context.frame_align[@intFromEnum(lhs)] > context.frame_align[@intFromEnum(rhs)];
+                return context.frame_align[@intFromEnum(lhs)].compare(.gt, context.frame_align[@intFromEnum(rhs)]);
             }
         };
         const sort_context = SortContext{ .frame_align = frame_align };
@@ -2189,8 +2198,8 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     const call_frame_align = frame_align[@intFromEnum(FrameIndex.call_frame)];
     const stack_frame_align = frame_align[@intFromEnum(FrameIndex.stack_frame)];
     const args_frame_align = frame_align[@intFromEnum(FrameIndex.args_frame)];
-    const needed_align = @max(call_frame_align, stack_frame_align);
-    const need_align_stack = needed_align > args_frame_align;
+    const needed_align = call_frame_align.max(stack_frame_align);
+    const need_align_stack = needed_align.compare(.gt, args_frame_align);
 
     // Create list of registers to save in the prologue.
     // TODO handle register classes
@@ -2214,21 +2223,21 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true);
     for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true);
     rsp_offset += stack_frame_align_offset;
-    rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
+    rsp_offset = @intCast(needed_align.forward(@intCast(rsp_offset)));
     rsp_offset -= stack_frame_align_offset;
     frame_size[@intFromEnum(FrameIndex.call_frame)] =
         @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);
 
     return .{
-        .stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0),
+        .stack_mask = @as(u32, math.maxInt(u32)) << @intCast(if (need_align_stack) @intFromEnum(needed_align) else 0),
         .stack_adjust = @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
         .save_reg_list = save_reg_list,
     };
 }
 
-fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 {
-    const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
-    return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1));
+fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) Alignment {
+    const alloc_align = self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
+    return @enumFromInt(@min(@intFromEnum(alloc_align), @ctz(frame_addr.off)));
 }
 
 fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 {
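
The rewritten `getFrameAddrAlignment` uses `@ctz(frame_addr.off)`: the number of trailing zero bits of the offset is exactly the log2 of the largest power of two dividing it, so the address `frame_base + off` is guaranteed the smaller of the frame's alignment and that value. Since `@ctz(0)` is the full bit width, a zero offset leaves the frame's alignment intact. A worked example:

    const std = @import("std");

    // An offset of 24 from a 16-byte-aligned frame slot is only guaranteed
    // 8-byte alignment: min(log2 16, ctz 24) = min(4, 3) = 3.
    test "frame address alignment in log2 form" {
        const frame_log2_align: u6 = 4; // frame slot aligned to 16
        const off: i32 = 24;
        const addr_log2_align = @min(frame_log2_align, @ctz(off));
        try std.testing.expectEqual(@as(u6, 3), addr_log2_align);
    }
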
@@ -2241,13 +2250,13 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
     const frame_align = frame_allocs_slice.items(.abi_align);
 
     const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)];
-    stack_frame_align.* = @max(stack_frame_align.*, alloc.abi_align);
+    stack_frame_align.* = stack_frame_align.max(alloc.abi_align);
 
     for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| {
         const abi_size = frame_size[@intFromEnum(frame_index)];
         if (abi_size != alloc.abi_size) continue;
         const abi_align = &frame_align[@intFromEnum(frame_index)];
-        abi_align.* = @max(abi_align.*, alloc.abi_align);
+        abi_align.* = abi_align.max(alloc.abi_align);
 
         _ = self.free_frame_indices.swapRemoveAt(free_i);
         return frame_index;
@@ -2266,7 +2275,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
         .size = math.cast(u32, val_ty.abiSize(mod)) orelse {
             return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
         },
-        .alignment = @max(ptr_ty.ptrAlignment(mod), 1),
+        .alignment = ptr_ty.ptrAlignment(mod).max(.@"1"),
     }));
 }
 
@@ -4266,7 +4275,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (tag_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align < layout.payload_align) blk: {
+    const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) blk: {
         // TODO reusing the operand
         const reg = try self.copyToTmpRegister(ptr_union_ty, ptr);
         try self.genBinOpMir(
@@ -4309,7 +4318,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
         switch (operand) {
             .load_frame => |frame_addr| {
                 if (tag_abi_size <= 8) {
-                    const off: i32 = if (layout.tag_align < layout.payload_align)
+                    const off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
                         @intCast(layout.payload_size)
                     else
                         0;
@@ -4321,7 +4330,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
                 return self.fail("TODO implement get_union_tag for ABI larger than 8 bytes and operand {}", .{operand});
             },
             .register => {
-                const shift: u6 = if (layout.tag_align < layout.payload_align)
+                const shift: u6 = if (layout.tag_align.compare(.lt, layout.payload_align))
                     @intCast(layout.payload_size * 8)
                 else
                     0;
@@ -5600,8 +5609,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const src_mcv = try self.resolveInst(operand);
         const field_off: u32 = switch (container_ty.containerLayout(mod)) {
             .Auto, .Extern => @intCast(container_ty.structFieldOffset(index, mod) * 8),
-            .Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
-                struct_obj.packedFieldBitOffset(mod, index)
+            .Packed => if (mod.typeToStruct(container_ty)) |struct_type|
+                mod.structPackedFieldBitOffset(struct_type, index)
             else
                 0,
         };
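
`packedFieldBitOffset` moves from the old `struct_obj` onto `Module` as `structPackedFieldBitOffset`, but the quantity it computes is unchanged in spirit: the sum of the bit sizes of all preceding fields. An illustration with a hypothetical helper and made-up field sizes:

    const std = @import("std");

    // Illustrative only: a packed field's bit offset is the sum of the bit
    // sizes of every field that precedes it.
    fn packedFieldBitOffset(field_bit_sizes: []const u16, field_index: usize) u32 {
        var bit_offset: u32 = 0;
        for (field_bit_sizes[0..field_index]) |bits| bit_offset += bits;
        return bit_offset;
    }

    test "bit offset of the third field in packed struct { u3, u9, u4 }" {
        try std.testing.expectEqual(@as(u32, 12), packedFieldBitOffset(&.{ 3, 9, 4 }, 2));
    }
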
@@ -8084,14 +8093,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     // We need a properly aligned and sized call frame to be able to call this function.
     {
         const needed_call_frame =
-            FrameAlloc.init(.{ .size = info.stack_byte_count, .alignment = info.stack_align });
+            FrameAlloc.init(.{
+            .size = info.stack_byte_count,
+            .alignment = info.stack_align,
+        });
         const frame_allocs_slice = self.frame_allocs.slice();
         const stack_frame_size =
             &frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)];
         stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
         const stack_frame_align =
             &frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
-        stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align);
+        stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
     }
 
     try self.spillEflagsIfOccupied();
@@ -9944,7 +9956,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
                 .indirect => try self.moveStrategy(ty, false),
                 .load_frame => |frame_addr| try self.moveStrategy(
                     ty,
-                    self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod),
+                    self.getFrameAddrAlignment(frame_addr).compare(.gte, ty.abiAlignment(mod)),
                 ),
                 .lea_frame => .{ .move = .{ ._, .lea } },
                 else => unreachable,
@@ -9973,10 +9985,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
                         .base = .{ .reg = .ds },
                         .disp = small_addr,
                     });
-                    switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
-                        u32,
+                    switch (try self.moveStrategy(ty, ty.abiAlignment(mod).check(
                         @as(u32, @bitCast(small_addr)),
-                        ty.abiAlignment(mod),
                     ))) {
                         .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
                         .insert_extract => |ie| try self.asmRegisterMemoryImmediate(
@@ -10142,22 +10152,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
             );
             const src_alias = registerAlias(src_reg, abi_size);
             switch (try self.moveStrategy(ty, switch (base) {
-                .none => mem.isAlignedGeneric(
-                    u32,
-                    @as(u32, @bitCast(disp)),
-                    ty.abiAlignment(mod),
-                ),
+                .none => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
                 .reg => |reg| switch (reg) {
-                    .es, .cs, .ss, .ds => mem.isAlignedGeneric(
-                        u32,
-                        @as(u32, @bitCast(disp)),
-                        ty.abiAlignment(mod),
-                    ),
+                    .es, .cs, .ss, .ds => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
                     else => false,
                 },
                 .frame => |frame_index| self.getFrameAddrAlignment(
                     .{ .index = frame_index, .off = disp },
-                ) >= ty.abiAlignment(mod),
+                ).compare(.gte, ty.abiAlignment(mod)),
             })) {
                 .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias),
                 .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate(
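
`check` replaces `mem.isAlignedGeneric` here: an address is a multiple of the alignment exactly when it has at least log2(bytes) trailing zero bits. A plausible sketch — the real helper may mask instead, with the same result:

    const std = @import("std");
    const assert = std.debug.assert;

    const Alignment = enum(u6) {
        @"8" = 3,
        none = std.math.maxInt(u6),
        _,

        fn check(a: Alignment, addr: u64) bool {
            assert(a != .none);
            return @ctz(addr) >= @intFromEnum(a);
        }
    };

    test "alignment check on raw displacements" {
        try std.testing.expect(Alignment.@"8".check(64));
        try std.testing.expect(!Alignment.@"8".check(20));
    }
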
@@ -11079,7 +11081,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
         stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
         const stack_frame_align =
             &frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
-        stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align);
+        stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
     }
 
     try self.spillEflagsIfOccupied();
@@ -11418,7 +11420,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                 const frame_index =
                     try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
                 if (result_ty.containerLayout(mod) == .Packed) {
-                    const struct_obj = mod.typeToStruct(result_ty).?;
+                    const struct_type = mod.typeToStruct(result_ty).?;
                     try self.genInlineMemset(
                         .{ .lea_frame = .{ .index = frame_index } },
                         .{ .immediate = 0 },
@@ -11437,7 +11439,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                         }
                         const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
                         const elem_abi_bits = elem_abi_size * 8;
-                        const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i);
+                        const elem_off = mod.structPackedFieldBitOffset(struct_type, elem_i);
                         const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
                         const elem_bit_off = elem_off % elem_abi_bits;
                         const elem_mcv = try self.resolveInst(elem);
@@ -11576,13 +11578,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
         const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
         const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
         const tag_int = tag_int_val.toUnsignedInt(mod);
-        const tag_off: i32 = if (layout.tag_align < layout.payload_align)
+        const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
             @intCast(layout.payload_size)
         else
             0;
         try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int });
 
-        const pl_off: i32 = if (layout.tag_align < layout.payload_align)
+        const pl_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
             0
         else
             @intCast(layout.tag_size);
@@ -11823,7 +11825,7 @@ const CallMCValues = struct {
     args: []MCValue,
     return_value: InstTracking,
     stack_byte_count: u31,
-    stack_align: u31,
+    stack_align: Alignment,
 
     fn deinit(self: *CallMCValues, func: *Self) void {
         func.gpa.free(self.args);
@@ -11867,12 +11869,12 @@ fn resolveCallingConventionValues(
         .Naked => {
             assert(result.args.len == 0);
             result.return_value = InstTracking.init(.unreach);
-            result.stack_align = 8;
+            result.stack_align = .@"8";
         },
         .C => {
             var param_reg_i: usize = 0;
             var param_sse_reg_i: usize = 0;
-            result.stack_align = 16;
+            result.stack_align = .@"16";
 
             switch (self.target.os.tag) {
                 .windows => {
@@ -11957,7 +11959,7 @@ fn resolveCallingConventionValues(
                 }
 
                 const param_size: u31 = @intCast(ty.abiSize(mod));
-                const param_align: u31 = @intCast(ty.abiAlignment(mod));
+                const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -11968,7 +11970,7 @@ fn resolveCallingConventionValues(
             }
         },
         .Unspecified => {
-            result.stack_align = 16;
+            result.stack_align = .@"16";
 
             // Return values
             if (ret_ty.zigTypeTag(mod) == .NoReturn) {
@@ -11997,7 +11999,7 @@ fn resolveCallingConventionValues(
                     continue;
                 }
                 const param_size: u31 = @intCast(ty.abiSize(mod));
-                const param_align: u31 = @intCast(ty.abiAlignment(mod));
+                const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -12010,7 +12012,7 @@ fn resolveCallingConventionValues(
         else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}),
     }
 
-    result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align);
+    result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count));
     return result;
 }
 
src/codegen/c/type.zig
@@ -283,14 +283,20 @@ pub const CType = extern union {
         @"align": Alignment,
         abi: Alignment,
 
-        pub fn init(alignment: u64, abi_alignment: u32) AlignAs {
-            const @"align" = Alignment.fromByteUnits(alignment);
-            const abi_align = Alignment.fromNonzeroByteUnits(abi_alignment);
+        pub fn init(@"align": Alignment, abi_align: Alignment) AlignAs {
+            assert(abi_align != .none);
             return .{
                 .@"align" = if (@"align" != .none) @"align" else abi_align,
                 .abi = abi_align,
             };
         }
+
+        pub fn initByteUnits(alignment: u64, abi_alignment: u32) AlignAs {
+            return init(
+                Alignment.fromByteUnits(alignment),
+                Alignment.fromNonzeroByteUnits(abi_alignment),
+            );
+        }
         pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
             const abi_align = ty.abiAlignment(mod);
             return init(abi_align, abi_align);
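
`AlignAs.init` now takes `Alignment` values directly, with `initByteUnits` left as a bridge for call sites still holding raw byte units. The fallback rule is stated in the hunk itself; restated as a runnable check with a stand-in enum:

    const std = @import("std");

    const Alignment = enum(u6) {
        @"4" = 2,
        @"8" = 3,
        none = std.math.maxInt(u6),
        _,
    };

    const AlignAs = struct {
        @"align": Alignment,
        abi: Alignment,

        // An unspecified (.none) requested alignment resolves to the ABI one.
        fn init(@"align": Alignment, abi_align: Alignment) AlignAs {
            std.debug.assert(abi_align != .none);
            return .{
                .@"align" = if (@"align" != .none) @"align" else abi_align,
                .abi = abi_align,
            };
        }
    };

    test "unspecified alignment falls back to ABI alignment" {
        try std.testing.expectEqual(Alignment.@"8", AlignAs.init(.none, .@"8").@"align");
        try std.testing.expectEqual(Alignment.@"4", AlignAs.init(.@"4", .@"8").@"align");
    }
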
@@ -1360,6 +1366,7 @@ pub const CType = extern union {
 
         pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
             const mod = lookup.getModule();
+            const ip = &mod.intern_pool;
 
             self.* = undefined;
             if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
@@ -1382,12 +1389,12 @@ pub const CType = extern union {
                     .array => switch (kind) {
                         .forward, .complete, .global => {
                             const abi_size = ty.abiSize(mod);
-                            const abi_align = ty.abiAlignment(mod);
+                            const abi_align = ty.abiAlignment(mod).toByteUnits(0);
                             self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
                                 .len = @divExact(abi_size, abi_align),
                                 .elem_type = tagFromIntInfo(.{
                                     .signedness = .unsigned,
-                                    .bits = @as(u16, @intCast(abi_align * 8)),
+                                    .bits = @intCast(abi_align * 8),
                                 }).toIndex(),
                             } } };
                             self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1488,10 +1495,10 @@ pub const CType = extern union {
                 },
 
                 .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
-                    if (mod.typeToStruct(ty)) |struct_obj| {
-                        try self.initType(struct_obj.backing_int_ty, kind, lookup);
+                    if (mod.typeToPackedStruct(ty)) |packed_struct| {
+                        try self.initType(packed_struct.backingIntType(ip).toType(), kind, lookup);
                     } else {
-                        const bits = @as(u16, @intCast(ty.bitSize(mod)));
+                        const bits: u16 = @intCast(ty.bitSize(mod));
                         const int_ty = try mod.intType(.unsigned, bits);
                         try self.initType(int_ty, kind, lookup);
                     }
@@ -1722,7 +1729,6 @@ pub const CType = extern union {
 
                 .Fn => {
                     const info = mod.typeToFunc(ty).?;
-                    const ip = &mod.intern_pool;
                     if (!info.is_generic) {
                         if (lookup.isMutable()) {
                             const param_kind: Kind = switch (kind) {
src/codegen/c.zig
@@ -17,6 +17,7 @@ const LazySrcLoc = Module.LazySrcLoc;
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
 const InternPool = @import("../InternPool.zig");
+const Alignment = InternPool.Alignment;
 
 const BigIntLimb = std.math.big.Limb;
 const BigInt = std.math.big.int;
@@ -292,7 +293,7 @@ pub const Function = struct {
 
         const result: CValue = if (lowersToArray(ty, mod)) result: {
             const writer = f.object.code_header.writer();
-            const alignment = 0;
+            const alignment: Alignment = .none;
             const decl_c_value = try f.allocLocalValue(ty, alignment);
             const gpa = f.object.dg.gpa;
             try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -318,25 +319,25 @@ pub const Function = struct {
     /// Skips the reuse logic. This function should be used for any persistent allocation, i.e.
     /// those which go into `allocs`. This function does not add the resulting local into `allocs`;
     /// that responsibility lies with the caller.
-    fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
+    fn allocLocalValue(f: *Function, ty: Type, alignment: Alignment) !CValue {
         const mod = f.object.dg.module;
         const gpa = f.object.dg.gpa;
         try f.locals.append(gpa, .{
             .cty_idx = try f.typeToIndex(ty, .complete),
             .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
         });
-        return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) };
+        return .{ .new_local = @intCast(f.locals.items.len - 1) };
     }
 
     fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue {
-        const result = try f.allocAlignedLocal(ty, .{}, 0);
+        const result = try f.allocAlignedLocal(ty, .{}, .none);
         log.debug("%{d}: allocating t{d}", .{ inst, result.new_local });
         return result;
     }
 
     /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
     /// not be used for persistent locals (i.e. those in `allocs`).
-    fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
+    fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: Alignment) !CValue {
         const mod = f.object.dg.module;
         if (f.free_locals_map.getPtr(.{
             .cty_idx = try f.typeToIndex(ty, .complete),
@@ -1299,139 +1300,134 @@ pub const DeclGen = struct {
                     }
                     try writer.writeByte('}');
                 },
-                .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                    switch (struct_obj.layout) {
-                        .Auto, .Extern => {
-                            if (!location.isInitializer()) {
+                .struct_type => |struct_type| switch (struct_type.layout) {
+                    .Auto, .Extern => {
+                        if (!location.isInitializer()) {
+                            try writer.writeByte('(');
+                            try dg.renderType(writer, ty);
+                            try writer.writeByte(')');
+                        }
+
+                        try writer.writeByte('{');
+                        var empty = true;
+                        const field_types = struct_type.field_types.get(ip);
+                        for (struct_type.runtime_order.get(ip)) |runtime_order| {
+                            const field_i = runtime_order.toInt() orelse break;
+                            const field_ty = field_types[field_i];
+
+                            if (!empty) try writer.writeByte(',');
+                            const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
+                                .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+                                    .ty = field_ty,
+                                    .storage = .{ .u64 = bytes[field_i] },
+                                } }),
+                                .elems => |elems| elems[field_i],
+                                .repeated_elem => |elem| elem,
+                            };
+                            try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), initializer_type);
+
+                            empty = false;
+                        }
+                        try writer.writeByte('}');
+                    },
+                    .Packed => {
+                        const int_info = ty.intInfo(mod);
+
+                        const bits = Type.smallestUnsignedBits(int_info.bits - 1);
+                        const bit_offset_ty = try mod.intType(.unsigned, bits);
+                        const field_types = struct_type.field_types.get(ip);
+
+                        var bit_offset: u64 = 0;
+                        var eff_num_fields: usize = 0;
+
+                        for (field_types) |field_ty| {
+                            if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+                            eff_num_fields += 1;
+                        }
+
+                        if (eff_num_fields == 0) {
+                            try writer.writeByte('(');
+                            try dg.renderValue(writer, ty, Value.undef, initializer_type);
+                            try writer.writeByte(')');
+                        } else if (ty.bitSize(mod) > 64) {
+                            // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
+                            var num_or = eff_num_fields - 1;
+                            while (num_or > 0) : (num_or -= 1) {
+                                try writer.writeAll("zig_or_");
+                                try dg.renderTypeForBuiltinFnName(writer, ty);
                                 try writer.writeByte('(');
-                                try dg.renderType(writer, ty);
-                                try writer.writeByte(')');
                             }
 
-                            try writer.writeByte('{');
-                            var empty = true;
-                            for (struct_obj.fields.values(), 0..) |field, field_i| {
-                                if (field.is_comptime) continue;
-                                if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                            var eff_index: usize = 0;
+                            var needs_closing_paren = false;
+                            for (field_types, 0..) |field_ty, field_i| {
+                                if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                                if (!empty) try writer.writeByte(',');
                                 const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
                                     .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
-                                        .ty = field.ty.toIntern(),
+                                        .ty = field_ty,
                                         .storage = .{ .u64 = bytes[field_i] },
                                     } }),
                                     .elems => |elems| elems[field_i],
                                     .repeated_elem => |elem| elem,
                                 };
-                                try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type);
-
-                                empty = false;
-                            }
-                            try writer.writeByte('}');
-                        },
-                        .Packed => {
-                            const int_info = ty.intInfo(mod);
-
-                            const bits = Type.smallestUnsignedBits(int_info.bits - 1);
-                            const bit_offset_ty = try mod.intType(.unsigned, bits);
-
-                            var bit_offset: u64 = 0;
-                            var eff_num_fields: usize = 0;
+                                const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
+                                if (bit_offset != 0) {
+                                    try writer.writeAll("zig_shl_");
+                                    try dg.renderTypeForBuiltinFnName(writer, ty);
+                                    try writer.writeByte('(');
+                                    try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
+                                    try writer.writeAll(", ");
+                                    const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+                                    try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+                                    try writer.writeByte(')');
+                                } else {
+                                    try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
+                                }
 
-                            for (struct_obj.fields.values()) |field| {
-                                if (field.is_comptime) continue;
-                                if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                                if (needs_closing_paren) try writer.writeByte(')');
+                                if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
 
-                                eff_num_fields += 1;
+                                bit_offset += field_ty.toType().bitSize(mod);
+                                needs_closing_paren = true;
+                                eff_index += 1;
                             }
+                        } else {
+                            try writer.writeByte('(');
+                            // a << a_off | b << b_off | c << c_off
+                            var empty = true;
+                            for (field_types, 0..) |field_ty, field_i| {
+                                if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                            if (eff_num_fields == 0) {
+                                if (!empty) try writer.writeAll(" | ");
                                 try writer.writeByte('(');
-                                try dg.renderValue(writer, ty, Value.undef, initializer_type);
+                                try dg.renderType(writer, ty);
                                 try writer.writeByte(')');
-                            } else if (ty.bitSize(mod) > 64) {
-                                // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
-                                var num_or = eff_num_fields - 1;
-                                while (num_or > 0) : (num_or -= 1) {
-                                    try writer.writeAll("zig_or_");
-                                    try dg.renderTypeForBuiltinFnName(writer, ty);
-                                    try writer.writeByte('(');
-                                }
 
-                                var eff_index: usize = 0;
-                                var needs_closing_paren = false;
-                                for (struct_obj.fields.values(), 0..) |field, field_i| {
-                                    if (field.is_comptime) continue;
-                                    if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
-                                    const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
-                                        .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
-                                            .ty = field.ty.toIntern(),
-                                            .storage = .{ .u64 = bytes[field_i] },
-                                        } }),
-                                        .elems => |elems| elems[field_i],
-                                        .repeated_elem => |elem| elem,
-                                    };
-                                    const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
-                                    if (bit_offset != 0) {
-                                        try writer.writeAll("zig_shl_");
-                                        try dg.renderTypeForBuiltinFnName(writer, ty);
-                                        try writer.writeByte('(');
-                                        try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
-                                        try writer.writeAll(", ");
-                                        const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
-                                        try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
-                                        try writer.writeByte(')');
-                                    } else {
-                                        try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
-                                    }
-
-                                    if (needs_closing_paren) try writer.writeByte(')');
-                                    if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
-
-                                    bit_offset += field.ty.bitSize(mod);
-                                    needs_closing_paren = true;
-                                    eff_index += 1;
-                                }
-                            } else {
-                                try writer.writeByte('(');
-                                // a << a_off | b << b_off | c << c_off
-                                var empty = true;
-                                for (struct_obj.fields.values(), 0..) |field, field_i| {
-                                    if (field.is_comptime) continue;
-                                    if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
-                                    if (!empty) try writer.writeAll(" | ");
-                                    try writer.writeByte('(');
-                                    try dg.renderType(writer, ty);
-                                    try writer.writeByte(')');
+                                const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
+                                    .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+                                        .ty = field_ty,
+                                        .storage = .{ .u64 = bytes[field_i] },
+                                    } }),
+                                    .elems => |elems| elems[field_i],
+                                    .repeated_elem => |elem| elem,
+                                };
 
-                                    const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
-                                        .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
-                                            .ty = field.ty.toIntern(),
-                                            .storage = .{ .u64 = bytes[field_i] },
-                                        } }),
-                                        .elems => |elems| elems[field_i],
-                                        .repeated_elem => |elem| elem,
-                                    };
-
-                                    if (bit_offset != 0) {
-                                        try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
-                                        try writer.writeAll(" << ");
-                                        const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
-                                        try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
-                                    } else {
-                                        try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
-                                    }
-
-                                    bit_offset += field.ty.bitSize(mod);
-                                    empty = false;
+                                if (bit_offset != 0) {
+                                    try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
+                                    try writer.writeAll(" << ");
+                                    const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+                                    try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+                                } else {
+                                    try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
                                 }
-                                try writer.writeByte(')');
+
+                                bit_offset += field_ty.toType().bitSize(mod);
+                                empty = false;
                             }
-                        },
-                    }
+                            try writer.writeByte(')');
+                        }
+                    },
                 },
                 else => unreachable,
             },
@@ -1723,7 +1719,7 @@ pub const DeclGen = struct {
         ty: Type,
         name: CValue,
         qualifiers: CQualifiers,
-        alignment: u64,
+        alignment: Alignment,
         kind: CType.Kind,
     ) error{ OutOfMemory, AnalysisFail }!void {
         const mod = dg.module;
@@ -1854,7 +1850,7 @@ pub const DeclGen = struct {
             decl.ty,
             .{ .decl = decl_index },
             CQualifiers.init(.{ .@"const" = variable.is_const }),
-            @as(u32, @intCast(decl.alignment.toByteUnits(0))),
+            decl.alignment,
             .complete,
         );
         try fwd_decl_writer.writeAll(";\n");
@@ -2460,7 +2456,7 @@ pub fn genErrDecls(o: *Object) !void {
         } });
 
         try writer.writeAll("static ");
-        try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete);
+        try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, .none, .complete);
         try writer.writeAll(" = ");
         try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer);
         try writer.writeAll(";\n");
@@ -2472,7 +2468,7 @@ pub fn genErrDecls(o: *Object) !void {
     });
 
     try writer.writeAll("static ");
-    try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete);
+    try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, .none, .complete);
     try writer.writeAll(" = {");
     for (mod.global_error_set.keys(), 0..) |name_nts, value| {
         const name = mod.intern_pool.stringToSlice(name_nts);
@@ -2523,7 +2519,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
             try w.writeByte(' ');
             try w.writeAll(fn_name);
             try w.writeByte('(');
-            try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
+            try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, .none, .complete);
             try w.writeAll(") {\n switch (tag) {\n");
             for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
                 const index = @as(u32, @intCast(index_usize));
@@ -2546,7 +2542,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
                 try w.print("  case {}: {{\n   static ", .{
                     try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
                 });
-                try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
+                try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
                 try w.writeAll(" = ");
                 try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer);
                 try w.writeAll(";\n   return (");
@@ -2706,7 +2702,7 @@ pub fn genDecl(o: *Object) !void {
         if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
         if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
             try w.print("zig_linksection(\"{s}\", ", .{s});
-        try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment.toByteUnits(0), .complete);
+        try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment, .complete);
         if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
         try w.writeAll(" = ");
         try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
@@ -2717,14 +2713,14 @@ pub fn genDecl(o: *Object) !void {
         const fwd_decl_writer = o.dg.fwd_decl.writer();
 
         try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
-        try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
+        try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment, .complete);
         try fwd_decl_writer.writeAll(";\n");
 
         const w = o.writer();
         if (!is_global) try w.writeAll("static ");
         if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
             try w.print("zig_linksection(\"{s}\", ", .{s});
-        try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
+        try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment, .complete);
         if (decl.@"linksection" != .none) try w.writeAll(", read)");
         try w.writeAll(" = ");
         try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
@@ -3353,8 +3349,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
 
     try reap(f, inst, &.{ty_op.operand});
 
-    const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
-        alignment >= src_ty.abiAlignment(mod)
+    const is_aligned = if (ptr_info.flags.alignment != .none)
+        ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
     else
         true;
     const is_array = lowersToArray(src_ty, mod);
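
In `ptr_info.flags`, `.none` means the pointer carries no explicit alignment and is therefore naturally (ABI-)aligned; only an explicitly smaller alignment makes the access unaligned. The same logic, extracted into a standalone check with a stand-in enum:

    const std = @import("std");

    const Alignment = enum(u6) {
        @"1" = 0,
        @"4" = 2,
        @"8" = 3,
        none = std.math.maxInt(u6),
        _,

        fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
            std.debug.assert(lhs != .none and rhs != .none);
            return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
        }
    };

    // Mirrors the is_aligned expression in airLoad/airStore above.
    fn isAligned(ptr_align: Alignment, abi_align: Alignment) bool {
        return if (ptr_align != .none) ptr_align.compare(.gte, abi_align) else true;
    }

    test "only explicit under-alignment is unaligned" {
        try std.testing.expect(isAligned(.none, .@"8"));
        try std.testing.expect(isAligned(.@"8", .@"8"));
        try std.testing.expect(!isAligned(.@"4", .@"8"));
    }
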
@@ -3625,8 +3621,8 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         return .none;
     }
 
-    const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
-        alignment >= src_ty.abiAlignment(mod)
+    const is_aligned = if (ptr_info.flags.alignment != .none)
+        ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
     else
         true;
     const is_array = lowersToArray(ptr_info.child.toType(), mod);
@@ -4847,7 +4843,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
             if (is_reg) {
                 const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
                 try writer.writeAll("register ");
-                const alignment = 0;
+                const alignment: Alignment = .none;
                 const local_value = try f.allocLocalValue(output_ty, alignment);
                 try f.allocs.put(gpa, local_value.new_local, false);
                 try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete);
@@ -4880,7 +4876,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
             if (asmInputNeedsLocal(f, constraint, input_val)) {
                 const input_ty = f.typeOf(input);
                 if (is_reg) try writer.writeAll("register ");
-                const alignment = 0;
+                const alignment: Alignment = .none;
                 const local_value = try f.allocLocalValue(input_ty, alignment);
                 try f.allocs.put(gpa, local_value.new_local, false);
                 try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete);
@@ -5427,12 +5423,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
             else
                 .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
             .Packed => {
-                const struct_obj = mod.typeToStruct(struct_ty).?;
+                const struct_type = mod.typeToStruct(struct_ty).?;
                 const int_info = struct_ty.intInfo(mod);
 
                 const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
 
-                const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
+                const bit_offset = mod.structPackedFieldBitOffset(struct_type, extra.field_index);
                 const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
 
                 const field_int_signedness = if (inst_ty.isAbiInt(mod))
src/codegen/llvm.zig
@@ -1076,7 +1076,7 @@ pub const Object = struct {
         table_variable_index.setMutability(.constant, &o.builder);
         table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
         table_variable_index.setAlignment(
-            Builder.Alignment.fromByteUnits(slice_ty.abiAlignment(mod)),
+            slice_ty.abiAlignment(mod).toLlvm(),
             &o.builder,
         );
 
@@ -1318,8 +1318,9 @@ pub const Object = struct {
             _ = try attributes.removeFnAttr(.@"noinline");
         }
 
-        if (func.analysis(ip).stack_alignment.toByteUnitsOptional()) |alignment| {
-            try attributes.addFnAttr(.{ .alignstack = Builder.Alignment.fromByteUnits(alignment) }, &o.builder);
+        const stack_alignment = func.analysis(ip).stack_alignment;
+        if (stack_alignment != .none) {
+            try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
             try attributes.addFnAttr(.@"noinline", &o.builder);
         } else {
             _ = try attributes.removeFnAttr(.alignstack);
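
`toLlvm` replaces the round-trip through byte units when handing alignments to the LLVM builder. Presumably both enums store log2 byte units with the all-ones tag meaning "unspecified", making the conversion a plain tag cast; this sketch encodes that assumption and is not taken from Builder.zig:

    const std = @import("std");

    const IpAlignment = enum(u6) { none = std.math.maxInt(u6), _ };
    const LlvmAlignment = enum(u6) { default = std.math.maxInt(u6), _ };

    // Assumed: same log2 encoding on both sides, so the tag carries over.
    fn toLlvm(a: IpAlignment) LlvmAlignment {
        return @enumFromInt(@intFromEnum(a));
    }

    test "alignment tags carry over unchanged" {
        const four: IpAlignment = @enumFromInt(2); // align(4)
        try std.testing.expectEqual(@as(u6, 2), @intFromEnum(toLlvm(four)));
        try std.testing.expectEqual(LlvmAlignment.default, toLlvm(.none));
    }
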
@@ -1407,7 +1408,7 @@ pub const Object = struct {
                         const param = wip.arg(llvm_arg_i);
 
                         if (isByRef(param_ty, mod)) {
-                            const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                            const alignment = param_ty.abiAlignment(mod).toLlvm();
                             const param_llvm_ty = param.typeOfWip(&wip);
                             const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
                             _ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1423,7 +1424,7 @@ pub const Object = struct {
                         const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
                         const param_llvm_ty = try o.lowerType(param_ty);
                         const param = wip.arg(llvm_arg_i);
-                        const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                        const alignment = param_ty.abiAlignment(mod).toLlvm();
 
                         try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
                         llvm_arg_i += 1;
@@ -1438,7 +1439,7 @@ pub const Object = struct {
                         const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
                         const param_llvm_ty = try o.lowerType(param_ty);
                         const param = wip.arg(llvm_arg_i);
-                        const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                        const alignment = param_ty.abiAlignment(mod).toLlvm();
 
                         try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder);
                         llvm_arg_i += 1;
@@ -1456,7 +1457,7 @@ pub const Object = struct {
                         llvm_arg_i += 1;
 
                         const param_llvm_ty = try o.lowerType(param_ty);
-                        const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                        const alignment = param_ty.abiAlignment(mod).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
                         _ = try wip.store(.normal, param, arg_ptr, alignment);
 
@@ -1481,10 +1482,10 @@ pub const Object = struct {
                         if (ptr_info.flags.is_const) {
                             try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
                         }
-                        const elem_align = Builder.Alignment.fromByteUnits(
-                            ptr_info.flags.alignment.toByteUnitsOptional() orelse
-                                @max(ptr_info.child.toType().abiAlignment(mod), 1),
-                        );
+                        const elem_align = (if (ptr_info.flags.alignment != .none)
+                            @as(InternPool.Alignment, ptr_info.flags.alignment)
+                        else
+                            ptr_info.child.toType().abiAlignment(mod).max(.@"1")).toLlvm();
                         try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
                         const ptr_param = wip.arg(llvm_arg_i);
                         llvm_arg_i += 1;
@@ -1501,7 +1502,7 @@ pub const Object = struct {
                         const field_types = it.types_buffer[0..it.types_len];
                         const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
                         const param_llvm_ty = try o.lowerType(param_ty);
-                        const param_alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                        const param_alignment = param_ty.abiAlignment(mod).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target);
                         const llvm_ty = try o.builder.structType(.normal, field_types);
                         for (0..field_types.len) |field_i| {
@@ -1531,7 +1532,7 @@ pub const Object = struct {
                         const param = wip.arg(llvm_arg_i);
                         llvm_arg_i += 1;
 
-                        const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                        const alignment = param_ty.abiAlignment(mod).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
                         _ = try wip.store(.normal, param, arg_ptr, alignment);
 
@@ -1546,7 +1547,7 @@ pub const Object = struct {
                         const param = wip.arg(llvm_arg_i);
                         llvm_arg_i += 1;
 
-                        const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                        const alignment = param_ty.abiAlignment(mod).toLlvm();
                         const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
                         _ = try wip.store(.normal, param, arg_ptr, alignment);
 
@@ -1967,7 +1968,7 @@ pub const Object = struct {
                     di_file,
                     owner_decl.src_node + 1,
                     ty.abiSize(mod) * 8,
-                    ty.abiAlignment(mod) * 8,
+                    ty.abiAlignment(mod).toByteUnits(0) * 8,
                     enumerators.ptr,
                     @intCast(enumerators.len),
                     try o.lowerDebugType(int_ty, .full),
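
Debug info still wants alignment in bits, so the log2 value is expanded back to byte units first; `toByteUnits(0)` substitutes the caller's default (here 0) for `.none`. The assumed shape of that helper, plus the `* 8` usage from these hunks:

    const std = @import("std");

    const Alignment = enum(u6) {
        none = std.math.maxInt(u6),
        _,

        // Expand log2 back to bytes, or hand back the default for .none.
        fn toByteUnits(a: Alignment, default: u64) u64 {
            return switch (a) {
                .none => default,
                else => @as(u64, 1) << @intFromEnum(a),
            };
        }
    };

    test "align in bits for debug info" {
        const four: Alignment = @enumFromInt(2); // align(4)
        try std.testing.expectEqual(@as(u64, 32), four.toByteUnits(0) * 8);
        try std.testing.expectEqual(@as(u64, 0), Alignment.none.toByteUnits(0) * 8);
    }
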
@@ -2055,7 +2056,7 @@ pub const Object = struct {
 
                     var offset: u64 = 0;
                     offset += ptr_size;
-                    offset = std.mem.alignForward(u64, offset, len_align);
+                    offset = len_align.forward(offset);
                     const len_offset = offset;
 
                     const fields: [2]*llvm.DIType = .{
@@ -2065,7 +2066,7 @@ pub const Object = struct {
                             di_file,
                             line,
                             ptr_size * 8, // size in bits
-                            ptr_align * 8, // align in bits
+                            ptr_align.toByteUnits(0) * 8, // align in bits
                             0, // offset in bits
                             0, // flags
                             try o.lowerDebugType(ptr_ty, .full),
@@ -2076,7 +2077,7 @@ pub const Object = struct {
                             di_file,
                             line,
                             len_size * 8, // size in bits
-                            len_align * 8, // align in bits
+                            len_align.toByteUnits(0) * 8, // align in bits
                             len_offset * 8, // offset in bits
                             0, // flags
                             try o.lowerDebugType(len_ty, .full),
@@ -2089,7 +2090,7 @@ pub const Object = struct {
                         di_file,
                         line,
                         ty.abiSize(mod) * 8, // size in bits
-                        ty.abiAlignment(mod) * 8, // align in bits
+                        ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                         0, // flags
                         null, // derived from
                         &fields,
@@ -2110,7 +2111,7 @@ pub const Object = struct {
                 const ptr_di_ty = dib.createPointerType(
                     elem_di_ty,
                     target.ptrBitWidth(),
-                    ty.ptrAlignment(mod) * 8,
+                    ty.ptrAlignment(mod).toByteUnits(0) * 8,
                     name,
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -2142,7 +2143,7 @@ pub const Object = struct {
             .Array => {
                 const array_di_ty = dib.createArrayType(
                     ty.abiSize(mod) * 8,
-                    ty.abiAlignment(mod) * 8,
+                    ty.abiAlignment(mod).toByteUnits(0) * 8,
                     try o.lowerDebugType(ty.childType(mod), .full),
                     @intCast(ty.arrayLen(mod)),
                 );
@@ -2174,7 +2175,7 @@ pub const Object = struct {
 
                 const vector_di_ty = dib.createVectorType(
                     ty.abiSize(mod) * 8,
-                    ty.abiAlignment(mod) * 8,
+                    @intCast(ty.abiAlignment(mod).toByteUnits(0) * 8),
                     elem_di_type,
                     ty.vectorLen(mod),
                 );
@@ -2223,7 +2224,7 @@ pub const Object = struct {
 
                 var offset: u64 = 0;
                 offset += payload_size;
-                offset = std.mem.alignForward(u64, offset, non_null_align);
+                offset = non_null_align.forward(offset);
                 const non_null_offset = offset;
 
                 const fields: [2]*llvm.DIType = .{
@@ -2233,7 +2234,7 @@ pub const Object = struct {
                         di_file,
                         line,
                         payload_size * 8, // size in bits
-                        payload_align * 8, // align in bits
+                        payload_align.toByteUnits(0) * 8, // align in bits
                         0, // offset in bits
                         0, // flags
                         try o.lowerDebugType(child_ty, .full),
@@ -2244,7 +2245,7 @@ pub const Object = struct {
                         di_file,
                         line,
                         non_null_size * 8, // size in bits
-                        non_null_align * 8, // align in bits
+                        non_null_align.toByteUnits(0) * 8, // align in bits
                         non_null_offset * 8, // offset in bits
                         0, // flags
                         try o.lowerDebugType(non_null_ty, .full),
@@ -2257,7 +2258,7 @@ pub const Object = struct {
                     di_file,
                     line,
                     ty.abiSize(mod) * 8, // size in bits
-                    ty.abiAlignment(mod) * 8, // align in bits
+                    ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -2306,16 +2307,16 @@ pub const Object = struct {
                 var payload_index: u32 = undefined;
                 var error_offset: u64 = undefined;
                 var payload_offset: u64 = undefined;
-                if (error_align > payload_align) {
+                if (error_align.compare(.gt, payload_align)) {
                     error_index = 0;
                     payload_index = 1;
                     error_offset = 0;
-                    payload_offset = std.mem.alignForward(u64, error_size, payload_align);
+                    payload_offset = payload_align.forward(error_size);
                 } else {
                     payload_index = 0;
                     error_index = 1;
                     payload_offset = 0;
-                    error_offset = std.mem.alignForward(u64, payload_size, error_align);
+                    error_offset = error_align.forward(payload_size);
                 }
 
                 var fields: [2]*llvm.DIType = undefined;
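
Alignment ordering likewise moves from integer `>` to `Alignment.compare`. Since 2^x is monotonic, comparing exponents is equivalent to comparing the byte units they denote; a plausible sketch:

pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
    // Only meaningful for concrete alignments: `.none` is the maximum
    // tag value and would otherwise order above everything.
    std.debug.assert(lhs != .none and rhs != .none);
    return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
}
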
@@ -2325,7 +2326,7 @@ pub const Object = struct {
                     di_file,
                     line,
                     error_size * 8, // size in bits
-                    error_align * 8, // align in bits
+                    error_align.toByteUnits(0) * 8, // align in bits
                     error_offset * 8, // offset in bits
                     0, // flags
                     try o.lowerDebugType(Type.anyerror, .full),
@@ -2336,7 +2337,7 @@ pub const Object = struct {
                     di_file,
                     line,
                     payload_size * 8, // size in bits
-                    payload_align * 8, // align in bits
+                    payload_align.toByteUnits(0) * 8, // align in bits
                     payload_offset * 8, // offset in bits
                     0, // flags
                     try o.lowerDebugType(payload_ty, .full),
@@ -2348,7 +2349,7 @@ pub const Object = struct {
                     di_file,
                     line,
                     ty.abiSize(mod) * 8, // size in bits
-                    ty.abiAlignment(mod) * 8, // align in bits
+                    ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -2374,10 +2375,10 @@ pub const Object = struct {
                 const name = try o.allocTypeName(ty);
                 defer gpa.free(name);
 
-                if (mod.typeToStruct(ty)) |struct_obj| {
-                    if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
-                        assert(struct_obj.haveLayout());
-                        const info = struct_obj.backing_int_ty.intInfo(mod);
+                if (mod.typeToPackedStruct(ty)) |struct_type| {
+                    const backing_int_ty = struct_type.backingIntType(ip).*;
+                    if (backing_int_ty != .none) {
+                        const info = backing_int_ty.toType().intInfo(mod);
                         const dwarf_encoding: c_uint = switch (info.signedness) {
                             .signed => DW.ATE.signed,
                             .unsigned => DW.ATE.unsigned,
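
`typeToPackedStruct` folds the old two-step check (`typeToStruct` followed by a `layout == .Packed` test) into one query, and resolved field types are now signalled by a non-`.none` backing integer rather than `haveFieldTypes`. A sketch of the helper, with assumed return types:

pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
    const struct_type = mod.typeToStruct(ty) orelse return null;
    if (struct_type.layout != .Packed) return null;
    return struct_type;
}
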
@@ -2417,7 +2418,7 @@ pub const Object = struct {
 
                             const field_size = field_ty.toType().abiSize(mod);
                             const field_align = field_ty.toType().abiAlignment(mod);
-                            const field_offset = std.mem.alignForward(u64, offset, field_align);
+                            const field_offset = field_align.forward(offset);
                             offset = field_offset + field_size;
 
                             const field_name = if (tuple.names.len != 0)
@@ -2432,7 +2433,7 @@ pub const Object = struct {
                                 null, // file
                                 0, // line
                                 field_size * 8, // size in bits
-                                field_align * 8, // align in bits
+                                field_align.toByteUnits(0) * 8, // align in bits
                                 field_offset * 8, // offset in bits
                                 0, // flags
                                 try o.lowerDebugType(field_ty.toType(), .full),
@@ -2445,7 +2446,7 @@ pub const Object = struct {
                             null, // file
                             0, // line
                             ty.abiSize(mod) * 8, // size in bits
-                            ty.abiAlignment(mod) * 8, // align in bits
+                            ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                             0, // flags
                             null, // derived from
                             di_fields.items.ptr,
@@ -2459,10 +2460,8 @@ pub const Object = struct {
                         try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
                         return full_di_ty;
                     },
-                    .struct_type => |struct_type| s: {
-                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
-
-                        if (!struct_obj.haveFieldTypes()) {
+                    .struct_type => |struct_type| {
+                        if (!struct_type.haveFieldTypes(ip)) {
                             // This can happen if a struct type makes it all the way to
                             // flush() without ever being instantiated or referenced (even
                             // via pointer). The only reason we are hearing about it now is
@@ -2492,26 +2491,30 @@ pub const Object = struct {
                     return struct_di_ty;
                 }
 
-                const fields = ty.structFields(mod);
-                const layout = ty.containerLayout(mod);
+                const struct_type = mod.typeToStruct(ty).?;
+                const field_types = struct_type.field_types.get(ip);
+                const field_names = struct_type.field_names.get(ip);
 
                 var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
                 defer di_fields.deinit(gpa);
 
-                try di_fields.ensureUnusedCapacity(gpa, fields.count());
+                try di_fields.ensureUnusedCapacity(gpa, field_types.len);
 
                 comptime assert(struct_layout_version == 2);
                 var offset: u64 = 0;
-
-                var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
-                while (it.next()) |field_and_index| {
-                    const field = field_and_index.field;
-                    const field_size = field.ty.abiSize(mod);
-                    const field_align = field.alignment(mod, layout);
-                    const field_offset = std.mem.alignForward(u64, offset, field_align);
+                var it = struct_type.iterateRuntimeOrder(ip);
+                while (it.next()) |field_index| {
+                    const field_ty = field_types[field_index].toType();
+                    const field_size = field_ty.abiSize(mod);
+                    const field_align = mod.structFieldAlignment(
+                        struct_type.fieldAlign(ip, field_index),
+                        field_ty,
+                        struct_type.layout,
+                    );
+                    const field_offset = field_align.forward(offset);
                     offset = field_offset + field_size;
 
-                    const field_name = ip.stringToSlice(fields.keys()[field_and_index.index]);
+                    const field_name = ip.stringToSlice(field_names[field_index]);
 
                     try di_fields.append(gpa, dib.createMemberType(
                         fwd_decl.toScope(),
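
`iterateRuntimeOrder` stands in for the old `runtimeFieldIterator`: it yields bare field indices in runtime memory order, and type, name, and alignment are then looked up in the pool's parallel arrays (`field_types`, `field_names`, `field_aligns`). A self-contained sketch of such an iterator; `RuntimeIndex` and its sentinel are illustrative stand-ins for the InternPool types:

const std = @import("std");

const RuntimeIndex = enum(u32) {
    omitted = std.math.maxInt(u32),
    _,

    fn toInt(ri: RuntimeIndex) ?u32 {
        return if (ri == .omitted) null else @intFromEnum(ri);
    }
};

const RuntimeOrderIterator = struct {
    runtime_order: []const RuntimeIndex,
    i: usize = 0,

    fn next(it: *RuntimeOrderIterator) ?u32 {
        if (it.i >= it.runtime_order.len) return null;
        // The sentinel marks comptime/omitted fields, mirroring the
        // `toInt() orelse break` in the lowerType loop further below.
        const field_index = it.runtime_order[it.i].toInt() orelse return null;
        it.i += 1;
        return field_index;
    }
};
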
@@ -2519,10 +2522,10 @@ pub const Object = struct {
                         null, // file
                         0, // line
                         field_size * 8, // size in bits
-                        field_align * 8, // align in bits
+                        field_align.toByteUnits(0) * 8, // align in bits
                         field_offset * 8, // offset in bits
                         0, // flags
-                        try o.lowerDebugType(field.ty, .full),
+                        try o.lowerDebugType(field_ty, .full),
                     ));
                 }
 
@@ -2532,7 +2535,7 @@ pub const Object = struct {
                     null, // file
                     0, // line
                     ty.abiSize(mod) * 8, // size in bits
-                    ty.abiAlignment(mod) * 8, // align in bits
+                    ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     di_fields.items.ptr,
@@ -2588,7 +2591,7 @@ pub const Object = struct {
                         null, // file
                         0, // line
                         ty.abiSize(mod) * 8, // size in bits
-                        ty.abiAlignment(mod) * 8, // align in bits
+                        ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                         0, // flags
                         null, // derived from
                         &di_fields,
@@ -2624,7 +2627,7 @@ pub const Object = struct {
                         null, // file
                         0, // line
                         field_size * 8, // size in bits
-                        field_align * 8, // align in bits
+                        field_align.toByteUnits(0) * 8, // align in bits
                         0, // offset in bits
                         0, // flags
                         field_di_ty,
@@ -2644,7 +2647,7 @@ pub const Object = struct {
                     null, // file
                     0, // line
                     ty.abiSize(mod) * 8, // size in bits
-                    ty.abiAlignment(mod) * 8, // align in bits
+                    ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                     0, // flags
                     di_fields.items.ptr,
                     @intCast(di_fields.items.len),
@@ -2661,12 +2664,12 @@ pub const Object = struct {
 
                 var tag_offset: u64 = undefined;
                 var payload_offset: u64 = undefined;
-                if (layout.tag_align >= layout.payload_align) {
+                if (layout.tag_align.compare(.gte, layout.payload_align)) {
                     tag_offset = 0;
-                    payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
+                    payload_offset = layout.payload_align.forward(layout.tag_size);
                 } else {
                     payload_offset = 0;
-                    tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align);
+                    tag_offset = layout.tag_align.forward(layout.payload_size);
                 }
 
                 const tag_di = dib.createMemberType(
@@ -2675,7 +2678,7 @@ pub const Object = struct {
                     null, // file
                     0, // line
                     layout.tag_size * 8,
-                    layout.tag_align * 8, // align in bits
+                    layout.tag_align.toByteUnits(0) * 8,
                     tag_offset * 8, // offset in bits
                     0, // flags
                     try o.lowerDebugType(union_obj.enum_tag_ty.toType(), .full),
@@ -2687,14 +2690,14 @@ pub const Object = struct {
                     null, // file
                     0, // line
                     layout.payload_size * 8, // size in bits
-                    layout.payload_align * 8, // align in bits
+                    layout.payload_align.toByteUnits(0) * 8,
                     payload_offset * 8, // offset in bits
                     0, // flags
                     union_di_ty,
                 );
 
                 const full_di_fields: [2]*llvm.DIType =
-                    if (layout.tag_align >= layout.payload_align)
+                    if (layout.tag_align.compare(.gte, layout.payload_align))
                     .{ tag_di, payload_di }
                 else
                     .{ payload_di, tag_di };
@@ -2705,7 +2708,7 @@ pub const Object = struct {
                     null, // file
                     0, // line
                     ty.abiSize(mod) * 8, // size in bits
-                    ty.abiAlignment(mod) * 8, // align in bits
+                    ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &full_di_fields,
@@ -2925,8 +2928,8 @@ pub const Object = struct {
             else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder),
         }
 
-        if (fn_info.alignment.toByteUnitsOptional()) |alignment|
-            function_index.setAlignment(Builder.Alignment.fromByteUnits(alignment), &o.builder);
+        if (fn_info.alignment != .none)
+            function_index.setAlignment(fn_info.alignment.toLlvm(), &o.builder);
 
         // Function attributes that are independent of analysis results of the function body.
         try o.addCommonFnAttributes(&attributes);
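
`toLlvm` bridges `InternPool.Alignment` and `Builder.Alignment` without expanding to byte units, which is what makes the `fromByteUnits`/`toByteUnitsOptional` round trip above removable. A sketch that assumes both enums store a log2 exponent and reserve the maximum tag for "unspecified":

pub fn toLlvm(this: Alignment) Builder.Alignment {
    // `.none` re-tags directly to the builder's default-alignment sentinel
    // under the assumption stated above.
    return @enumFromInt(@intFromEnum(this));
}
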
@@ -2949,9 +2952,8 @@ pub const Object = struct {
                 .byref => {
                     const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1];
                     const param_llvm_ty = try o.lowerType(param_ty.toType());
-                    const alignment =
-                        Builder.Alignment.fromByteUnits(param_ty.toType().abiAlignment(mod));
-                    try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
+                    const alignment = param_ty.toType().abiAlignment(mod);
+                    try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty);
                 },
                 .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
                 // No attributes needed for these.
@@ -3248,21 +3250,21 @@ pub const Object = struct {
 
                     var fields: [3]Builder.Type = undefined;
                     var fields_len: usize = 2;
-                    const padding_len = if (error_align > payload_align) pad: {
+                    const padding_len = if (error_align.compare(.gt, payload_align)) pad: {
                         fields[0] = error_type;
                         fields[1] = payload_type;
                         const payload_end =
-                            std.mem.alignForward(u64, error_size, payload_align) +
+                            payload_align.forward(error_size) +
                             payload_size;
-                        const abi_size = std.mem.alignForward(u64, payload_end, error_align);
+                        const abi_size = error_align.forward(payload_end);
                         break :pad abi_size - payload_end;
                     } else pad: {
                         fields[0] = payload_type;
                         fields[1] = error_type;
                         const error_end =
-                            std.mem.alignForward(u64, payload_size, error_align) +
+                            error_align.forward(payload_size) +
                             error_size;
-                        const abi_size = std.mem.alignForward(u64, error_end, payload_align);
+                        const abi_size = payload_align.forward(error_end);
                         break :pad abi_size - error_end;
                     };
                     if (padding_len > 0) {
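
The two `pad:` branches size the trailing padding so the explicit LLVM struct matches the type's ABI size. Worked through with hypothetical numbers:

// Hypothetical sizes, for illustration only.
const payload_size: u64 = 4;
const error_size: u64 = 2;
const payload_align: Alignment = .@"4";
const error_align: Alignment = .@"2";

// error_align > payload_align is false, so the payload goes first:
const error_end = error_align.forward(payload_size) + error_size; // 4 + 2 = 6
const abi_size = payload_align.forward(error_end); // 8
const padding_len = abi_size - error_end; // 2 -> a trailing [2 x i8] field
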
@@ -3276,43 +3278,44 @@ pub const Object = struct {
                     const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
                     if (gop.found_existing) return gop.value_ptr.*;
 
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                    if (struct_obj.layout == .Packed) {
-                        assert(struct_obj.haveLayout());
-                        const int_ty = try o.lowerType(struct_obj.backing_int_ty);
+                    if (struct_type.layout == .Packed) {
+                        const int_ty = try o.lowerType(struct_type.backingIntType(ip).toType());
                         gop.value_ptr.* = int_ty;
                         return int_ty;
                     }
 
                     const name = try o.builder.string(ip.stringToSlice(
-                        try struct_obj.getFullyQualifiedName(mod),
+                        try mod.declPtr(struct_type.decl.unwrap().?).getFullyQualifiedName(mod),
                     ));
                     const ty = try o.builder.opaqueType(name);
                     gop.value_ptr.* = ty; // must be done before any recursive calls
 
-                    assert(struct_obj.haveFieldTypes());
-
                     var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
                     defer llvm_field_types.deinit(o.gpa);
                     // Although we can estimate how much capacity to add, the estimate cannot
                     // be relied upon because of the recursive calls to lowerType below.
-                    try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_obj.fields.count());
-                    try o.struct_field_map.ensureUnusedCapacity(o.gpa, @intCast(struct_obj.fields.count()));
+                    try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_type.field_types.len);
+                    try o.struct_field_map.ensureUnusedCapacity(o.gpa, struct_type.field_types.len);
 
                     comptime assert(struct_layout_version == 2);
                     var offset: u64 = 0;
-                    var big_align: u32 = 1;
+                    var big_align: InternPool.Alignment = .@"1";
                     var struct_kind: Builder.Type.Structure.Kind = .normal;
 
-                    var it = struct_obj.runtimeFieldIterator(mod);
-                    while (it.next()) |field_and_index| {
-                        const field = field_and_index.field;
-                        const field_align = field.alignment(mod, struct_obj.layout);
-                        const field_ty_align = field.ty.abiAlignment(mod);
-                        if (field_align < field_ty_align) struct_kind = .@"packed";
-                        big_align = @max(big_align, field_align);
+                    for (struct_type.runtime_order.get(ip)) |runtime_index| {
+                        const field_index = runtime_index.toInt() orelse break;
+                        const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+                        const field_aligns = struct_type.field_aligns.get(ip);
+                        const field_align = mod.structFieldAlignment(
+                            if (field_aligns.len == 0) .none else field_aligns[field_index],
+                            field_ty,
+                            struct_type.layout,
+                        );
+                        const field_ty_align = field_ty.abiAlignment(mod);
+                        if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
+                        big_align = big_align.max(field_align);
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, field_align);
+                        offset = field_align.forward(offset);
 
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) try llvm_field_types.append(
@@ -3321,15 +3324,15 @@ pub const Object = struct {
                         );
                         try o.struct_field_map.put(o.gpa, .{
                             .struct_ty = t.toIntern(),
-                            .field_index = field_and_index.index,
+                            .field_index = field_index,
                         }, @intCast(llvm_field_types.items.len));
-                        try llvm_field_types.append(o.gpa, try o.lowerType(field.ty));
+                        try llvm_field_types.append(o.gpa, try o.lowerType(field_ty));
 
-                        offset += field.ty.abiSize(mod);
+                        offset += field_ty.abiSize(mod);
                     }
                     {
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, big_align);
+                        offset = big_align.forward(offset);
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) try llvm_field_types.append(
                             o.gpa,
@@ -3353,7 +3356,7 @@ pub const Object = struct {
 
                     comptime assert(struct_layout_version == 2);
                     var offset: u64 = 0;
-                    var big_align: u32 = 0;
+                    var big_align: InternPool.Alignment = .none;
 
                     for (
                         anon_struct_type.types.get(ip),
@@ -3363,9 +3366,9 @@ pub const Object = struct {
                         if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
 
                         const field_align = field_ty.toType().abiAlignment(mod);
-                        big_align = @max(big_align, field_align);
+                        big_align = big_align.max(field_align);
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, field_align);
+                        offset = field_align.forward(offset);
 
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) try llvm_field_types.append(
@@ -3382,7 +3385,7 @@ pub const Object = struct {
                     }
                     {
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, big_align);
+                        offset = big_align.forward(offset);
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) try llvm_field_types.append(
                             o.gpa,
@@ -3447,7 +3450,7 @@ pub const Object = struct {
                     var llvm_fields: [3]Builder.Type = undefined;
                     var llvm_fields_len: usize = 2;
 
-                    if (layout.tag_align >= layout.payload_align) {
+                    if (layout.tag_align.compare(.gte, layout.payload_align)) {
                         llvm_fields = .{ enum_tag_ty, payload_ty, .none };
                     } else {
                         llvm_fields = .{ payload_ty, enum_tag_ty, .none };
@@ -3687,7 +3690,7 @@ pub const Object = struct {
 
                 var fields: [3]Builder.Type = undefined;
                 var vals: [3]Builder.Constant = undefined;
-                if (error_align > payload_align) {
+                if (error_align.compare(.gt, payload_align)) {
                     vals[0] = llvm_error_value;
                     vals[1] = llvm_payload_value;
                 } else {
@@ -3910,7 +3913,7 @@ pub const Object = struct {
                     comptime assert(struct_layout_version == 2);
                     var llvm_index: usize = 0;
                     var offset: u64 = 0;
-                    var big_align: u32 = 0;
+                    var big_align: InternPool.Alignment = .none;
                     var need_unnamed = false;
                     for (
                         tuple.types.get(ip),
@@ -3921,9 +3924,9 @@ pub const Object = struct {
                         if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         const field_align = field_ty.toType().abiAlignment(mod);
-                        big_align = @max(big_align, field_align);
+                        big_align = big_align.max(field_align);
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, field_align);
+                        offset = field_align.forward(offset);
 
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
@@ -3946,7 +3949,7 @@ pub const Object = struct {
                     }
                     {
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, big_align);
+                        offset = big_align.forward(offset);
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
                             fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
@@ -3963,22 +3966,21 @@ pub const Object = struct {
                         struct_ty, vals);
                 },
                 .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                    assert(struct_obj.haveLayout());
+                    assert(struct_type.haveLayout(ip));
                     const struct_ty = try o.lowerType(ty);
-                    if (struct_obj.layout == .Packed) {
+                    if (struct_type.layout == .Packed) {
                         comptime assert(Type.packed_struct_layout_version == 2);
                         var running_int = try o.builder.intConst(struct_ty, 0);
                         var running_bits: u16 = 0;
-                        for (struct_obj.fields.values(), 0..) |field, field_index| {
-                            if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                        for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
+                            if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                             const non_int_val =
                                 try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern());
-                            const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod));
+                            const ty_bit_size: u16 = @intCast(field_ty.toType().bitSize(mod));
                             const small_int_ty = try o.builder.intType(ty_bit_size);
                             const small_int_val = try o.builder.castConst(
-                                if (field.ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
+                                if (field_ty.toType().isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
                                 non_int_val,
                                 small_int_ty,
                             );
@@ -4010,15 +4012,19 @@ pub const Object = struct {
                     comptime assert(struct_layout_version == 2);
                     var llvm_index: usize = 0;
                     var offset: u64 = 0;
-                    var big_align: u32 = 0;
+                    var big_align: InternPool.Alignment = .none;
                     var need_unnamed = false;
-                    var field_it = struct_obj.runtimeFieldIterator(mod);
-                    while (field_it.next()) |field_and_index| {
-                        const field = field_and_index.field;
-                        const field_align = field.alignment(mod, struct_obj.layout);
-                        big_align = @max(big_align, field_align);
+                    var field_it = struct_type.iterateRuntimeOrder(ip);
+                    while (field_it.next()) |field_index| {
+                        const field_ty = struct_type.field_types.get(ip)[field_index];
+                        const field_align = mod.structFieldAlignment(
+                            struct_type.fieldAlign(ip, field_index),
+                            field_ty.toType(),
+                            struct_type.layout,
+                        );
+                        big_align = big_align.max(field_align);
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, field_align);
+                        offset = field_align.forward(offset);
 
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
@@ -4032,18 +4038,18 @@ pub const Object = struct {
                         }
 
                         vals[llvm_index] = try o.lowerValue(
-                            (try val.fieldValue(mod, field_and_index.index)).toIntern(),
+                            (try val.fieldValue(mod, field_index)).toIntern(),
                         );
                         fields[llvm_index] = vals[llvm_index].typeOf(&o.builder);
                         if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index])
                             need_unnamed = true;
                         llvm_index += 1;
 
-                        offset += field.ty.abiSize(mod);
+                        offset += field_ty.toType().abiSize(mod);
                     }
                     {
                         const prev_offset = offset;
-                        offset = std.mem.alignForward(u64, offset, big_align);
+                        offset = big_align.forward(offset);
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
                             fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
@@ -4093,7 +4099,7 @@ pub const Object = struct {
                     const payload = try o.lowerValue(un.val);
                     const payload_ty = payload.typeOf(&o.builder);
                     if (payload_ty != union_ty.structFields(&o.builder)[
-                        @intFromBool(layout.tag_align >= layout.payload_align)
+                        @intFromBool(layout.tag_align.compare(.gte, layout.payload_align))
                     ]) need_unnamed = true;
                     const field_size = field_ty.abiSize(mod);
                     if (field_size == layout.payload_size) break :p payload;
@@ -4115,7 +4121,7 @@ pub const Object = struct {
                 var fields: [3]Builder.Type = undefined;
                 var vals: [3]Builder.Constant = undefined;
                 var len: usize = 2;
-                if (layout.tag_align >= layout.payload_align) {
+                if (layout.tag_align.compare(.gte, layout.payload_align)) {
                     fields = .{ tag_ty, payload_ty, undefined };
                     vals = .{ tag, payload, undefined };
                 } else {
@@ -4174,14 +4180,15 @@ pub const Object = struct {
 
     fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Allocator.Error!Builder.Constant {
         const mod = o.module;
-        return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ptr_val.toIntern()).ptr.addr) {
             .decl => |decl| o.lowerParentPtrDecl(decl),
             .mut_decl => |mut_decl| o.lowerParentPtrDecl(mut_decl.decl),
             .int => |int| try o.lowerIntAsPtr(int),
             .eu_payload => |eu_ptr| {
                 const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true);
 
-                const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
+                const eu_ty = ip.typeOf(eu_ptr).toType().childType(mod);
                 const payload_ty = eu_ty.errorUnionPayload(mod);
                 if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     // In this case, we represent pointer to error union the same as pointer
@@ -4189,8 +4196,9 @@ pub const Object = struct {
                     return parent_ptr;
                 }
 
-                const index: u32 =
-                    if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1;
+                const payload_align = payload_ty.abiAlignment(mod);
+                const err_align = Type.err_int.abiAlignment(mod);
+                const index: u32 = if (payload_align.compare(.gt, err_align)) 2 else 1;
                 return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
                     try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, index),
                 });
@@ -4198,7 +4206,7 @@ pub const Object = struct {
             .opt_payload => |opt_ptr| {
                 const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true);
 
-                const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
+                const opt_ty = ip.typeOf(opt_ptr).toType().childType(mod);
                 const payload_ty = opt_ty.optionalChild(mod);
                 if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
                     payload_ty.optionalReprIsPayload(mod))
@@ -4215,7 +4223,7 @@ pub const Object = struct {
             .comptime_field => unreachable,
             .elem => |elem_ptr| {
                 const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true);
-                const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
+                const elem_ty = ip.typeOf(elem_ptr.base).toType().elemType2(mod);
 
                 return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
                     try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
@@ -4223,7 +4231,7 @@ pub const Object = struct {
             },
             .field => |field_ptr| {
                 const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
-                const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
+                const parent_ty = ip.typeOf(field_ptr.base).toType().childType(mod);
 
                 const field_index: u32 = @intCast(field_ptr.index);
                 switch (parent_ty.zigTypeTag(mod)) {
@@ -4241,24 +4249,26 @@ pub const Object = struct {
 
                         const parent_llvm_ty = try o.lowerType(parent_ty);
                         return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
-                            try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, @intFromBool(
-                                layout.tag_size > 0 and layout.tag_align >= layout.payload_align,
+                            try o.builder.intConst(.i32, 0),
+                            try o.builder.intConst(.i32, @intFromBool(
+                                layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align),
                             )),
                         });
                     },
                     .Struct => {
-                        if (parent_ty.containerLayout(mod) == .Packed) {
+                        if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
                             if (!byte_aligned) return parent_ptr;
                             const llvm_usize = try o.lowerType(Type.usize);
                             const base_addr =
                                 try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
                             // count bits of fields before this one
+                            // TODO https://github.com/ziglang/zig/issues/17178
                             const prev_bits = b: {
                                 var b: usize = 0;
-                                for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
-                                    if (field.is_comptime) continue;
-                                    if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-                                    b += @intCast(field.ty.bitSize(mod));
+                                for (0..field_index) |i| {
+                                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                                    b += @intCast(field_ty.bitSize(mod));
                                 }
                                 break :b b;
                             };
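
The rewritten loop derives a packed field's position by summing the bit sizes of the runtime fields before it (the dropped `is_comptime` check is what the TODO above tracks). The byte-aligned address then falls out of `prev_bits`; conceptually, with hypothetical numbers:

// Hypothetical values, for illustration of the pointer arithmetic here.
const base_addr: u64 = 0x1000; // ptrtoint of the parent pointer
const prev_bits: u64 = 27; // bits occupied by earlier runtime fields
const byte_offset = prev_bits / 8; // = 3; the field starts inside byte 3
const field_addr = base_addr + byte_offset; // 0x1003
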
@@ -4407,11 +4417,11 @@ pub const Object = struct {
             if (ptr_info.flags.is_const) {
                 try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
             }
-            const elem_align = Builder.Alignment.fromByteUnits(
-                ptr_info.flags.alignment.toByteUnitsOptional() orelse
-                    @max(ptr_info.child.toType().abiAlignment(mod), 1),
-            );
-            try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
+            const elem_align = if (ptr_info.flags.alignment != .none)
+                ptr_info.flags.alignment
+            else
+                ptr_info.child.toType().abiAlignment(mod).max(.@"1");
+            try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder);
         } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
             .signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder),
             .unsigned => try attributes.addParamAttr(llvm_arg_i, .zeroext, &o.builder),
@@ -4469,7 +4479,7 @@ pub const DeclGen = struct {
         } else {
             const variable_index = try o.resolveGlobalDecl(decl_index);
             variable_index.setAlignment(
-                Builder.Alignment.fromByteUnits(decl.getAlignment(mod)),
+                decl.getAlignment(mod).toLlvm(),
                 &o.builder,
             );
             if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
@@ -4611,9 +4621,7 @@ pub const FuncGen = struct {
         variable_index.setLinkage(.private, &o.builder);
         variable_index.setMutability(.constant, &o.builder);
         variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
-        variable_index.setAlignment(Builder.Alignment.fromByteUnits(
-            tv.ty.abiAlignment(mod),
-        ), &o.builder);
+        variable_index.setAlignment(tv.ty.abiAlignment(mod).toLlvm(), &o.builder);
         return o.builder.convConst(
             .unneeded,
             variable_index.toConst(&o.builder),
@@ -4929,7 +4937,7 @@ pub const FuncGen = struct {
             const llvm_ret_ty = try o.lowerType(return_type);
             try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);
 
-            const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+            const alignment = return_type.abiAlignment(mod).toLlvm();
             const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment);
             try llvm_args.append(ret_ptr);
             break :blk ret_ptr;
@@ -4951,7 +4959,7 @@ pub const FuncGen = struct {
                 const llvm_arg = try self.resolveInst(arg);
                 const llvm_param_ty = try o.lowerType(param_ty);
                 if (isByRef(param_ty, mod)) {
-                    const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                    const alignment = param_ty.abiAlignment(mod).toLlvm();
                     const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, "");
                     try llvm_args.append(loaded);
                 } else {
@@ -4965,7 +4973,7 @@ pub const FuncGen = struct {
                 if (isByRef(param_ty, mod)) {
                     try llvm_args.append(llvm_arg);
                 } else {
-                    const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                    const alignment = param_ty.abiAlignment(mod).toLlvm();
                     const param_llvm_ty = llvm_arg.typeOfWip(&self.wip);
                     const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
                     _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
@@ -4977,7 +4985,7 @@ pub const FuncGen = struct {
                 const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
 
-                const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                const alignment = param_ty.abiAlignment(mod).toLlvm();
                 const param_llvm_ty = try o.lowerType(param_ty);
                 const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
                 if (isByRef(param_ty, mod)) {
@@ -4995,13 +5003,13 @@ pub const FuncGen = struct {
                 const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8));
 
                 if (isByRef(param_ty, mod)) {
-                    const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                    const alignment = param_ty.abiAlignment(mod).toLlvm();
                     const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, "");
                     try llvm_args.append(loaded);
                 } else {
                     // LLVM does not allow bitcasting structs, so we must allocate
                     // a local, store it as one type, and then load it as another type.
-                    const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                    const alignment = param_ty.abiAlignment(mod).toLlvm();
                     const int_ptr = try self.buildAlloca(int_llvm_ty, alignment);
                     _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
                     const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
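
The comment above describes the standard workaround for LLVM's ban on struct bitcasts. Isolated into a hypothetical helper built from the same `buildAlloca`/`store`/`load` calls used here:

fn bitcastViaMemory(
    self: *FuncGen,
    val: Builder.Value,
    dest_ty: Builder.Type,
    alignment: Builder.Alignment,
) !Builder.Value {
    // Store with the value's own type, then load the same bytes back
    // with the destination type.
    const slot = try self.buildAlloca(val.typeOfWip(&self.wip), alignment);
    _ = try self.wip.store(.normal, val, slot, alignment);
    return self.wip.load(.normal, dest_ty, slot, alignment, "");
}
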
@@ -5022,7 +5030,7 @@ pub const FuncGen = struct {
                 const llvm_arg = try self.resolveInst(arg);
                 const is_by_ref = isByRef(param_ty, mod);
                 const arg_ptr = if (is_by_ref) llvm_arg else ptr: {
-                    const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                    const alignment = param_ty.abiAlignment(mod).toLlvm();
                     const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
                     break :ptr ptr;
@@ -5048,7 +5056,7 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
+                const alignment = arg_ty.abiAlignment(mod).toLlvm();
                 if (!isByRef(arg_ty, mod)) {
                     const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
@@ -5066,7 +5074,7 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
+                const alignment = arg_ty.abiAlignment(mod).toLlvm();
                 if (!isByRef(arg_ty, mod)) {
                     const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
@@ -5097,7 +5105,7 @@ pub const FuncGen = struct {
                     const param_index = it.zig_index - 1;
                     const param_ty = fn_info.param_types.get(ip)[param_index].toType();
                     const param_llvm_ty = try o.lowerType(param_ty);
-                    const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+                    const alignment = param_ty.abiAlignment(mod).toLlvm();
                     try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
                 },
                 .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -5128,10 +5136,10 @@ pub const FuncGen = struct {
                     if (ptr_info.flags.is_const) {
                         try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
                     }
-                    const elem_align = Builder.Alignment.fromByteUnits(
-                        ptr_info.flags.alignment.toByteUnitsOptional() orelse
-                            @max(ptr_info.child.toType().abiAlignment(mod), 1),
-                    );
+                    const elem_align = (if (ptr_info.flags.alignment != .none)
+                        @as(InternPool.Alignment, ptr_info.flags.alignment)
+                    else
+                        ptr_info.child.toType().abiAlignment(mod).max(.@"1")).toLlvm();
                     try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
                 },
             };
@@ -5166,7 +5174,7 @@ pub const FuncGen = struct {
                 return rp;
             } else {
                 // Our by-ref status disagrees with sret, so we must load.
-                const return_alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+                const return_alignment = return_type.abiAlignment(mod).toLlvm();
                 return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, "");
             }
         }
@@ -5177,7 +5185,7 @@ pub const FuncGen = struct {
             // In this case, the function return type honors the calling convention by having
             // a different LLVM type than the usual one. We solve this at the callsite
             // by using our canonical type, then loading it if necessary.
-            const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+            const alignment = return_type.abiAlignment(mod).toLlvm();
             if (o.builder.useLibLlvm())
                 assert(o.target_data.abiSizeOfType(abi_ret_ty.toLlvm(&o.builder)) >=
                     o.target_data.abiSizeOfType(llvm_ret_ty.toLlvm(&o.builder)));
@@ -5192,7 +5200,7 @@ pub const FuncGen = struct {
         if (isByRef(return_type, mod)) {
             // Our by-ref status disagrees with sret, so we must allocate, store,
             // and return the allocation pointer.
-            const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+            const alignment = return_type.abiAlignment(mod).toLlvm();
             const rp = try self.buildAlloca(llvm_ret_ty, alignment);
             _ = try self.wip.store(.normal, call, rp, alignment);
             return rp;
@@ -5266,7 +5274,7 @@ pub const FuncGen = struct {
 
         const abi_ret_ty = try lowerFnRetTy(o, fn_info);
         const operand = try self.resolveInst(un_op);
-        const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
+        const alignment = ret_ty.abiAlignment(mod).toLlvm();
 
         if (isByRef(ret_ty, mod)) {
             // operand is a pointer however self.ret_ptr is null so that means
@@ -5311,7 +5319,7 @@ pub const FuncGen = struct {
         }
         const ptr = try self.resolveInst(un_op);
         const abi_ret_ty = try lowerFnRetTy(o, fn_info);
-        const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
+        const alignment = ret_ty.abiAlignment(mod).toLlvm();
         _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, ""));
         return .none;
     }
@@ -5334,7 +5342,7 @@ pub const FuncGen = struct {
         const llvm_va_list_ty = try o.lowerType(va_list_ty);
         const mod = o.module;
 
-        const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod));
+        const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
         const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
 
         _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, "");
@@ -5358,7 +5366,7 @@ pub const FuncGen = struct {
         const va_list_ty = self.typeOfIndex(inst);
         const llvm_va_list_ty = try o.lowerType(va_list_ty);
 
-        const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod));
+        const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
         const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
 
         _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, "");
@@ -5690,7 +5698,7 @@ pub const FuncGen = struct {
             return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
         } else if (isByRef(err_union_ty, mod)) {
             const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
-            const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
+            const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
             if (isByRef(payload_ty, mod)) {
                 if (can_elide_load)
                     return payload_ptr;
@@ -5997,7 +6005,7 @@ pub const FuncGen = struct {
             if (self.canElideLoad(body_tail))
                 return ptr;
 
-            const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+            const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
             return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
         }
 
@@ -6037,7 +6045,7 @@ pub const FuncGen = struct {
                 const elem_ptr =
                     try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
                 if (canElideLoad(self, body_tail)) return elem_ptr;
-                const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+                const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
                 return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
             } else {
                 const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -6097,7 +6105,7 @@ pub const FuncGen = struct {
             &.{rhs}, "");
         if (isByRef(elem_ty, mod)) {
             if (self.canElideLoad(body_tail)) return ptr;
-            const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+            const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
             return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
         }
 
@@ -6163,8 +6171,8 @@ pub const FuncGen = struct {
             switch (struct_ty.zigTypeTag(mod)) {
                 .Struct => switch (struct_ty.containerLayout(mod)) {
                     .Packed => {
-                        const struct_obj = mod.typeToStruct(struct_ty).?;
-                        const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
+                        const struct_type = mod.typeToStruct(struct_ty).?;
+                        const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index);
                         const containing_int = struct_llvm_val;
                         const shift_amt =
                             try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
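
`structPackedFieldBitOffset` is now a `Module` helper rather than a method on the removed struct object; its result feeds the shift that extracts the field from the backing integer. What the emitted IR amounts to, as a self-contained sketch:

const std = @import("std");

/// Extract field bits from a packed struct's backing integer:
/// shift right by the field's bit offset, then truncate to its width.
fn extractPackedField(comptime FieldInt: type, containing: u64, bit_offset: u6) FieldInt {
    return @truncate(containing >> bit_offset);
}

test "extractPackedField" {
    // For a packed struct { a: u4, b: u7, ... }, field `b` sits at bit offset 4.
    const backing: u64 = 0b1010111_1100;
    try std.testing.expectEqual(@as(u7, 0b1010111), extractPackedField(u7, backing, 4));
}
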
@@ -6220,16 +6228,14 @@ pub const FuncGen = struct {
                 const alignment = struct_ty.structFieldAlign(field_index, mod);
                 const field_ptr_ty = try mod.ptrType(.{
                     .child = field_ty.toIntern(),
-                    .flags = .{
-                        .alignment = InternPool.Alignment.fromNonzeroByteUnits(alignment),
-                    },
+                    .flags = .{ .alignment = alignment },
                 });
                 if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail))
                         return field_ptr;
 
-                    assert(alignment != 0);
-                    const field_alignment = Builder.Alignment.fromByteUnits(alignment);
+                    assert(alignment != .none);
+                    const field_alignment = alignment.toLlvm();
                     return self.loadByRef(field_ptr, field_ty, field_alignment, .normal);
                 } else {
                     return self.load(field_ptr, field_ptr_ty);
@@ -6238,11 +6244,11 @@ pub const FuncGen = struct {
             .Union => {
                 const union_llvm_ty = try o.lowerType(struct_ty);
                 const layout = struct_ty.unionGetLayout(mod);
-                const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
+                const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
                 const field_ptr =
                     try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
                 const llvm_field_ty = try o.lowerType(field_ty);
-                const payload_alignment = Builder.Alignment.fromByteUnits(layout.payload_align);
+                const payload_alignment = layout.payload_align.toLlvm();
                 if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail)) return field_ptr;
                     return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
@@ -6457,7 +6463,7 @@ pub const FuncGen = struct {
         if (isByRef(operand_ty, mod)) {
             _ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
         } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) {
-            const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod));
+            const alignment = operand_ty.abiAlignment(mod).toLlvm();
             const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
             _ = try self.wip.store(.normal, operand, alloca, alignment);
             _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
@@ -6612,7 +6618,7 @@ pub const FuncGen = struct {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
                 } else {
-                    const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
+                    const alignment = arg_ty.abiAlignment(mod).toLlvm();
                     const arg_llvm_ty = try o.lowerType(arg_ty);
                     const load_inst =
                         try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, "");
@@ -6624,7 +6630,7 @@ pub const FuncGen = struct {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
                 } else {
-                    const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
+                    const alignment = arg_ty.abiAlignment(mod).toLlvm();
                     const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment);
                     _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment);
                     llvm_param_values[llvm_param_i] = arg_ptr;
@@ -6676,7 +6682,7 @@ pub const FuncGen = struct {
                 llvm_param_values[llvm_param_i] = llvm_rw_val;
                 llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip);
             } else {
-                const alignment = Builder.Alignment.fromByteUnits(rw_ty.abiAlignment(mod));
+                const alignment = rw_ty.abiAlignment(mod).toLlvm();
                 const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, "");
                 llvm_param_values[llvm_param_i] = loaded;
                 llvm_param_types[llvm_param_i] = llvm_elem_ty;
@@ -6837,7 +6843,7 @@ pub const FuncGen = struct {
                 const output_ptr = try self.resolveInst(output);
                 const output_ptr_ty = self.typeOf(output);
 
-                const alignment = Builder.Alignment.fromByteUnits(output_ptr_ty.ptrAlignment(mod));
+                const alignment = output_ptr_ty.ptrAlignment(mod).toLlvm();
                 _ = try self.wip.store(.normal, output_value, output_ptr, alignment);
             } else {
                 ret_val = output_value;
@@ -7030,7 +7036,7 @@ pub const FuncGen = struct {
         if (operand_is_ptr) {
             return self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
         } else if (isByRef(err_union_ty, mod)) {
-            const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
+            const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
             const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
             if (isByRef(payload_ty, mod)) {
                 if (self.canElideLoad(body_tail)) return payload_ptr;
@@ -7093,7 +7099,7 @@ pub const FuncGen = struct {
         }
         const err_union_llvm_ty = try o.lowerType(err_union_ty);
         {
-            const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
+            const error_alignment = Type.err_int.abiAlignment(mod).toLlvm();
             const error_offset = errUnionErrorOffset(payload_ty, mod);
             // First set the non-error value.
             const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
@@ -7133,9 +7139,7 @@ pub const FuncGen = struct {
         const field_ty = struct_ty.structFieldType(field_index, mod);
         const field_ptr_ty = try mod.ptrType(.{
             .child = field_ty.toIntern(),
-            .flags = .{
-                .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_alignment),
-            },
+            .flags = .{ .alignment = field_alignment },
         });
         return self.load(field_ptr, field_ptr_ty);
     }
@@ -7153,7 +7157,7 @@ pub const FuncGen = struct {
         if (optional_ty.optionalReprIsPayload(mod)) return operand;
         const llvm_optional_ty = try o.lowerType(optional_ty);
         if (isByRef(optional_ty, mod)) {
-            const alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod));
+            const alignment = optional_ty.abiAlignment(mod).toLlvm();
             const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment);
             const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, "");
             const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
@@ -7181,10 +7185,10 @@ pub const FuncGen = struct {
         const payload_offset = errUnionPayloadOffset(payload_ty, mod);
         const error_offset = errUnionErrorOffset(payload_ty, mod);
         if (isByRef(err_un_ty, mod)) {
-            const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod));
+            const alignment = err_un_ty.abiAlignment(mod).toLlvm();
             const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
             const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
-            const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
+            const error_alignment = Type.err_int.abiAlignment(mod).toLlvm();
             _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment);
             const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
             const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
@@ -7210,10 +7214,10 @@ pub const FuncGen = struct {
         const payload_offset = errUnionPayloadOffset(payload_ty, mod);
         const error_offset = errUnionErrorOffset(payload_ty, mod);
         if (isByRef(err_un_ty, mod)) {
-            const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod));
+            const alignment = err_un_ty.abiAlignment(mod).toLlvm();
             const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
             const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
-            const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
+            const error_alignment = Type.err_int.abiAlignment(mod).toLlvm();
             _ = try self.wip.store(.normal, operand, err_ptr, error_alignment);
             const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
             const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
@@ -7260,7 +7264,7 @@ pub const FuncGen = struct {
         const access_kind: Builder.MemoryAccessKind =
             if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
         const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
-        const alignment = Builder.Alignment.fromByteUnits(vector_ptr_ty.ptrAlignment(mod));
+        const alignment = vector_ptr_ty.ptrAlignment(mod).toLlvm();
         const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");
 
         const new_vector = try self.wip.insertElement(loaded, operand, index, "");
@@ -7690,7 +7694,7 @@ pub const FuncGen = struct {
         const overflow_index = o.llvmFieldIndex(inst_ty, 1).?;
 
         if (isByRef(inst_ty, mod)) {
-            const result_alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
+            const result_alignment = inst_ty.abiAlignment(mod).toLlvm();
             const alloca_inst = try self.buildAlloca(llvm_inst_ty, result_alignment);
             {
                 const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
@@ -8048,7 +8052,7 @@ pub const FuncGen = struct {
         const overflow_index = o.llvmFieldIndex(dest_ty, 1).?;
 
         if (isByRef(dest_ty, mod)) {
-            const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod));
+            const result_alignment = dest_ty.abiAlignment(mod).toLlvm();
             const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment);
             {
                 const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
@@ -8321,7 +8325,7 @@ pub const FuncGen = struct {
             const array_ptr = try self.buildAlloca(llvm_dest_ty, .default);
             const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
-                const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
+                const alignment = inst_ty.abiAlignment(mod).toLlvm();
                 _ = try self.wip.store(.normal, operand, array_ptr, alignment);
             } else {
                 // If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8349,7 +8353,7 @@ pub const FuncGen = struct {
             if (bitcast_ok) {
                 // The array is aligned to the element's alignment, while the vector might have a completely
                 // different alignment. This means we need to enforce the alignment of this load.
-                const alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+                const alignment = elem_ty.abiAlignment(mod).toLlvm();
                 return self.wip.load(.normal, llvm_vector_ty, operand, alignment, "");
             } else {
                 // If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8374,14 +8378,12 @@ pub const FuncGen = struct {
         }
 
         if (operand_is_ref) {
-            const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod));
+            const alignment = operand_ty.abiAlignment(mod).toLlvm();
             return self.wip.load(.normal, llvm_dest_ty, operand, alignment, "");
         }
 
         if (result_is_ref) {
-            const alignment = Builder.Alignment.fromByteUnits(
-                @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)),
-            );
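+            // Alignment.max compares the log2 values directly; no byte-unit
+            // round-trip is needed.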
+            const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
             const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
             _ = try self.wip.store(.normal, operand, result_ptr, alignment);
             return result_ptr;
@@ -8393,9 +8395,7 @@ pub const FuncGen = struct {
             // Both our operand and our result are values, not pointers,
             // but LLVM won't let us bitcast struct values or vectors with padding bits.
             // Therefore, we store operand to alloca, then load for result.
-            const alignment = Builder.Alignment.fromByteUnits(
-                @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)),
-            );
+            const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
             const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
             _ = try self.wip.store(.normal, operand, result_ptr, alignment);
             return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
@@ -8441,7 +8441,7 @@ pub const FuncGen = struct {
             if (isByRef(inst_ty, mod)) {
                 _ = dib.insertDeclareAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
             } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) {
-                const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
+                const alignment = inst_ty.abiAlignment(mod).toLlvm();
                 const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
                 _ = try self.wip.store(.normal, arg_val, alloca, alignment);
                 _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
@@ -8462,7 +8462,7 @@ pub const FuncGen = struct {
             return (try o.lowerPtrToVoid(ptr_ty)).toValue();
 
         const pointee_llvm_ty = try o.lowerType(pointee_type);
-        const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+        const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
         return self.buildAlloca(pointee_llvm_ty, alignment);
     }
 
@@ -8475,7 +8475,7 @@ pub const FuncGen = struct {
             return (try o.lowerPtrToVoid(ptr_ty)).toValue();
         if (self.ret_ptr != .none) return self.ret_ptr;
         const ret_llvm_ty = try o.lowerType(ret_ty);
-        const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+        const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
         return self.buildAlloca(ret_llvm_ty, alignment);
     }
 
@@ -8515,7 +8515,7 @@ pub const FuncGen = struct {
             const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod));
             _ = try self.wip.callMemSet(
                 dest_ptr,
-                Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)),
+                ptr_ty.ptrAlignment(mod).toLlvm(),
                 if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8),
                 len,
                 if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
@@ -8646,7 +8646,7 @@ pub const FuncGen = struct {
             self.sync_scope,
             toLlvmAtomicOrdering(extra.successOrder()),
             toLlvmAtomicOrdering(extra.failureOrder()),
-            Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)),
+            ptr_ty.ptrAlignment(mod).toLlvm(),
             "",
         );
 
@@ -8685,7 +8685,7 @@ pub const FuncGen = struct {
 
         const access_kind: Builder.MemoryAccessKind =
             if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
-        const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+        const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm();
 
         if (llvm_abi_ty != .none) {
             // operand needs widening and truncating or bitcasting.
@@ -8741,9 +8741,10 @@ pub const FuncGen = struct {
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
         const ordering = toLlvmAtomicOrdering(atomic_load.order);
         const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
-        const ptr_alignment = Builder.Alignment.fromByteUnits(
-            info.flags.alignment.toByteUnitsOptional() orelse info.child.toType().abiAlignment(mod),
-        );
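+        // An alignment of .none means "use the pointee's natural ABI alignment".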
+        const ptr_alignment = (if (info.flags.alignment != .none)
+            @as(InternPool.Alignment, info.flags.alignment)
+        else
+            info.child.toType().abiAlignment(mod)).toLlvm();
         const access_kind: Builder.MemoryAccessKind =
             if (info.flags.is_volatile) .@"volatile" else .normal;
         const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -8807,7 +8808,7 @@ pub const FuncGen = struct {
         const dest_slice = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = self.typeOf(bin_op.rhs);
-        const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+        const dest_ptr_align = ptr_ty.ptrAlignment(mod).toLlvm();
         const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty);
         const access_kind: Builder.MemoryAccessKind =
             if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
@@ -8911,15 +8912,13 @@ pub const FuncGen = struct {
 
         self.wip.cursor = .{ .block = body_block };
         const elem_abi_align = elem_ty.abiAlignment(mod);
-        const it_ptr_align = Builder.Alignment.fromByteUnits(
-            @min(elem_abi_align, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)),
-        );
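+        // Each store through the iterator is only guaranteed the weaker of the
+        // destination pointer's alignment and the element's ABI alignment.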
+        const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm();
         if (isByRef(elem_ty, mod)) {
             _ = try self.wip.callMemCpy(
                 it_ptr.toValue(),
                 it_ptr_align,
                 value,
-                Builder.Alignment.fromByteUnits(elem_abi_align),
+                elem_abi_align.toLlvm(),
                 try o.builder.intValue(llvm_usize_ty, elem_abi_size),
                 access_kind,
             );
@@ -8985,9 +8984,9 @@ pub const FuncGen = struct {
             self.wip.cursor = .{ .block = memcpy_block };
             _ = try self.wip.callMemCpy(
                 dest_ptr,
-                Builder.Alignment.fromByteUnits(dest_ptr_ty.ptrAlignment(mod)),
+                dest_ptr_ty.ptrAlignment(mod).toLlvm(),
                 src_ptr,
-                Builder.Alignment.fromByteUnits(src_ptr_ty.ptrAlignment(mod)),
+                src_ptr_ty.ptrAlignment(mod).toLlvm(),
                 len,
                 access_kind,
             );
@@ -8998,9 +8997,9 @@ pub const FuncGen = struct {
 
         _ = try self.wip.callMemCpy(
             dest_ptr,
-            Builder.Alignment.fromByteUnits(dest_ptr_ty.ptrAlignment(mod)),
+            dest_ptr_ty.ptrAlignment(mod).toLlvm(),
             src_ptr,
-            Builder.Alignment.fromByteUnits(src_ptr_ty.ptrAlignment(mod)),
+            src_ptr_ty.ptrAlignment(mod).toLlvm(),
             len,
             access_kind,
         );
@@ -9021,7 +9020,7 @@ pub const FuncGen = struct {
             _ = try self.wip.store(.normal, new_tag, union_ptr, .default);
             return .none;
         }
-        const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
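+        // The more-aligned member comes first in the union's LLVM struct, so
+        // the tag sits at index 1 only when the payload is more strictly aligned.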
+        const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
         const tag_field_ptr = try self.wip.gepStruct(try o.lowerType(un_ty), union_ptr, tag_index, "");
         // TODO alignment on this store
         _ = try self.wip.store(.normal, new_tag, tag_field_ptr, .default);
@@ -9040,13 +9039,13 @@ pub const FuncGen = struct {
             const llvm_un_ty = try o.lowerType(un_ty);
             if (layout.payload_size == 0)
                 return self.wip.load(.normal, llvm_un_ty, union_handle, .default, "");
-            const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
+            const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
             const tag_field_ptr = try self.wip.gepStruct(llvm_un_ty, union_handle, tag_index, "");
             const llvm_tag_ty = llvm_un_ty.structFields(&o.builder)[tag_index];
             return self.wip.load(.normal, llvm_tag_ty, tag_field_ptr, .default, "");
         } else {
             if (layout.payload_size == 0) return union_handle;
-            const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
+            const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
             return self.wip.extractValue(union_handle, &.{tag_index}, "");
         }
     }
@@ -9605,6 +9604,7 @@ pub const FuncGen = struct {
     fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.dg.object;
         const mod = o.module;
+        const ip = &mod.intern_pool;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const result_ty = self.typeOfIndex(inst);
         const len: usize = @intCast(result_ty.arrayLen(mod));
@@ -9622,23 +9622,21 @@ pub const FuncGen = struct {
                 return vector;
             },
             .Struct => {
-                if (result_ty.containerLayout(mod) == .Packed) {
-                    const struct_obj = mod.typeToStruct(result_ty).?;
-                    assert(struct_obj.haveLayout());
-                    const big_bits = struct_obj.backing_int_ty.bitSize(mod);
+                if (mod.typeToPackedStruct(result_ty)) |struct_type| {
+                    const backing_int_ty = struct_type.backingIntType(ip).*;
+                    assert(backing_int_ty != .none);
+                    const big_bits = backing_int_ty.toType().bitSize(mod);
                     const int_ty = try o.builder.intType(@intCast(big_bits));
-                    const fields = struct_obj.fields.values();
                     comptime assert(Type.packed_struct_layout_version == 2);
                     var running_int = try o.builder.intValue(int_ty, 0);
                     var running_bits: u16 = 0;
-                    for (elements, 0..) |elem, i| {
-                        const field = fields[i];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
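+                    // Merge each runtime field into the backing integer at the
+                    // current running_bits offset, in declaration order.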
+                    for (elements, struct_type.field_types.get(ip)) |elem, field_ty| {
+                        if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         const non_int_val = try self.resolveInst(elem);
-                        const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod));
+                        const ty_bit_size: u16 = @intCast(field_ty.toType().bitSize(mod));
                         const small_int_ty = try o.builder.intType(ty_bit_size);
-                        const small_int_val = if (field.ty.isPtrAtRuntime(mod))
+                        const small_int_val = if (field_ty.toType().isPtrAtRuntime(mod))
                             try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
                         else
                             try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -9652,10 +9650,12 @@ pub const FuncGen = struct {
                     return running_int;
                 }
 
+                assert(result_ty.containerLayout(mod) != .Packed);
+
                 if (isByRef(result_ty, mod)) {
                     // TODO in debug builds init to undef so that the padding will be 0xaa
                     // even if we fully populate the fields.
-                    const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod));
+                    const alignment = result_ty.abiAlignment(mod).toLlvm();
                     const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
 
                     for (elements, 0..) |elem, i| {
@@ -9668,9 +9668,7 @@ pub const FuncGen = struct {
                         const field_ptr_ty = try mod.ptrType(.{
                             .child = self.typeOf(elem).toIntern(),
                             .flags = .{
-                                .alignment = InternPool.Alignment.fromNonzeroByteUnits(
-                                    result_ty.structFieldAlign(i, mod),
-                                ),
+                                .alignment = result_ty.structFieldAlign(i, mod),
                             },
                         });
                         try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
@@ -9694,7 +9692,7 @@ pub const FuncGen = struct {
 
                 const llvm_usize = try o.lowerType(Type.usize);
                 const usize_zero = try o.builder.intValue(llvm_usize, 0);
-                const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod));
+                const alignment = result_ty.abiAlignment(mod).toLlvm();
                 const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
 
                 const array_info = result_ty.arrayInfo(mod);
@@ -9770,7 +9768,7 @@ pub const FuncGen = struct {
         // necessarily match the format that we need, depending on which tag is active.
         // We must construct the correct unnamed struct type here, in order to then set
         // the fields appropriately.
-        const alignment = Builder.Alignment.fromByteUnits(layout.abi_align);
+        const alignment = layout.abi_align.toLlvm();
         const result_ptr = try self.buildAlloca(union_llvm_ty, alignment);
         const llvm_payload = try self.resolveInst(extra.init);
         const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
@@ -9799,7 +9797,7 @@ pub const FuncGen = struct {
             const tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
             var fields: [3]Builder.Type = undefined;
             var fields_len: usize = 2;
-            if (layout.tag_align >= layout.payload_align) {
+            if (layout.tag_align.compare(.gte, layout.payload_align)) {
                 fields = .{ tag_ty, payload_ty, undefined };
             } else {
                 fields = .{ payload_ty, tag_ty, undefined };
@@ -9815,7 +9813,7 @@ pub const FuncGen = struct {
         // tag and the payload.
         const field_ptr_ty = try mod.ptrType(.{
             .child = field_ty.toIntern(),
-            .flags = .{ .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align) },
+            .flags = .{ .alignment = field_align },
         });
         if (layout.tag_size == 0) {
             const indices = [3]Builder.Value{ usize_zero, i32_zero, i32_zero };
@@ -9827,7 +9825,7 @@ pub const FuncGen = struct {
         }
 
         {
-            const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
+            const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
             const indices: [3]Builder.Value =
                 .{ usize_zero, try o.builder.intValue(.i32, payload_index), i32_zero };
             const len: usize = if (field_size == layout.payload_size) 2 else 3;
@@ -9836,12 +9834,12 @@ pub const FuncGen = struct {
             try self.store(field_ptr, field_ptr_ty, llvm_payload, .none);
         }
         {
-            const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
+            const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
             const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) };
             const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
             const tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
             const llvm_tag = try o.builder.intValue(tag_ty, tag_int);
-            const tag_alignment = Builder.Alignment.fromByteUnits(union_obj.enum_tag_ty.toType().abiAlignment(mod));
+            const tag_alignment = union_obj.enum_tag_ty.toType().abiAlignment(mod).toLlvm();
             _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
         }
 
@@ -9978,7 +9976,7 @@ pub const FuncGen = struct {
         variable_index.setMutability(.constant, &o.builder);
         variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
         variable_index.setAlignment(
-            Builder.Alignment.fromByteUnits(Type.slice_const_u8_sentinel_0.abiAlignment(mod)),
+            Type.slice_const_u8_sentinel_0.abiAlignment(mod).toLlvm(),
             &o.builder,
         );
 
@@ -10023,7 +10021,7 @@ pub const FuncGen = struct {
             // We have a pointer and we need to return a pointer to the first field.
             const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, "");
 
-            const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
+            const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
             if (isByRef(payload_ty, mod)) {
                 if (can_elide_load)
                     return payload_ptr;
@@ -10050,7 +10048,7 @@ pub const FuncGen = struct {
         const mod = o.module;
 
         if (isByRef(optional_ty, mod)) {
-            const payload_alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod));
+            const payload_alignment = optional_ty.abiAlignment(mod).toLlvm();
             const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment);
 
             {
@@ -10123,7 +10121,7 @@ pub const FuncGen = struct {
             .Union => {
                 const layout = struct_ty.unionGetLayout(mod);
                 if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
-                const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
+                const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
                 const union_llvm_ty = try o.lowerType(struct_ty);
                 return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
             },
@@ -10142,9 +10140,7 @@ pub const FuncGen = struct {
         const o = fg.dg.object;
         const mod = o.module;
         const pointee_llvm_ty = try o.lowerType(pointee_type);
-        const result_align = Builder.Alignment.fromByteUnits(
-            @max(ptr_alignment.toByteUnits() orelse 0, pointee_type.abiAlignment(mod)),
-        );
+        const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm();
         const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align);
         const size_bytes = pointee_type.abiSize(mod);
         _ = try fg.wip.callMemCpy(
@@ -10168,9 +10164,11 @@ pub const FuncGen = struct {
         const elem_ty = info.child.toType();
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
 
-        const ptr_alignment = Builder.Alignment.fromByteUnits(
-            info.flags.alignment.toByteUnitsOptional() orelse elem_ty.abiAlignment(mod),
-        );
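+        // As with atomic loads: .none defers to the pointee's ABI alignment.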
+        const ptr_alignment = (if (info.flags.alignment != .none)
+            @as(InternPool.Alignment, info.flags.alignment)
+        else
+            elem_ty.abiAlignment(mod)).toLlvm();
+
         const access_kind: Builder.MemoryAccessKind =
             if (info.flags.is_volatile) .@"volatile" else .normal;
 
@@ -10201,7 +10199,7 @@ pub const FuncGen = struct {
         const elem_llvm_ty = try o.lowerType(elem_ty);
 
         if (isByRef(elem_ty, mod)) {
-            const result_align = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+            const result_align = elem_ty.abiAlignment(mod).toLlvm();
             const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align);
 
             const same_size_int = try o.builder.intType(@intCast(elem_bits));
@@ -10239,7 +10237,7 @@ pub const FuncGen = struct {
         if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
             return;
         }
-        const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+        const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm();
         const access_kind: Builder.MemoryAccessKind =
             if (info.flags.is_volatile) .@"volatile" else .normal;
 
@@ -10305,7 +10303,7 @@ pub const FuncGen = struct {
             ptr,
             ptr_alignment,
             elem,
-            Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)),
+            elem_ty.abiAlignment(mod).toLlvm(),
             try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)),
             access_kind,
         );
@@ -10337,7 +10335,7 @@ pub const FuncGen = struct {
         if (!target_util.hasValgrindSupport(target)) return default_value;
 
         const llvm_usize = try o.lowerType(Type.usize);
-        const usize_alignment = Builder.Alignment.fromByteUnits(Type.usize.abiAlignment(mod));
+        const usize_alignment = Type.usize.abiAlignment(mod).toLlvm();
 
         const array_llvm_ty = try o.builder.arrayType(6, llvm_usize);
         const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
@@ -10718,6 +10716,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err
 
 fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const mod = o.module;
+    const ip = &mod.intern_pool;
     const return_type = fn_info.return_type.toType();
     if (isScalar(mod, return_type)) {
         return o.lowerType(return_type);
@@ -10761,12 +10760,16 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
     const first_non_integer = std.mem.indexOfNone(x86_64_abi.Class, &classes, &.{.integer});
     if (first_non_integer == null or classes[first_non_integer.?] == .none) {
         assert(first_non_integer orelse classes.len == types_index);
-        if (mod.intern_pool.indexToKey(return_type.toIntern()) == .struct_type) {
-            var struct_it = return_type.iterateStructOffsets(mod);
-            while (struct_it.next()) |_| {}
-            assert((std.math.divCeil(u64, struct_it.offset, 8) catch unreachable) == types_index);
-            if (struct_it.offset % 8 > 0) types_buffer[types_index - 1] =
-                try o.builder.intType(@intCast(struct_it.offset % 8 * 8));
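+        // When the struct does not fill its final eightbyte, narrow the last
+        // integer class to the exact number of remaining bits.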
+        switch (ip.indexToKey(return_type.toIntern())) {
+            .struct_type => |struct_type| {
+                assert(struct_type.haveLayout(ip));
+                const size: u64 = struct_type.size(ip).*;
+                assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
+                if (size % 8 > 0) {
+                    types_buffer[types_index - 1] = try o.builder.intType(@intCast(size % 8 * 8));
+                }
+            },
+            else => {},
         }
         if (types_index == 1) return types_buffer[0];
     }
@@ -10982,6 +10985,7 @@ const ParamTypeIterator = struct {
 
     fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
         const mod = it.object.module;
+        const ip = &mod.intern_pool;
         const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
         if (classes[0] == .memory) {
             it.zig_index += 1;
@@ -11037,12 +11041,17 @@ const ParamTypeIterator = struct {
                 it.llvm_index += 1;
                 return .abi_sized_int;
             }
-            if (mod.intern_pool.indexToKey(ty.toIntern()) == .struct_type) {
-                var struct_it = ty.iterateStructOffsets(mod);
-                while (struct_it.next()) |_| {}
-                assert((std.math.divCeil(u64, struct_it.offset, 8) catch unreachable) == types_index);
-                if (struct_it.offset % 8 > 0) types_buffer[types_index - 1] =
-                    try it.object.builder.intType(@intCast(struct_it.offset % 8 * 8));
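+            // Same trimming as the return-type path: a trailing partial
+            // eightbyte becomes an exact-width integer.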
+            switch (ip.indexToKey(ty.toIntern())) {
+                .struct_type => |struct_type| {
+                    assert(struct_type.haveLayout(ip));
+                    const size: u64 = struct_type.size(ip).*;
+                    assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
+                    if (size % 8 > 0) {
+                        types_buffer[types_index - 1] =
+                            try it.object.builder.intType(@intCast(size % 8 * 8));
+                    }
+                },
+                else => {},
             }
         }
         it.types_len = types_index;
@@ -11137,8 +11146,6 @@ fn isByRef(ty: Type, mod: *Module) bool {
 
         .Array, .Frame => return ty.hasRuntimeBits(mod),
         .Struct => {
-            // Packed structs are represented to LLVM as integers.
-            if (ty.containerLayout(mod) == .Packed) return false;
             const struct_type = switch (ip.indexToKey(ty.toIntern())) {
                 .anon_struct_type => |tuple| {
                     var count: usize = 0;
@@ -11154,14 +11161,18 @@ fn isByRef(ty: Type, mod: *Module) bool {
                 .struct_type => |s| s,
                 else => unreachable,
             };
-            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-            var count: usize = 0;
-            for (struct_obj.fields.values()) |field| {
-                if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
 
+            // Packed structs are represented to LLVM as integers.
+            if (struct_type.layout == .Packed) return false;
+
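+            // Only fields with runtime bits are visited here, so comptime
+            // fields can never force by-ref passing.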
+            const field_types = struct_type.field_types.get(ip);
+            var it = struct_type.iterateRuntimeOrder(ip);
+            var count: usize = 0;
+            while (it.next()) |field_index| {
                 count += 1;
                 if (count > max_fields_byval) return true;
-                if (isByRef(field.ty, mod)) return true;
+                const field_ty = field_types[field_index].toType();
+                if (isByRef(field_ty, mod)) return true;
             }
             return false;
         },
@@ -11362,11 +11373,11 @@ fn buildAllocaInner(
 }
 
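+/// The more-aligned of the payload and the error code is placed first (the
+/// payload wins ties); these two helpers return the resulting field indices.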
 fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
-    return @intFromBool(Type.err_int.abiAlignment(mod) > payload_ty.abiAlignment(mod));
+    return @intFromBool(Type.err_int.abiAlignment(mod).compare(.gt, payload_ty.abiAlignment(mod)));
 }
 
 fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
-    return @intFromBool(Type.err_int.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
+    return @intFromBool(Type.err_int.abiAlignment(mod).compare(.lte, payload_ty.abiAlignment(mod)));
 }
 
 /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
src/codegen/spirv.zig
@@ -792,24 +792,28 @@ pub const DeclGen = struct {
                     },
                     .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
                     .struct_type => {
-                        const struct_ty = mod.typeToStruct(ty).?;
-                        if (struct_ty.layout == .Packed) {
+                        const struct_type = mod.typeToStruct(ty).?;
+                        if (struct_type.layout == .Packed) {
                             return dg.todo("packed struct constants", .{});
                         }
 
+                        // TODO iterate with runtime order instead so that struct field
+                        // reordering can be enabled for this backend.
                         const struct_begin = self.size;
-                        for (struct_ty.fields.values(), 0..) |field, i| {
-                            if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
+                        for (struct_type.field_types.get(ip), 0..) |field_ty, i_usize| {
+                            const i: u32 = @intCast(i_usize);
+                            if (struct_type.fieldIsComptime(ip, i)) continue;
+                            if (!field_ty.toType().hasRuntimeBits(mod)) continue;
 
                             const field_val = switch (aggregate.storage) {
                                 .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
-                                    .ty = field.ty.toIntern(),
+                                    .ty = field_ty,
                                     .storage = .{ .u64 = bytes[i] },
                                 } }),
                                 .elems => |elems| elems[i],
                                 .repeated_elem => |elem| elem,
                             };
-                            try self.lower(field.ty, field_val.toValue());
+                            try self.lower(field_ty.toType(), field_val.toValue());
 
                             // Add padding if required.
                             // TODO: Add to type generation as well?
@@ -838,7 +842,7 @@ pub const DeclGen = struct {
                     const active_field_ty = union_obj.field_types.get(ip)[active_field].toType();
 
                     const has_tag = layout.tag_size != 0;
-                    const tag_first = layout.tag_align >= layout.payload_align;
+                    const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
 
                     if (has_tag and tag_first) {
                         try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
@@ -1094,7 +1098,7 @@ pub const DeclGen = struct {
                     val,
                     .UniformConstant,
                     false,
-                    alignment,
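+                    // The SPIR-V side takes alignment in byte units; .none lowers to 0.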
+                    @intCast(alignment.toByteUnits(0)),
                 );
                 log.debug("indirect constant: index = {}", .{@intFromEnum(spv_decl_index)});
                 try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
@@ -1180,7 +1184,7 @@ pub const DeclGen = struct {
         var member_names = std.BoundedArray(CacheString, 4){};
 
         const has_tag = layout.tag_size != 0;
-        const tag_first = layout.tag_align >= layout.payload_align;
+        const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
         const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled?
 
         if (has_tag and tag_first) {
@@ -1333,7 +1337,7 @@ pub const DeclGen = struct {
                 } });
             },
             .Struct => {
-                const struct_ty = switch (ip.indexToKey(ty.toIntern())) {
+                const struct_type = switch (ip.indexToKey(ty.toIntern())) {
                     .anon_struct_type => |tuple| {
                         const member_types = try self.gpa.alloc(CacheRef, tuple.values.len);
                         defer self.gpa.free(member_types);
@@ -1350,13 +1354,12 @@ pub const DeclGen = struct {
                             .member_types = member_types[0..member_index],
                         } });
                     },
-                    .struct_type => |struct_ty| struct_ty,
+                    .struct_type => |struct_type| struct_type,
                     else => unreachable,
                 };
 
-                const struct_obj = mod.structPtrUnwrap(struct_ty.index).?;
-                if (struct_obj.layout == .Packed) {
-                    return try self.resolveType(struct_obj.backing_int_ty, .direct);
+                if (struct_type.layout == .Packed) {
+                    return try self.resolveType(struct_type.backingIntType(ip).toType(), .direct);
                 }
 
                 var member_types = std.ArrayList(CacheRef).init(self.gpa);
@@ -1365,16 +1368,15 @@ pub const DeclGen = struct {
                 var member_names = std.ArrayList(CacheString).init(self.gpa);
                 defer member_names.deinit();
 
-                var it = struct_obj.runtimeFieldIterator(mod);
-                while (it.next()) |field_and_index| {
-                    const field = field_and_index.field;
-                    const index = field_and_index.index;
-                    const field_name = ip.stringToSlice(struct_obj.fields.keys()[index]);
-                    try member_types.append(try self.resolveType(field.ty, .indirect));
+                var it = struct_type.iterateRuntimeOrder(ip);
+                while (it.next()) |field_index| {
+                    const field_ty = struct_type.field_types.get(ip)[field_index];
+                    const field_name = ip.stringToSlice(struct_type.field_names.get(ip)[field_index]);
+                    try member_types.append(try self.resolveType(field_ty.toType(), .indirect));
                     try member_names.append(try self.spv.resolveString(field_name));
                 }
 
-                const name = ip.stringToSlice(try struct_obj.getFullyQualifiedName(self.module));
+                const name = ip.stringToSlice(try mod.declPtr(struct_type.decl.unwrap().?).getFullyQualifiedName(mod));
 
                 return try self.spv.resolve(.{ .struct_type = .{
                     .name = try self.spv.resolveString(name),
@@ -1500,7 +1502,7 @@ pub const DeclGen = struct {
         const error_align = Type.anyerror.abiAlignment(mod);
         const payload_align = payload_ty.abiAlignment(mod);
 
-        const error_first = error_align > payload_align;
+        const error_first = error_align.compare(.gt, payload_align);
         return .{
             .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
             .error_first = error_first,
@@ -1662,7 +1664,7 @@ pub const DeclGen = struct {
                 init_val,
                 actual_storage_class,
                 final_storage_class == .Generic,
-                @as(u32, @intCast(decl.alignment.toByteUnits(0))),
+                @intCast(decl.alignment.toByteUnits(0)),
             );
         }
     }
@@ -2603,7 +2605,7 @@ pub const DeclGen = struct {
         if (layout.payload_size == 0) return union_handle;
 
         const tag_ty = un_ty.unionTagTypeSafety(mod).?;
-        const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
+        const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
         return try self.extractField(tag_ty, union_handle, tag_index);
     }
 
src/link/Elf/Atom.zig
@@ -11,7 +11,7 @@ file_index: File.Index = 0,
 size: u64 = 0,
 
 /// Alignment of this atom as a power of two.
-alignment: u8 = 0,
+alignment: Alignment = .@"1",
 
 /// Index of the input section.
 input_section_index: Index = 0,
@@ -42,6 +42,8 @@ fde_end: u32 = 0,
 prev_index: Index = 0,
 next_index: Index = 0,
 
+pub const Alignment = @import("../../InternPool.zig").Alignment;
+
 pub fn name(self: Atom, elf_file: *Elf) []const u8 {
     return elf_file.strtab.getAssumeExists(self.name_offset);
 }
@@ -112,7 +114,6 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
     const free_list = &meta.free_list;
     const last_atom_index = &meta.last_atom_index;
     const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
-    const alignment = try std.math.powi(u64, 2, self.alignment);
 
     // We use these to indicate our intention to update metadata, place the new
     // atom, and possibly remove a free list node.
@@ -136,7 +137,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
             const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
             const capacity_end_vaddr = big_atom.value + cap;
             const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
-            const new_start_vaddr = std.mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
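+            // Alignment.backward rounds down directly on the log2
+            // representation, replacing the old powi + alignBackward pair.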
+            const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
             if (new_start_vaddr < ideal_capacity_end_vaddr) {
                 // Additional bookkeeping here to notice if this free list node
                 // should be deleted because the block that it points to has grown to take up
@@ -163,7 +164,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
         } else if (elf_file.atom(last_atom_index.*)) |last| {
             const ideal_capacity = Elf.padToIdeal(last.size);
             const ideal_capacity_end_vaddr = last.value + ideal_capacity;
-            const new_start_vaddr = std.mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
+            const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
             // Set up the metadata to be updated, after errors are no longer possible.
             atom_placement = last.atom_index;
             break :blk new_start_vaddr;
@@ -192,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
             elf_file.debug_aranges_section_dirty = true;
         }
     }
-    shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
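+    // sh_addralign is in byte units; the unwrap asserts the atom's alignment
+    // is not .none.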
+    shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?);
 
     // This function can also reallocate an atom.
     // In this case we need to "unplug" it from its previous location before
@@ -224,10 +225,8 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void {
 }
 
 pub fn grow(self: *Atom, elf_file: *Elf) !void {
-    const alignment = try std.math.powi(u64, 2, self.alignment);
-    const align_ok = std.mem.alignBackward(u64, self.value, alignment) == self.value;
-    const need_realloc = !align_ok or self.size > self.capacity(elf_file);
-    if (need_realloc) try self.allocate(elf_file);
+    if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file))
+        try self.allocate(elf_file);
 }
 
 pub fn free(self: *Atom, elf_file: *Elf) void {
src/link/Elf/Object.zig
@@ -181,10 +181,10 @@ fn addAtom(self: *Object, shdr: elf.Elf64_Shdr, shndx: u16, name: [:0]const u8,
         const data = try self.shdrContents(shndx);
         const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
         atom.size = chdr.ch_size;
-        atom.alignment = math.log2_int(u64, chdr.ch_addralign);
+        atom.alignment = Alignment.fromNonzeroByteUnits(chdr.ch_addralign);
     } else {
         atom.size = shdr.sh_size;
-        atom.alignment = math.log2_int(u64, shdr.sh_addralign);
+        atom.alignment = Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
     }
 }
 
@@ -571,7 +571,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
         atom.file = self.index;
         atom.size = this_sym.st_size;
         const alignment = this_sym.st_value;
-        atom.alignment = math.log2_int(u64, alignment);
+        atom.alignment = Alignment.fromNonzeroByteUnits(alignment);
 
         var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
         if (is_tls) sh_flags |= elf.SHF_TLS;
@@ -870,3 +870,4 @@ const Fde = eh_frame.Fde;
 const File = @import("file.zig").File;
 const StringTable = @import("../strtab.zig").StringTable;
 const Symbol = @import("Symbol.zig");
+const Alignment = Atom.Alignment;
src/link/MachO/Atom.zig
@@ -28,13 +28,15 @@ size: u64 = 0,
 
 /// Alignment of this atom as a power of 2.
-/// For instance, aligmment of 0 should be read as 2^0 = 1 byte aligned.
+/// For instance, a log2 value of 0 should be read as 2^0 = 1 byte aligned.
-alignment: u32 = 0,
+alignment: Alignment = .@"1",
 
 /// Points to the previous and next neighbours
 /// TODO use the same trick as with symbols: reserve index 0 as null atom
 next_index: ?Index = null,
 prev_index: ?Index = null,
 
+pub const Alignment = @import("../../InternPool.zig").Alignment;
+
 pub const Index = u32;
 
 pub const Binding = struct {
src/link/MachO/Object.zig
@@ -382,7 +382,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
             const out_sect_id = (try Atom.getOutputSection(macho_file, sect)) orelse continue;
             if (sect.size == 0) continue;
 
-            const sect_id = @as(u8, @intCast(id));
+            const sect_id: u8 = @intCast(id);
             const sym_index = self.getSectionAliasSymbolIndex(sect_id);
             const atom_index = try self.createAtomFromSubsection(
                 macho_file,
@@ -391,7 +391,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
                 sym_index,
                 1,
                 sect.size,
-                sect.@"align",
+                Alignment.fromLog2Units(sect.@"align"),
                 out_sect_id,
             );
             macho_file.addAtomToSection(atom_index);
@@ -470,7 +470,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
                     sym_index,
                     1,
                     atom_size,
-                    sect.@"align",
+                    Alignment.fromLog2Units(sect.@"align"),
                     out_sect_id,
                 );
                 if (!sect.isZerofill()) {
@@ -494,10 +494,10 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
                 else
                     sect.addr + sect.size - addr;
 
-                const atom_align = if (addr > 0)
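+                // @ctz of the address is the largest power of two dividing it,
+                // so the subsection can be no more aligned than where it sits.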
+                const atom_align = Alignment.fromLog2Units(if (addr > 0)
                     @min(@ctz(addr), sect.@"align")
                 else
-                    sect.@"align";
+                    sect.@"align");
 
                 const atom_index = try self.createAtomFromSubsection(
                     macho_file,
@@ -532,7 +532,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
                 sect_start_index,
                 sect_loc.len,
                 sect.size,
-                sect.@"align",
+                Alignment.fromLog2Units(sect.@"align"),
                 out_sect_id,
             );
             if (!sect.isZerofill()) {
@@ -551,11 +551,14 @@ fn createAtomFromSubsection(
     inner_sym_index: u32,
     inner_nsyms_trailing: u32,
     size: u64,
-    alignment: u32,
+    alignment: Alignment,
     out_sect_id: u8,
 ) !Atom.Index {
     const gpa = macho_file.base.allocator;
-    const atom_index = try macho_file.createAtom(sym_index, .{ .size = size, .alignment = alignment });
+    const atom_index = try macho_file.createAtom(sym_index, .{
+        .size = size,
+        .alignment = alignment,
+    });
     const atom = macho_file.getAtomPtr(atom_index);
     atom.inner_sym_index = inner_sym_index;
     atom.inner_nsyms_trailing = inner_nsyms_trailing;
@@ -1115,3 +1118,4 @@ const MachO = @import("../MachO.zig");
 const Platform = @import("load_commands.zig").Platform;
 const SymbolWithLoc = MachO.SymbolWithLoc;
 const UnwindInfo = @import("UnwindInfo.zig");
+const Alignment = Atom.Alignment;
src/link/MachO/thunks.zig
@@ -104,7 +104,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void {
 
         while (true) {
             const atom = macho_file.getAtom(group_end);
-            offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment));
+            offset = atom.alignment.forward(offset);
 
             const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc());
             sym.n_value = offset;
@@ -112,7 +112,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void {
 
             macho_file.logAtom(group_end, log);
 
-            header.@"align" = @max(header.@"align", atom.alignment);
+            header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
 
             allocated.putAssumeCapacityNoClobber(group_end, {});
 
@@ -196,7 +196,7 @@ fn allocateThunk(
 
         macho_file.logAtom(atom_index, log);
 
-        header.@"align" = @max(header.@"align", atom.alignment);
+        header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
 
         if (end_atom_index == atom_index) break;
 
@@ -326,7 +326,10 @@ fn isReachable(
 
 fn createThunkAtom(macho_file: *MachO) !Atom.Index {
     const sym_index = try macho_file.allocateSymbol();
-    const atom_index = try macho_file.createAtom(sym_index, .{ .size = @sizeOf(u32) * 3, .alignment = 2 });
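+    // The old raw `2` was in log2 units (2^2 = 4); the typed enum spells the
+    // same 4-byte alignment directly.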
+    const atom_index = try macho_file.createAtom(sym_index, .{
+        .size = @sizeOf(u32) * 3,
+        .alignment = .@"4",
+    });
     const sym = macho_file.getSymbolPtr(.{ .sym_index = sym_index });
     sym.n_type = macho.N_SECT;
     sym.n_sect = macho_file.text_section_index.? + 1;
src/link/MachO/zld.zig
@@ -985,19 +985,16 @@ fn calcSectionSizes(macho_file: *MachO) !void {
 
         while (true) {
             const atom = macho_file.getAtom(atom_index);
-            const atom_alignment = try math.powi(u32, 2, atom.alignment);
-            const atom_offset = mem.alignForward(u64, header.size, atom_alignment);
+            const atom_offset = atom.alignment.forward(header.size);
             const padding = atom_offset - header.size;
 
             const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc());
             sym.n_value = atom_offset;
 
             header.size += padding + atom.size;
-            header.@"align" = @max(header.@"align", atom.alignment);
+            header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
 
-            if (atom.next_index) |next_index| {
-                atom_index = next_index;
-            } else break;
+            atom_index = atom.next_index orelse break;
         }
     }
 
src/link/Wasm/Atom.zig
@@ -19,7 +19,7 @@ relocs: std.ArrayListUnmanaged(types.Relocation) = .{},
 /// Contains the binary data of an atom, which can be non-relocated
 code: std.ArrayListUnmanaged(u8) = .{},
 /// For code this is 1; for data it is set to the highest alignment of all segments
-alignment: u32,
+alignment: Wasm.Alignment,
 /// Offset into the section where the atom lives; this already accounts
 /// for alignment.
 offset: u32,
@@ -43,7 +43,7 @@ pub const Index = u32;
 
 /// Represents a default empty wasm `Atom`
 pub const empty: Atom = .{
-    .alignment = 1,
+    .alignment = .@"1",
     .file = null,
     .next = null,
     .offset = 0,
src/link/Wasm/Object.zig
@@ -8,6 +8,7 @@ const types = @import("types.zig");
 const std = @import("std");
 const Wasm = @import("../Wasm.zig");
 const Symbol = @import("Symbol.zig");
+const Alignment = types.Alignment;
 
 const Allocator = std.mem.Allocator;
 const leb = std.leb;
@@ -88,12 +89,9 @@ const RelocatableData = struct {
     /// meta data of the given object file.
-    /// NOTE: Alignment is encoded as a power of 2, so we shift the symbol's
-    /// alignment to retrieve the natural alignment.
+    /// NOTE: The alignment is already stored in log2 units, matching the
+    /// `Alignment` representation, so it can be returned directly.
-    pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) u32 {
-        if (relocatable_data.type != .data) return 1;
-        const data_alignment = object.segment_info[relocatable_data.index].alignment;
-        if (data_alignment == 0) return 1;
-        // Decode from power of 2 to natural alignment
-        return @as(u32, 1) << @as(u5, @intCast(data_alignment));
+    pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) Alignment {
+        if (relocatable_data.type != .data) return .@"1";
+        return object.segment_info[relocatable_data.index].alignment;
     }
 
     /// Returns the symbol kind that corresponds to the relocatable section
@@ -671,7 +669,7 @@ fn Parser(comptime ReaderType: type) type {
                         try reader.readNoEof(name);
                         segment.* = .{
                             .name = name,
-                            .alignment = try leb.readULEB128(u32, reader),
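+                            // The encoded value is already log2, which is
+                            // exactly Alignment's backing representation.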
+                            .alignment = @enumFromInt(try leb.readULEB128(u32, reader)),
                             .flags = try leb.readULEB128(u32, reader),
                         };
                         log.debug("Found segment: {s} align({d}) flags({b})", .{
@@ -919,7 +917,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
             continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
         };
 
-        const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len));
+        const atom_index: Atom.Index = @intCast(wasm_bin.managed_atoms.items.len);
         const atom = try wasm_bin.managed_atoms.addOne(gpa);
         atom.* = Atom.empty;
         atom.file = object_index;
@@ -984,7 +982,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
 
         const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
         if (relocatable_data.type == .data) { // code section and debug sections are 1-byte aligned
-            segment.alignment = @max(segment.alignment, atom.alignment);
+            segment.alignment = segment.alignment.max(atom.alignment);
         }
 
         try wasm_bin.appendAtomAtIndex(final_index, atom_index);
src/link/Wasm/types.zig
@@ -109,11 +109,13 @@ pub const SubsectionType = enum(u8) {
     WASM_SYMBOL_TABLE = 8,
 };
 
+pub const Alignment = @import("../../InternPool.zig").Alignment;
+
 pub const Segment = struct {
     /// Segment's name, encoded as UTF-8 bytes.
     name: []const u8,
     /// The required alignment of the segment, encoded as a power of 2
-    alignment: u32,
+    alignment: Alignment,
     /// Bitfield containing flags for a segment
     flags: u32,
 
src/link/Coff.zig
@@ -1118,7 +1118,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
         },
     };
 
-    const required_alignment = tv.ty.abiAlignment(mod);
+    const required_alignment: u32 = @intCast(tv.ty.abiAlignment(mod).toByteUnits(0));
     const atom = self.getAtomPtr(atom_index);
     atom.size = @as(u32, @intCast(code.len));
     atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
@@ -1196,7 +1196,7 @@ fn updateLazySymbolAtom(
     const gpa = self.base.allocator;
     const mod = self.base.options.module.?;
 
-    var required_alignment: u32 = undefined;
+    var required_alignment: InternPool.Alignment = .none;
     var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
@@ -1240,7 +1240,7 @@ fn updateLazySymbolAtom(
     symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
     symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
 
-    const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+    const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0)));
     errdefer self.freeAtom(atom_index);
 
     log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
@@ -1322,7 +1322,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
     const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
 
     log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
-    const required_alignment = decl.getAlignment(mod);
+    const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0));
 
     const decl_metadata = self.decls.get(decl_index).?;
     const atom_index = decl_metadata.atom;
src/link/Dwarf.zig
@@ -341,23 +341,22 @@ pub const DeclState = struct {
                             try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
                         }
                     },
-                    .struct_type => |struct_type| s: {
-                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
+                    .struct_type => |struct_type| {
                         // DW.AT.name, DW.FORM.string
                         try ty.print(dbg_info_buffer.writer(), mod);
                         try dbg_info_buffer.append(0);
 
-                        if (struct_obj.layout == .Packed) {
+                        if (struct_type.layout == .Packed) {
                             log.debug("TODO implement .debug_info for packed structs", .{});
                             break :blk;
                         }
 
                         for (
-                            struct_obj.fields.keys(),
-                            struct_obj.fields.values(),
-                            0..,
-                        ) |field_name_ip, field, field_index| {
-                            if (!field.ty.hasRuntimeBits(mod)) continue;
+                            struct_type.field_names.get(ip),
+                            struct_type.field_types.get(ip),
+                            struct_type.offsets.get(ip),
+                        ) |field_name_ip, field_ty, field_off| {
+                            if (!field_ty.toType().hasRuntimeBits(mod)) continue;
                             const field_name = ip.stringToSlice(field_name_ip);
                             // DW.AT.member
                             try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
@@ -368,9 +367,8 @@ pub const DeclState = struct {
                             // DW.AT.type, DW.FORM.ref4
                             var index = dbg_info_buffer.items.len;
                             try dbg_info_buffer.resize(index + 4);
-                            try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
+                            try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
                             // DW.AT.data_member_location, DW.FORM.udata
-                            const field_off = ty.structFieldOffset(field_index, mod);
                             try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
                         }
                     },
@@ -416,8 +414,8 @@ pub const DeclState = struct {
             .Union => {
                 const union_obj = mod.typeToUnion(ty).?;
                 const layout = mod.getUnionLayout(union_obj);
-                const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
-                const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
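+                // The member with the stricter alignment is laid out first:
+                // the tag when tag_align >= payload_align, otherwise the payload.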
+                const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0;
+                const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size;
                 // TODO this is temporary to match current state of unions in Zig - we don't yet have
                 // safety checks implemented meaning the implicit tag is not yet stored and generated
                 // for untagged unions.
@@ -496,11 +494,11 @@ pub const DeclState = struct {
             .ErrorUnion => {
                 const error_ty = ty.errorUnionSet(mod);
                 const payload_ty = ty.errorUnionPayload(mod);
-                const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod);
+                const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod);
                 const error_align = Type.anyerror.abiAlignment(mod);
                 const abi_size = ty.abiSize(mod);
-                const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0;
-                const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod);
+                const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0;
+                const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod);
 
                 // DW.AT.structure_type
                 try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_type));
src/link/Elf.zig
@@ -409,7 +409,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     const image_base = self.calcImageBase();
 
     if (self.phdr_table_index == null) {
-        self.phdr_table_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_table_index = @intCast(self.phdrs.items.len);
         const p_align: u16 = switch (self.ptr_width) {
             .p32 => @alignOf(elf.Elf32_Phdr),
             .p64 => @alignOf(elf.Elf64_Phdr),
@@ -428,7 +428,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_table_load_index == null) {
-        self.phdr_table_load_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_table_load_index = @intCast(self.phdrs.items.len);
         // TODO Same as for GOT
         try self.phdrs.append(gpa, .{
             .p_type = elf.PT_LOAD,
@@ -444,7 +444,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_re_index == null) {
-        self.phdr_load_re_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_load_re_index = @intCast(self.phdrs.items.len);
         const file_size = self.base.options.program_code_size_hint;
         const p_align = self.page_size;
         const off = self.findFreeSpace(file_size, p_align);
@@ -465,7 +465,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_got_index == null) {
-        self.phdr_got_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_got_index = @intCast(self.phdrs.items.len);
         const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
         // We really only need ptr alignment but since we are using PROGBITS, linux requires
         // page align.
@@ -490,7 +490,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_ro_index == null) {
-        self.phdr_load_ro_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_load_ro_index = @intCast(self.phdrs.items.len);
         // TODO Find a hint about how much data need to be in rodata ?
         const file_size = 1024;
         // Same reason as for GOT
@@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_rw_index == null) {
-        self.phdr_load_rw_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_load_rw_index = @intCast(self.phdrs.items.len);
         // TODO Find a hint about how much data need to be in data ?
         const file_size = 1024;
         // Same reason as for GOT
@@ -536,7 +536,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_zerofill_index == null) {
-        self.phdr_load_zerofill_index = @as(u16, @intCast(self.phdrs.items.len));
+        self.phdr_load_zerofill_index = @intCast(self.phdrs.items.len);
         const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
         const off = self.phdrs.items[self.phdr_load_rw_index.?].p_offset;
         log.debug("found PT_LOAD zerofill free space 0x{x} to 0x{x}", .{ off, off });
@@ -556,7 +556,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.shstrtab_section_index == null) {
-        self.shstrtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.shstrtab_section_index = @intCast(self.shdrs.items.len);
         assert(self.shstrtab.buffer.items.len == 0);
         try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
         const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
@@ -578,7 +578,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.strtab_section_index == null) {
-        self.strtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.strtab_section_index = @intCast(self.shdrs.items.len);
         assert(self.strtab.buffer.items.len == 0);
         try self.strtab.buffer.append(gpa, 0); // need a 0 at position 0
         const off = self.findFreeSpace(self.strtab.buffer.items.len, 1);
@@ -600,7 +600,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.text_section_index == null) {
-        self.text_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.text_section_index = @intCast(self.shdrs.items.len);
         const phdr = &self.phdrs.items[self.phdr_load_re_index.?];
         try self.shdrs.append(gpa, .{
             .sh_name = try self.shstrtab.insert(gpa, ".text"),
@@ -620,7 +620,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.got_section_index == null) {
-        self.got_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.got_section_index = @intCast(self.shdrs.items.len);
         const phdr = &self.phdrs.items[self.phdr_got_index.?];
         try self.shdrs.append(gpa, .{
             .sh_name = try self.shstrtab.insert(gpa, ".got"),
@@ -639,7 +639,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.rodata_section_index == null) {
-        self.rodata_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.rodata_section_index = @intCast(self.shdrs.items.len);
         const phdr = &self.phdrs.items[self.phdr_load_ro_index.?];
         try self.shdrs.append(gpa, .{
             .sh_name = try self.shstrtab.insert(gpa, ".rodata"),
@@ -659,7 +659,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.data_section_index == null) {
-        self.data_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.data_section_index = @intCast(self.shdrs.items.len);
         const phdr = &self.phdrs.items[self.phdr_load_rw_index.?];
         try self.shdrs.append(gpa, .{
             .sh_name = try self.shstrtab.insert(gpa, ".data"),
@@ -679,7 +679,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.bss_section_index == null) {
-        self.bss_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.bss_section_index = @intCast(self.shdrs.items.len);
         const phdr = &self.phdrs.items[self.phdr_load_zerofill_index.?];
         try self.shdrs.append(gpa, .{
             .sh_name = try self.shstrtab.insert(gpa, ".bss"),
@@ -699,7 +699,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.symtab_section_index == null) {
-        self.symtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
+        self.symtab_section_index = @intCast(self.shdrs.items.len);
         const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
         const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
         const file_size = self.base.options.symbol_count_hint * each_size;
@@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
             .sh_size = file_size,
             // The section header index of the associated string table.
             .sh_link = self.strtab_section_index.?,
-            .sh_info = @as(u32, @intCast(self.symbols.items.len)),
+            .sh_info = @intCast(self.symbols.items.len),
             .sh_addralign = min_align,
             .sh_entsize = each_size,
         });
@@ -723,7 +723,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
 
     if (self.dwarf) |*dw| {
         if (self.debug_str_section_index == null) {
-            self.debug_str_section_index = @as(u16, @intCast(self.shdrs.items.len));
+            self.debug_str_section_index = @intCast(self.shdrs.items.len);
             assert(dw.strtab.buffer.items.len == 0);
             try dw.strtab.buffer.append(gpa, 0);
             try self.shdrs.append(gpa, .{
@@ -743,7 +743,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_info_section_index == null) {
-            self.debug_info_section_index = @as(u16, @intCast(self.shdrs.items.len));
+            self.debug_info_section_index = @intCast(self.shdrs.items.len);
             const file_size_hint = 200;
             const p_align = 1;
             const off = self.findFreeSpace(file_size_hint, p_align);
@@ -768,7 +768,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_abbrev_section_index == null) {
-            self.debug_abbrev_section_index = @as(u16, @intCast(self.shdrs.items.len));
+            self.debug_abbrev_section_index = @intCast(self.shdrs.items.len);
             const file_size_hint = 128;
             const p_align = 1;
             const off = self.findFreeSpace(file_size_hint, p_align);
@@ -793,7 +793,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_aranges_section_index == null) {
-            self.debug_aranges_section_index = @as(u16, @intCast(self.shdrs.items.len));
+            self.debug_aranges_section_index = @intCast(self.shdrs.items.len);
             const file_size_hint = 160;
             const p_align = 16;
             const off = self.findFreeSpace(file_size_hint, p_align);
@@ -818,7 +818,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_line_section_index == null) {
-            self.debug_line_section_index = @as(u16, @intCast(self.shdrs.items.len));
+            self.debug_line_section_index = @intCast(self.shdrs.items.len);
             const file_size_hint = 250;
             const p_align = 1;
             const off = self.findFreeSpace(file_size_hint, p_align);
@@ -2666,12 +2666,12 @@ fn updateDeclCode(
 
     const old_size = atom_ptr.size;
     const old_vaddr = atom_ptr.value;
-    atom_ptr.alignment = math.log2_int(u64, required_alignment);
+    atom_ptr.alignment = required_alignment;
     atom_ptr.size = code.len;
 
     if (old_size > 0 and self.base.child_pid == null) {
         const capacity = atom_ptr.capacity(self);
-        const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
+        const need_realloc = code.len > capacity or !required_alignment.check(sym.value);
         if (need_realloc) {
             try atom_ptr.grow(self);
             log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, old_vaddr, atom_ptr.value });
@@ -2869,7 +2869,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
     const mod = self.base.options.module.?;
     const zig_module = self.file(self.zig_module_index.?).?.zig_module;
 
-    var required_alignment: u32 = undefined;
+    var required_alignment: InternPool.Alignment = .none;
     var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
@@ -2918,7 +2918,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
     const atom_ptr = local_sym.atom(self).?;
     atom_ptr.alive = true;
     atom_ptr.name_offset = name_str_index;
-    atom_ptr.alignment = math.log2_int(u64, required_alignment);
+    atom_ptr.alignment = required_alignment;
     atom_ptr.size = code.len;
 
     try atom_ptr.allocate(self);
@@ -2995,7 +2995,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
     const atom_ptr = local_sym.atom(self).?;
     atom_ptr.alive = true;
     atom_ptr.name_offset = name_str_index;
-    atom_ptr.alignment = math.log2_int(u64, required_alignment);
+    atom_ptr.alignment = required_alignment;
     atom_ptr.size = code.len;
 
     try atom_ptr.allocate(self);
src/link/MachO.zig
@@ -1425,7 +1425,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
 
 const CreateAtomOpts = struct {
     size: u64 = 0,
-    alignment: u32 = 0,
+    alignment: Alignment = .@"1",
 };
 
 pub fn createAtom(self: *MachO, sym_index: u32, opts: CreateAtomOpts) !Atom.Index {
@@ -1473,7 +1473,7 @@ pub fn createTentativeDefAtoms(self: *MachO) !void {
 
         const atom_index = try self.createAtom(global.sym_index, .{
             .size = size,
-            .alignment = alignment,
+            .alignment = @enumFromInt(alignment),
         });
         const atom = self.getAtomPtr(atom_index);
         atom.file = global.file;
@@ -1493,7 +1493,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
     const sym_index = try self.allocateSymbol();
     const atom_index = try self.createAtom(sym_index, .{
         .size = @sizeOf(u64),
-        .alignment = 3,
+        .alignment = .@"8",
     });
     try self.atom_by_index_table.putNoClobber(self.base.allocator, sym_index, atom_index);
 
@@ -1510,7 +1510,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
     switch (self.mode) {
         .zld => self.addAtomToSection(atom_index),
         .incremental => {
-            sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
+            sym.n_value = try self.allocateAtom(atom_index, atom.size, .@"8");
             log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
             var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
             try self.writeAtom(atom_index, &buffer);
@@ -1521,7 +1521,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
 fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: SymbolWithLoc) !Atom.Index {
     const gpa = self.base.allocator;
     const size = 3 * @sizeOf(u64);
-    const required_alignment: u32 = 1;
+    const required_alignment: Alignment = .@"1";
     const sym_index = try self.allocateSymbol();
     const atom_index = try self.createAtom(sym_index, .{});
     try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -2030,10 +2030,10 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
     // capacity, insert a free list node for it.
 }
 
-fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 {
     const atom = self.getAtom(atom_index);
     const sym = atom.getSymbol(self);
-    const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value;
+    const align_ok = alignment.check(sym.n_value);
     const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
     if (!need_realloc) return sym.n_value;
     return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -2350,7 +2350,7 @@ fn updateLazySymbolAtom(
     const gpa = self.base.allocator;
     const mod = self.base.options.module.?;
 
-    var required_alignment: u32 = undefined;
+    var required_alignment: Alignment = .none;
     var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
@@ -2617,7 +2617,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
         sym.n_desc = 0;
 
         const capacity = atom.capacity(self);
-        const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
+        const need_realloc = code_len > capacity or !required_alignment.check(sym.n_value);
 
         if (need_realloc) {
             const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
@@ -3204,7 +3204,7 @@ pub fn addAtomToSection(self: *MachO, atom_index: Atom.Index) void {
     self.sections.set(sym.n_sect - 1, section);
 }
 
-fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3247,7 +3247,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
             const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity;
             const capacity_end_vaddr = sym.n_value + capacity;
             const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
-            const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
+            const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned);
             if (new_start_vaddr < ideal_capacity_end_vaddr) {
                 // Additional bookkeeping here to notice if this free list node
                 // should be deleted because the atom that it points to has grown to take up
@@ -3276,11 +3276,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
             const last_symbol = last.getSymbol(self);
             const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
             const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
-            const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
+            const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr);
             atom_placement = last_index;
             break :blk new_start_vaddr;
         } else {
-            break :blk mem.alignForward(u64, segment.vmaddr, alignment);
+            break :blk alignment.forward(segment.vmaddr);
         }
     };
 
@@ -3295,10 +3295,8 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
         self.segment_table_dirty = true;
     }
 
-    const align_pow = @as(u32, @intCast(math.log2(alignment)));
-    if (header.@"align" < align_pow) {
-        header.@"align" = align_pow;
-    }
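+    // The section header records alignment as a log2 value; widen it if this
+    // atom requires stricter alignment than the section currently has.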
+    assert(alignment != .none);
+    header.@"align" = @min(header.@"align", @intFromEnum(alignment));
     self.getAtomPtr(atom_index).size = new_atom_size;
 
     if (atom.prev_index) |prev_index| {
@@ -3338,7 +3336,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u
 
 pub fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
     for (self.segments.items, 0..) |seg, i| {
-        const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
+        const indexes = self.getSectionIndexes(@intCast(i));
         var out_seg = seg;
         out_seg.cmdsize = @sizeOf(macho.segment_command_64);
         out_seg.nsects = 0;
@@ -5526,6 +5524,7 @@ const Trie = @import("MachO/Trie.zig");
 const Type = @import("../type.zig").Type;
 const TypedValue = @import("../TypedValue.zig");
 const Value = @import("../value.zig").Value;
+const Alignment = Atom.Alignment;
 
 pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
 pub const Bind = @import("MachO/dyld_info/bind.zig").Bind(*const MachO, SymbolWithLoc);
src/link/Plan9.zig
@@ -1106,7 +1106,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
     const gpa = self.base.allocator;
     const mod = self.base.options.module.?;
 
-    var required_alignment: u32 = undefined;
+    var required_alignment: InternPool.Alignment = .none;
     var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
src/link/Wasm.zig
@@ -187,8 +187,10 @@ debug_pubtypes_atom: ?Atom.Index = null,
 /// rather than by the linker.
 synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{},
 
+pub const Alignment = types.Alignment;
+
 pub const Segment = struct {
-    alignment: u32,
+    alignment: Alignment,
     size: u32,
     offset: u32,
     flags: u32,
@@ -1490,7 +1492,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
     try atom.code.appendSlice(wasm.base.allocator, code);
     try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
 
-    atom.size = @as(u32, @intCast(code.len));
+    atom.size = @intCast(code.len);
     if (code.len == 0) return;
     atom.alignment = decl.getAlignment(mod);
 }
@@ -2050,7 +2052,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
     };
 
     const segment: *Segment = &wasm.segments.items[final_index];
-    segment.alignment = @max(segment.alignment, atom.alignment);
+    segment.alignment = segment.alignment.max(atom.alignment);
 
     try wasm.appendAtomAtIndex(final_index, atom_index);
 }
@@ -2121,7 +2123,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
                     }
                 }
             }
-            offset = std.mem.alignForward(u32, offset, atom.alignment);
+            offset = @intCast(atom.alignment.forward(offset));
             atom.offset = offset;
             log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
                 symbol_loc.getName(wasm),
@@ -2132,7 +2134,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
             offset += atom.size;
             atom_index = atom.prev orelse break;
         }
-        segment.size = std.mem.alignForward(u32, offset, segment.alignment);
+        segment.size = @intCast(segment.alignment.forward(offset));
     }
 }
 
@@ -2351,7 +2353,7 @@ fn createSyntheticFunction(
         .offset = 0,
         .sym_index = loc.index,
         .file = null,
-        .alignment = 1,
+        .alignment = .@"1",
         .next = null,
         .prev = null,
         .code = function_body.moveToUnmanaged(),
@@ -2382,11 +2384,11 @@ pub fn createFunction(
     const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
     const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
     atom.* = .{
-        .size = @as(u32, @intCast(function_body.items.len)),
+        .size = @intCast(function_body.items.len),
         .offset = 0,
         .sym_index = loc.index,
         .file = null,
-        .alignment = 1,
+        .alignment = .@"1",
         .next = null,
         .prev = null,
         .code = function_body.moveToUnmanaged(),
@@ -2734,8 +2736,8 @@ fn setupMemory(wasm: *Wasm) !void {
     const page_size = std.wasm.page_size; // 64kb
     // Use the user-provided stack size or else we use 1MB by default
     const stack_size = wasm.base.options.stack_size_override orelse page_size * 16;
-    const stack_alignment = 16; // wasm's stack alignment as specified by tool-convention
-    const heap_alignment = 16; // wasm's heap alignment as specified by tool-convention
+    const stack_alignment: Alignment = .@"16"; // wasm's stack alignment, as specified by tool-conventions
+    const heap_alignment: Alignment = .@"16"; // wasm's heap alignment, as specified by tool-conventions
 
     // Always place the stack at the start by default
     // unless the user specified the global-base flag
@@ -2748,7 +2750,7 @@ fn setupMemory(wasm: *Wasm) !void {
     const is_obj = wasm.base.options.output_mode == .Obj;
 
     if (place_stack_first and !is_obj) {
-        memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
+        memory_ptr = stack_alignment.forward(memory_ptr);
         memory_ptr += stack_size;
         // We always put the stack pointer global at index 0
         wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
@@ -2758,7 +2760,7 @@ fn setupMemory(wasm: *Wasm) !void {
     var data_seg_it = wasm.data_segments.iterator();
     while (data_seg_it.next()) |entry| {
         const segment = &wasm.segments.items[entry.value_ptr.*];
-        memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
+        memory_ptr = segment.alignment.forward(memory_ptr);
 
         // set TLS-related symbols
         if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
@@ -2768,7 +2770,7 @@ fn setupMemory(wasm: *Wasm) !void {
             }
             if (wasm.findGlobalSymbol("__tls_align")) |loc| {
                 const sym = loc.getSymbol(wasm);
-                wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment);
+                wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?);
             }
             if (wasm.findGlobalSymbol("__tls_base")) |loc| {
                 const sym = loc.getSymbol(wasm);
@@ -2795,7 +2797,7 @@ fn setupMemory(wasm: *Wasm) !void {
     }
 
     if (!place_stack_first and !is_obj) {
-        memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
+        memory_ptr = stack_alignment.forward(memory_ptr);
         memory_ptr += stack_size;
         wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
     }
@@ -2804,7 +2806,7 @@ fn setupMemory(wasm: *Wasm) !void {
     // We must set its virtual address so it can be used in relocations.
     if (wasm.findGlobalSymbol("__heap_base")) |loc| {
         const symbol = loc.getSymbol(wasm);
-        symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment)));
+        symbol.virtual_address = @intCast(heap_alignment.forward(memory_ptr));
     }
 
     // Setup the max amount of pages
@@ -2879,7 +2881,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
                     flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
                 }
                 try wasm.segments.append(wasm.base.allocator, .{
-                    .alignment = 1,
+                    .alignment = .@"1",
                     .size = 0,
                     .offset = 0,
                     .flags = flags,
@@ -2954,7 +2956,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
 /// Appends a new segment with default field values
 fn appendDummySegment(wasm: *Wasm) !void {
     try wasm.segments.append(wasm.base.allocator, .{
-        .alignment = 1,
+        .alignment = .@"1",
         .size = 0,
         .offset = 0,
         .flags = 0,
@@ -3011,7 +3013,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
     // the pointers into the list using addends which are appended to the relocation.
     const names_atom_index = try wasm.createAtom();
     const names_atom = wasm.getAtomPtr(names_atom_index);
-    names_atom.alignment = 1;
+    names_atom.alignment = .@"1";
     const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
     const names_symbol = &wasm.symbols.items[names_atom.sym_index];
     names_symbol.* = .{
@@ -3085,7 +3087,7 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !
         .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
     };
 
-    atom.alignment = 1; // debug sections are always 1-byte-aligned
+    atom.alignment = .@"1"; // debug sections are always 1-byte-aligned
     return atom_index;
 }
 
@@ -4724,12 +4726,12 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
     for (wasm.segment_info.values()) |segment_info| {
         log.debug("Emit segment: {s} align({d}) flags({b})", .{
             segment_info.name,
-            @ctz(segment_info.alignment),
+            segment_info.alignment,
             segment_info.flags,
         });
         try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len)));
         try writer.writeAll(segment_info.name);
-        try leb.writeULEB128(writer, @ctz(segment_info.alignment));
+        try leb.writeULEB128(writer, segment_info.alignment.toLog2Units());
         try leb.writeULEB128(writer, segment_info.flags);
     }
 
src/codegen.zig
@@ -22,6 +22,7 @@ const Type = @import("type.zig").Type;
 const TypedValue = @import("TypedValue.zig");
 const Value = @import("value.zig").Value;
 const Zir = @import("Zir.zig");
+const Alignment = InternPool.Alignment;
 
 pub const Result = union(enum) {
     /// The `code` parameter passed to `generateSymbol` has the value ok.
@@ -116,7 +117,8 @@ pub fn generateLazySymbol(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
     lazy_sym: link.File.LazySymbol,
-    alignment: *u32,
+    // TODO don't use an "out" parameter like this; put it in the result instead
+    alignment: *Alignment,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
     reloc_info: RelocInfo,
@@ -141,7 +143,7 @@ pub fn generateLazySymbol(
     }
 
     if (lazy_sym.ty.isAnyError(mod)) {
-        alignment.* = 4;
+        alignment.* = .@"4";
         const err_names = mod.global_error_set.keys();
         mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian);
         var offset = code.items.len;
@@ -157,7 +159,7 @@ pub fn generateLazySymbol(
         mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
         return Result.ok;
     } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) {
-        alignment.* = 1;
+        alignment.* = .@"1";
         for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| {
             const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
             try code.ensureUnusedCapacity(tag_name.len + 1);
@@ -273,7 +275,7 @@ pub fn generateSymbol(
             const abi_align = typed_value.ty.abiAlignment(mod);
 
             // error value first when its type is larger than the error union's payload
-            if (error_align > payload_align) {
+            if (error_align.compare(.gt, payload_align)) {
                 try code.writer().writeInt(u16, err_val, endian);
             }
 
@@ -291,7 +293,7 @@ pub fn generateSymbol(
                     .fail => |em| return .{ .fail = em },
                 }
                 const unpadded_end = code.items.len - begin;
-                const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
+                const padded_end = abi_align.forward(unpadded_end);
                 const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
 
                 if (padding > 0) {
@@ -300,11 +302,11 @@ pub fn generateSymbol(
             }
 
             // Payload size is larger than error set, so emit our error set last
-            if (error_align <= payload_align) {
+            if (error_align.compare(.lte, payload_align)) {
                 const begin = code.items.len;
                 try code.writer().writeInt(u16, err_val, endian);
                 const unpadded_end = code.items.len - begin;
-                const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
+                const padded_end = abi_align.forward(unpadded_end);
                 const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
 
                 if (padding > 0) {
@@ -474,23 +476,18 @@ pub fn generateSymbol(
                     }
                 }
             },
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-
-                if (struct_obj.layout == .Packed) {
-                    const fields = struct_obj.fields.values();
+            .struct_type => |struct_type| switch (struct_type.layout) {
+                .Packed => {
                     const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
                         return error.Overflow;
                     const current_pos = code.items.len;
                     try code.resize(current_pos + abi_size);
                     var bits: u16 = 0;
 
-                    for (fields, 0..) |field, index| {
-                        const field_ty = field.ty;
-
+                    for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
                         const field_val = switch (aggregate.storage) {
                             .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
-                                .ty = field_ty.toIntern(),
+                                .ty = field_ty,
                                 .storage = .{ .u64 = bytes[index] },
                             } }),
                             .elems => |elems| elems[index],
@@ -499,48 +496,51 @@ pub fn generateSymbol(
 
                         // pointer may point to a decl which must be marked used
                         // but can also result in a relocation. Therefore we handle those separately.
-                        if (field_ty.zigTypeTag(mod) == .Pointer) {
-                            const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse
+                        if (field_ty.toType().zigTypeTag(mod) == .Pointer) {
+                            const field_size = math.cast(usize, field_ty.toType().abiSize(mod)) orelse
                                 return error.Overflow;
                             var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
                             defer tmp_list.deinit();
                             switch (try generateSymbol(bin_file, src_loc, .{
-                                .ty = field_ty,
+                                .ty = field_ty.toType(),
                                 .val = field_val.toValue(),
                             }, &tmp_list, debug_output, reloc_info)) {
                                 .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
                                 .fail => |em| return Result{ .fail = em },
                             }
                         } else {
-                            field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
+                            field_val.toValue().writeToPackedMemory(field_ty.toType(), mod, code.items[current_pos..], bits) catch unreachable;
                         }
-                        bits += @as(u16, @intCast(field_ty.bitSize(mod)));
+                        bits += @as(u16, @intCast(field_ty.toType().bitSize(mod)));
                     }
-                } else {
+                },
+                .Auto, .Extern => {
                     const struct_begin = code.items.len;
-                    const fields = struct_obj.fields.values();
-
-                    var it = typed_value.ty.iterateStructOffsets(mod);
+                    const field_types = struct_type.field_types.get(ip);
+                    const offsets = struct_type.offsets.get(ip);
 
-                    while (it.next()) |field_offset| {
-                        const field_ty = fields[field_offset.field].ty;
-
-                        if (!field_ty.hasRuntimeBits(mod)) continue;
+                    var it = struct_type.iterateRuntimeOrder(ip);
+                    while (it.next()) |field_index| {
+                        const field_ty = field_types[field_index];
+                        if (!field_ty.toType().hasRuntimeBits(mod)) continue;
 
                         const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
                             .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
-                                .ty = field_ty.toIntern(),
-                                .storage = .{ .u64 = bytes[field_offset.field] },
+                                .ty = field_ty,
+                                .storage = .{ .u64 = bytes[field_index] },
                             } }),
-                            .elems => |elems| elems[field_offset.field],
+                            .elems => |elems| elems[field_index],
                             .repeated_elem => |elem| elem,
                         };
 
-                        const padding = math.cast(usize, field_offset.offset - (code.items.len - struct_begin)) orelse return error.Overflow;
+                        const padding = math.cast(
+                            usize,
+                            offsets[field_index] - (code.items.len - struct_begin),
+                        ) orelse return error.Overflow;
                         if (padding > 0) try code.appendNTimes(0, padding);
 
                         switch (try generateSymbol(bin_file, src_loc, .{
-                            .ty = field_ty,
+                            .ty = field_ty.toType(),
                             .val = field_val.toValue(),
                         }, code, debug_output, reloc_info)) {
                             .ok => {},
@@ -548,9 +548,16 @@ pub fn generateSymbol(
                         }
                     }
 
-                    const padding = math.cast(usize, std.mem.alignForward(u64, it.offset, @max(it.big_align, 1)) - (code.items.len - struct_begin)) orelse return error.Overflow;
+                    const size = struct_type.size(ip).*;
+                    const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
+
+                    const padding = math.cast(
+                        usize,
+                        std.mem.alignForward(u64, size, @max(alignment, 1)) -
+                            (code.items.len - struct_begin),
+                    ) orelse return error.Overflow;
                     if (padding > 0) try code.appendNTimes(0, padding);
-                }
+                },
             },
             else => unreachable,
         },
@@ -565,7 +572,7 @@ pub fn generateSymbol(
             }
 
             // Check if we should store the tag first.
-            if (layout.tag_size > 0 and layout.tag_align >= layout.payload_align) {
+            if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
                 switch (try generateSymbol(bin_file, src_loc, .{
                     .ty = typed_value.ty.unionTagType(mod).?,
                     .val = un.tag.toValue(),
@@ -595,7 +602,7 @@ pub fn generateSymbol(
                 }
             }
 
-            if (layout.tag_size > 0 and layout.tag_align < layout.payload_align) {
+            if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
                 switch (try generateSymbol(bin_file, src_loc, .{
                     .ty = union_obj.enum_tag_ty.toType(),
                     .val = un.tag.toValue(),
@@ -695,10 +702,10 @@ fn lowerParentPtr(
                             @intCast(field.index),
                             mod,
                         )),
-                        .Packed => if (mod.typeToStruct(base_type.toType())) |struct_obj|
-                            math.divExact(u16, struct_obj.packedFieldBitOffset(
-                                mod,
-                                @intCast(field.index),
+                        .Packed => if (mod.typeToStruct(base_type.toType())) |struct_type|
+                            math.divExact(u16, mod.structPackedFieldBitOffset(
+                                struct_type,
+                                field.index,
                             ), 8) catch |err| switch (err) {
                                 error.UnexpectedRemainder => 0,
                                 error.DivisionByZero => unreachable,
@@ -844,12 +851,12 @@ fn genDeclRef(
     // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
     if (tv.ty.castPtrToFn(mod)) |fn_ty| {
         if (mod.typeToFunc(fn_ty).?.is_generic) {
-            return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
+            return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod).toByteUnitsOptional().? });
         }
     } else if (tv.ty.zigTypeTag(mod) == .Pointer) {
         const elem_ty = tv.ty.elemType2(mod);
         if (!elem_ty.hasRuntimeBits(mod)) {
-            return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) });
+            return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod).toByteUnitsOptional().? });
         }
     }
 
@@ -1036,10 +1043,10 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
     const payload_align = payload_ty.abiAlignment(mod);
     const error_align = Type.anyerror.abiAlignment(mod);
-    if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+    if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return 0;
     } else {
-        return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align);
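+        // The error value is stored first; the payload follows at
+        // `anyerror`'s ABI size rounded up to the payload's alignment.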
+        return payload_align.forward(Type.anyerror.abiSize(mod));
     }
 }
 
@@ -1047,8 +1054,8 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
     const payload_align = payload_ty.abiAlignment(mod);
     const error_align = Type.anyerror.abiAlignment(mod);
-    if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-        return mem.alignForward(u64, payload_ty.abiSize(mod), error_align);
+    if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+        return error_align.forward(payload_ty.abiSize(mod));
     } else {
         return 0;
     }
src/InternPool.zig
@@ -1,7 +1,7 @@
 //! All interned objects have both a value and a type.
 //! This data structure is self-contained, with the following exceptions:
-//! * type_struct via Module.Struct.Index
-//! * type_opaque via Module.Namespace.Index and Module.Decl.Index
+//! * Module.Namespace has a pointer to Module.File
+//! * Module.Decl has a pointer to Module.CaptureScope
 
 /// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
 /// constructed lazily.
@@ -39,17 +39,11 @@ allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{},
 /// Same pattern as with `decls_free_list`.
 namespaces_free_list: std.ArrayListUnmanaged(Module.Namespace.Index) = .{},
 
-/// Struct objects are stored in this data structure because:
-/// * They contain pointers such as the field maps.
-/// * They need to be mutated after creation.
-allocated_structs: std.SegmentedList(Module.Struct, 0) = .{},
-/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack.
-structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{},
-
 /// Some types such as enums, structs, and unions need to store mappings from field names
 /// to field index, or value to field index. In such cases, they will store the underlying
 /// field names and values directly, relying on one of these maps, stored separately,
 /// to provide lookup.
+/// These are not serialized; they are recomputed upon deserialization.
 maps: std.ArrayListUnmanaged(FieldMap) = .{},
 
 /// Used for finding the index inside `string_bytes`.
@@ -365,11 +359,264 @@ pub const Key = union(enum) {
         namespace: Module.Namespace.Index,
     };
 
-    pub const StructType = extern struct {
-        /// The `none` tag is used to represent a struct with no fields.
-        index: Module.Struct.OptionalIndex,
-        /// May be `none` if the struct has no declarations.
+    /// Although packed structs and non-packed structs are encoded differently,
+    /// this struct is used for both categories since they share some common
+    /// functionality.
+    pub const StructType = struct {
+        extra_index: u32,
+        /// `none` when the struct is `@TypeOf(.{})`.
+        decl: Module.Decl.OptionalIndex,
+        /// `none` when the struct has no declarations.
         namespace: Module.Namespace.OptionalIndex,
+        /// Index of the struct_decl ZIR instruction.
+        zir_index: Zir.Inst.Index,
+        layout: std.builtin.Type.ContainerLayout,
+        field_names: NullTerminatedString.Slice,
+        field_types: Index.Slice,
+        field_inits: Index.Slice,
+        field_aligns: Alignment.Slice,
+        runtime_order: RuntimeOrder.Slice,
+        comptime_bits: ComptimeBits,
+        offsets: Offsets,
+        names_map: MapIndex,
+
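+        /// One bit per field, packed into `u32` words stored in `extra`;
+        /// a set bit marks the field as a comptime field.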
+        pub const ComptimeBits = struct {
+            start: u32,
+            len: u32,
+
+            pub fn get(this: @This(), ip: *const InternPool) []u32 {
+                return ip.extra.items[this.start..][0..this.len];
+            }
+
+            pub fn getBit(this: @This(), ip: *const InternPool, i: usize) bool {
+                if (this.len == 0) return false;
+                return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
+            }
+
+            pub fn setBit(this: @This(), ip: *const InternPool, i: usize) void {
+                this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
+            }
+
+            pub fn clearBit(this: @This(), ip: *const InternPool, i: usize) void {
+                this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
+            }
+        };
+
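+        /// The byte offset of each field, stored in `extra`. These values
+        /// are populated when the struct's layout is resolved.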
+        pub const Offsets = struct {
+            start: u32,
+            len: u32,
+
+            pub fn get(this: @This(), ip: *const InternPool) []u32 {
+                return @ptrCast(ip.extra.items[this.start..][0..this.len]);
+            }
+        };
+
+        pub const RuntimeOrder = enum(u32) {
+            /// Placeholder until layout is resolved.
+            unresolved = std.math.maxInt(u32) - 0,
+            /// Field not present at runtime.
+            omitted = std.math.maxInt(u32) - 1,
+            _,
+
+            pub const Slice = struct {
+                start: u32,
+                len: u32,
+
+                pub fn get(slice: Slice, ip: *const InternPool) []RuntimeOrder {
+                    return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
+                }
+            };
+
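+            /// Converts a runtime order entry to a field index, returning
+            /// `null` for fields omitted at runtime.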
+            pub fn toInt(i: @This()) ?u32 {
+                return switch (i) {
+                    .omitted => null,
+                    .unresolved => unreachable,
+                    else => @intFromEnum(i),
+                };
+            }
+        };
+
+        /// Look up field index based on field name.
+        pub fn nameIndex(self: StructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
+            if (self.decl == .none) return null; // empty_struct_type
+            const map = &ip.maps.items[@intFromEnum(self.names_map)];
+            const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
+            const field_index = map.getIndexAdapted(name, adapter) orelse return null;
+            return @intCast(field_index);
+        }
+
+        /// Adds the field name to the struct's name map, returning the index
+        /// of an already-existing field with the same name, if any.
+        pub fn addFieldName(
+            self: @This(),
+            ip: *InternPool,
+            name: NullTerminatedString,
+        ) ?u32 {
+            return ip.addFieldName(self.names_map, self.field_names.start, name);
+        }
+
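+        /// An empty `field_aligns` slice means no field has an explicit
+        /// alignment; `.none` means the field uses its natural ABI alignment.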
+        pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
+            if (s.field_aligns.len == 0) return .none;
+            return s.field_aligns.get(ip)[i];
+        }
+
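+        /// An empty `field_inits` slice means no field has a default
+        /// initializer, in which case `.none` is returned.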
+        pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
+            if (s.field_inits.len == 0) return .none;
+            return s.field_inits.get(ip)[i];
+        }
+
+        /// Returns `none` if the struct is a tuple, since tuple fields have no names.
+        pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
+            if (s.field_names.len == 0) return .none;
+            return s.field_names.get(ip)[i].toOptional();
+        }
+
+        pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
+            return s.comptime_bits.getBit(ip, i);
+        }
+
+        pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
+            s.comptime_bits.setBit(ip, i);
+        }
+
+        /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
+        /// complicated logic.
+        pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
+            return switch (s.layout) {
+                .Packed => false,
+                .Auto, .Extern => s.flagsPtr(ip).known_non_opv,
+            };
+        }
+
+        /// The returned pointer expires with any addition to the `InternPool`.
+        /// Asserts the struct is not packed.
+        pub fn flagsPtr(self: @This(), ip: *InternPool) *Tag.TypeStruct.Flags {
+            assert(self.layout != .Packed);
+            const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
+            return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
+        }
+
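+        /// If field type resolution is still in progress, assumes the struct
+        /// has runtime bits, records that assumption in the flags, and
+        /// returns true. Packed structs never take this path.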
+        pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
+            if (s.layout == .Packed) return false;
+            const flags_ptr = s.flagsPtr(ip);
+            if (flags_ptr.field_types_wip) {
+                flags_ptr.assumed_runtime_bits = true;
+                return true;
+            }
+            return false;
+        }
+
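+        /// Returns true if a layout resolution dependency loop was detected;
+        /// otherwise marks layout resolution as in progress.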
+        pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
+            if (s.layout == .Packed) return false;
+            const flags_ptr = s.flagsPtr(ip);
+            if (flags_ptr.field_types_wip or flags_ptr.layout_wip) {
+                return true;
+            }
+            flags_ptr.layout_wip = true;
+            return false;
+        }
+
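+        /// Marks the struct as fully resolved, returning whether it already
+        /// was. Packed structs are always considered fully resolved.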
+        pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
+            if (s.layout == .Packed) return true;
+            const flags_ptr = s.flagsPtr(ip);
+            if (flags_ptr.fully_resolved) return true;
+            flags_ptr.fully_resolved = true;
+            return false;
+        }
+
+        pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
+            s.flagsPtr(ip).fully_resolved = false;
+        }
+
+        pub fn setRequiresComptime(s: @This(), ip: *InternPool) void {
+            assert(s.layout != .Packed);
+            const flags_ptr = s.flagsPtr(ip);
+            // Layout is resolved (and non-existent) in the case of a comptime-only struct.
+            flags_ptr.layout_resolved = true;
+            flags_ptr.requires_comptime = .yes;
+        }
+
+        /// The returned pointer expires with any addition to the `InternPool`.
+        /// Asserts the struct is not packed.
+        pub fn size(self: @This(), ip: *InternPool) *u32 {
+            assert(self.layout != .Packed);
+            const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
+            return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
+        }
+
+        /// The backing integer type of the packed struct. Whether zig chooses
+        /// this type or the user specifies it, it is stored here. This will be
+        /// set to `none` until the layout is resolved.
+        /// Asserts the struct is packed.
+        pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
+            assert(s.layout == .Packed);
+            const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
+            return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
+        }
+
+        /// Asserts the struct is not packed.
+        pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: Zir.Inst.Index) void {
+            assert(s.layout != .Packed);
+            const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
+            ip.extra.items[s.extra_index + field_index] = new_zir_index;
+        }
+
+        pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
+            const types = s.field_types.get(ip);
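+            // Field types start out as `.none` placeholders until resolution
+            // fills them in, so checking the first element suffices (zero
+            // fields trivially have types).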
+            return types.len == 0 or types[0] != .none;
+        }
+
+        pub fn haveLayout(s: @This(), ip: *InternPool) bool {
+            return switch (s.layout) {
+                .Packed => s.haveFieldTypes(ip),
+                .Auto, .Extern => s.flagsPtr(ip).layout_resolved,
+            };
+        }
+
+        pub fn isTuple(s: @This(), ip: *InternPool) bool {
+            return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
+        }
+
+        pub fn hasReorderedFields(s: @This(), ip: *InternPool) bool {
+            return s.layout == .Auto and s.flagsPtr(ip).has_reordered_fields;
+        }
+
+        pub const RuntimeOrderIterator = struct {
+            ip: *InternPool,
+            field_index: u32,
+            struct_type: InternPool.Key.StructType,
+
+            pub fn next(it: *@This()) ?u32 {
+                var i = it.field_index;
+
+                if (i >= it.struct_type.field_types.len)
+                    return null;
+
+                if (it.struct_type.hasReorderedFields(it.ip)) {
+                    it.field_index += 1;
+                    return it.struct_type.runtime_order.get(it.ip)[i].toInt();
+                }
+
+                while (it.struct_type.fieldIsComptime(it.ip, i)) {
+                    i += 1;
+                    if (i >= it.struct_type.field_types.len)
+                        return null;
+                }
+
+                it.field_index = i + 1;
+                return i;
+            }
+        };
+
+        /// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
+        /// Asserts the struct is not packed.
+        pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
+            assert(s.layout != .Packed);
+            return .{
+                .ip = ip,
+                .field_index = 0,
+                .struct_type = s,
+            };
+        }
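+
+        // Illustrative usage:
+        //
+        //     var it = struct_type.iterateRuntimeOrder(ip);
+        //     while (it.next()) |field_index| {
+        //         // `field_index` indexes the declared-order arrays
+        //         // (`field_types`, `offsets`, ...).
+        //     }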
     };
 
     pub const AnonStructType = struct {
@@ -870,7 +1117,6 @@ pub const Key = union(enum) {
             .simple_type,
             .simple_value,
             .opt,
-            .struct_type,
             .undef,
             .err,
             .enum_literal,
@@ -893,6 +1139,7 @@ pub const Key = union(enum) {
             .enum_type,
             .variable,
             .union_type,
+            .struct_type,
             => |x| Hash.hash(seed, asBytes(&x.decl)),
 
             .int => |int| {
@@ -969,11 +1216,11 @@ pub const Key = union(enum) {
 
                 if (child == .u8_type) {
                     switch (aggregate.storage) {
-                        .bytes => |bytes| for (bytes[0..@as(usize, @intCast(len))]) |byte| {
+                        .bytes => |bytes| for (bytes[0..@intCast(len)]) |byte| {
                             std.hash.autoHash(&hasher, KeyTag.int);
                             std.hash.autoHash(&hasher, byte);
                         },
-                        .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| {
+                        .elems => |elems| for (elems[0..@intCast(len)]) |elem| {
                             const elem_key = ip.indexToKey(elem);
                             std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
                             switch (elem_key) {
@@ -1123,10 +1370,6 @@ pub const Key = union(enum) {
                 const b_info = b.opt;
                 return std.meta.eql(a_info, b_info);
             },
-            .struct_type => |a_info| {
-                const b_info = b.struct_type;
-                return std.meta.eql(a_info, b_info);
-            },
             .un => |a_info| {
                 const b_info = b.un;
                 return std.meta.eql(a_info, b_info);
@@ -1298,6 +1541,10 @@ pub const Key = union(enum) {
                 const b_info = b.union_type;
                 return a_info.decl == b_info.decl;
             },
+            .struct_type => |a_info| {
+                const b_info = b.struct_type;
+                return a_info.decl == b_info.decl;
+            },
             .aggregate => |a_info| {
                 const b_info = b.aggregate;
                 if (a_info.ty != b_info.ty) return false;
@@ -1433,6 +1680,8 @@ pub const Key = union(enum) {
     }
 };
 
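+/// Resolution state for whether a type requires comptime; shared by struct
+/// and union types.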
+pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
+
 // Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a
 // minimal hashmap key, this type is a convenience type that contains info
 // needed by semantic analysis.
@@ -1474,8 +1723,6 @@ pub const UnionType = struct {
         }
     };
 
-    pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
-
     pub const Status = enum(u3) {
         none,
         field_types_wip,
@@ -1814,9 +2061,11 @@ pub const Index = enum(u32) {
         type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit,
         simple_type: struct { data: SimpleType },
         type_opaque: struct { data: *Key.OpaqueType },
-        type_struct: struct { data: Module.Struct.OptionalIndex },
+        type_struct: struct { data: *Tag.TypeStruct },
         type_struct_ns: struct { data: Module.Namespace.Index },
         type_struct_anon: DataIsExtraIndexOfTypeStructAnon,
+        type_struct_packed: struct { data: *Tag.TypeStructPacked },
+        type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
         type_tuple_anon: DataIsExtraIndexOfTypeStructAnon,
         type_union: struct { data: *Tag.TypeUnion },
         type_function: struct {
@@ -2241,17 +2490,22 @@ pub const Tag = enum(u8) {
     /// An opaque type.
     /// data is index of Key.OpaqueType in extra.
     type_opaque,
-    /// A struct type.
-    /// data is Module.Struct.OptionalIndex
-    /// The `none` tag is used to represent `@TypeOf(.{})`.
+    /// A non-packed struct type.
+    /// data is 0 or extra index of `TypeStruct`.
+    /// data == 0 represents `@TypeOf(.{})`.
     type_struct,
-    /// A struct type that has only a namespace; no fields, and there is no
-    /// Module.Struct object allocated for it.
+    /// A non-packed struct type that has only a namespace; no fields.
     /// data is Module.Namespace.Index.
     type_struct_ns,
     /// An AnonStructType which stores types, names, and values for fields.
     /// data is extra index of `TypeStructAnon`.
     type_struct_anon,
+    /// A packed struct; no fields have init values.
+    /// data is extra index of `TypeStructPacked`.
+    type_struct_packed,
+    /// A packed struct; one or more fields have init values.
+    /// data is extra index of `TypeStructPacked`.
+    type_struct_packed_inits,
     /// An AnonStructType which has only types and values for fields.
     /// data is extra index of `TypeStructAnon`.
     type_tuple_anon,
@@ -2461,9 +2715,10 @@ pub const Tag = enum(u8) {
             .type_enum_nonexhaustive => EnumExplicit,
             .simple_type => unreachable,
             .type_opaque => OpaqueType,
-            .type_struct => unreachable,
+            .type_struct => TypeStruct,
             .type_struct_ns => unreachable,
             .type_struct_anon => TypeStructAnon,
+            .type_struct_packed, .type_struct_packed_inits => TypeStructPacked,
             .type_tuple_anon => TypeStructAnon,
             .type_union => TypeUnion,
             .type_function => TypeFunction,
@@ -2634,11 +2889,90 @@ pub const Tag = enum(u8) {
             any_aligned_fields: bool,
             layout: std.builtin.Type.ContainerLayout,
             status: UnionType.Status,
-            requires_comptime: UnionType.RequiresComptime,
+            requires_comptime: RequiresComptime,
             assumed_runtime_bits: bool,
             _: u21 = 0,
         };
     };
+
+    /// Trailing:
+    /// 0. type: Index for each fields_len
+    /// 1. name: NullTerminatedString for each fields_len
+    /// 2. init: Index for each fields_len // if tag is type_struct_packed_inits
+    pub const TypeStructPacked = struct {
+        decl: Module.Decl.Index,
+        zir_index: Zir.Inst.Index,
+        fields_len: u32,
+        namespace: Module.Namespace.OptionalIndex,
+        backing_int_ty: Index,
+        names_map: MapIndex,
+    };
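+
+    // Illustrative trailing layout, in terms of the `end` index returned by
+    // `extraDataTrail` (see `extraPackedStructType` below):
+    //
+    //     field_types: extra.items[end ..][0..fields_len]
+    //     field_names: extra.items[end + fields_len ..][0..fields_len]
+    //     field_inits: extra.items[end + 2 * fields_len ..][0..fields_len]
+    //                  (only when the tag is `type_struct_packed_inits`)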
+
+    /// At first I thought of storing the denormalized data externally, such as...
+    ///
+    /// * runtime field order
+    /// * calculated field offsets
+    /// * size and alignment of the struct
+    ///
+    /// ...since these can be computed based on the other data here. However,
+    /// this data does need to be memoized, and therefore stored in memory
+    /// while the compiler is running, in order to avoid O(N^2) logic in many
+    /// places. Since the data can be stored compactly in the InternPool
+    /// representation, it is better for memory usage to store denormalized data
+    /// here, and potentially better for performance as well. It's also simpler
+    /// than coming up with some other scheme for the data.
+    ///
+    /// Trailing:
+    /// 0. type: Index for each field in declared order
+    /// 1. if not is_tuple:
+    ///    names_map: MapIndex,
+    ///    name: NullTerminatedString // for each field in declared order
+    /// 2. if any_default_inits:
+    ///    init: Index // for each field in declared order
+    /// 3. if has_namespace:
+    ///    namespace: Module.Namespace.Index
+    /// 4. if any_aligned_fields:
+    ///    align: Alignment // for each field in declared order
+    /// 5. if any_comptime_fields:
+    ///    field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0
+    /// 6. if has_reordered_fields:
+    ///    field_index: RuntimeOrder // for each field in runtime order
+    /// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved
+    pub const TypeStruct = struct {
+        decl: Module.Decl.Index,
+        zir_index: Zir.Inst.Index,
+        fields_len: u32,
+        flags: Flags,
+        size: u32,
+
+        pub const Flags = packed struct(u32) {
+            has_runtime_order: bool,
+            is_extern: bool,
+            known_non_opv: bool,
+            requires_comptime: RequiresComptime,
+            is_tuple: bool,
+            assumed_runtime_bits: bool,
+            has_namespace: bool,
+            has_reordered_fields: bool,
+            any_comptime_fields: bool,
+            any_default_inits: bool,
+            any_aligned_fields: bool,
+            /// `undefined` until `layout_resolved` is set.
+            alignment: Alignment,
+            /// Dependency loop detection when resolving field types.
+            field_types_wip: bool,
+            /// Dependency loop detection when resolving struct layout.
+            layout_wip: bool,
+            /// Determines whether `size`, `alignment`, runtime field order, and
+            /// field offsets are populated.
+            layout_resolved: bool,
+            /// The type and all of its fields have had their layout resolved, even through
+            /// pointers, which `layout_resolved` does not ensure.
+            fully_resolved: bool,
+
+            _: u10 = 0,
+        };
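+
+        // Bit budget check: 14 bool flags + 2 bits (`requires_comptime`)
+        // + 6 bits (`alignment`) + 10 padding bits = 32, matching
+        // `packed struct(u32)`.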
+    };
 };
 
 /// State that is mutable during semantic analysis. This data is not used for
@@ -2764,20 +3098,26 @@ pub const SimpleValue = enum(u32) {
 
 /// Stored as the log2 of a power-of-two byte alignment, with one special value to indicate none.
 pub const Alignment = enum(u6) {
+    @"1" = 0,
+    @"2" = 1,
+    @"4" = 2,
+    @"8" = 3,
+    @"16" = 4,
+    @"32" = 5,
     none = std.math.maxInt(u6),
     _,
 
     pub fn toByteUnitsOptional(a: Alignment) ?u64 {
         return switch (a) {
             .none => null,
-            _ => @as(u64, 1) << @intFromEnum(a),
+            else => @as(u64, 1) << @intFromEnum(a),
         };
     }
 
     pub fn toByteUnits(a: Alignment, default: u64) u64 {
         return switch (a) {
             .none => default,
-            _ => @as(u64, 1) << @intFromEnum(a),
+            else => @as(u64, 1) << @intFromEnum(a),
         };
     }
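+
+    // For example, `.@"8"` stores the exponent 3, so
+    // `Alignment.@"8".toByteUnits(0)` yields `@as(u64, 1) << 3 == 8`,
+    // while `.none` returns the provided default.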
 
@@ -2792,11 +3132,65 @@ pub const Alignment = enum(u6) {
         return fromByteUnits(n);
     }
 
+    pub fn toLog2Units(a: Alignment) u6 {
+        assert(a != .none);
+        return @intFromEnum(a);
+    }
+
+    /// This is just a glorified `@enumFromInt` but using it can help
+    /// document the intended conversion.
+    /// The parameter uses a u32 for convenience at the callsite.
+    pub fn fromLog2Units(a: u32) Alignment {
+        assert(a != @intFromEnum(Alignment.none));
+        return @enumFromInt(a);
+    }
+
     pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
-        assert(lhs != .none and rhs != .none);
+        assert(lhs != .none);
+        assert(rhs != .none);
         return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
     }
 
+    pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
+        assert(lhs != .none);
+        assert(rhs != .none);
+        return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
+    }
+
+    /// Treats `none` as zero.
+    pub fn max(lhs: Alignment, rhs: Alignment) Alignment {
+        if (lhs == .none) return rhs;
+        if (rhs == .none) return lhs;
+        return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs)));
+    }
+
+    /// Treats `none` as maximum value.
+    pub fn min(lhs: Alignment, rhs: Alignment) Alignment {
+        if (lhs == .none) return rhs;
+        if (rhs == .none) return lhs;
+        return @enumFromInt(@min(@intFromEnum(lhs), @intFromEnum(rhs)));
+    }
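+
+    // Illustrative: `Alignment.max(.none, .@"4") == .@"4"` and
+    // `Alignment.min(.none, .@"4") == .@"4"`, so `.none` is the identity
+    // for both operations.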
+
+    /// Align an address forwards to this alignment.
+    pub fn forward(a: Alignment, addr: u64) u64 {
+        assert(a != .none);
+        const x = (@as(u64, 1) << @intFromEnum(a)) - 1;
+        return (addr + x) & ~x;
+    }
+
+    /// Align an address backwards to this alignment.
+    pub fn backward(a: Alignment, addr: u64) u64 {
+        assert(a != .none);
+        const x = (@as(u64, 1) << @intFromEnum(a)) - 1;
+        return addr & ~x;
+    }
+
+    /// Check if an address is aligned to this amount.
+    pub fn check(a: Alignment, addr: u64) bool {
+        assert(a != .none);
+        return @ctz(addr) >= @intFromEnum(a);
+    }
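+
+    // Worked example: with `a = .@"8"` the mask `x` is 7, so
+    // `a.forward(13) == 16`, `a.backward(13) == 8`, and `a.check(16)`
+    // holds because `@ctz(@as(u64, 16)) == 4 >= 3`.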
+
     /// An array of `Alignment` objects existing within the `extra` array.
     /// This type exists to provide a struct with lifetime that is
     /// not invalidated when items are added to the `InternPool`.
@@ -2811,6 +3205,16 @@ pub const Alignment = enum(u6) {
             return @ptrCast(bytes[0..slice.len]);
         }
     };
+
+    const LlvmBuilderAlignment = @import("codegen/llvm/Builder.zig").Alignment;
+
+    pub fn toLlvm(this: @This()) LlvmBuilderAlignment {
+        return @enumFromInt(@intFromEnum(this));
+    }
+
+    pub fn fromLlvm(other: LlvmBuilderAlignment) @This() {
+        return @enumFromInt(@intFromEnum(other));
+    }
 };
 
 /// Used for non-sentineled arrays that have length fitting in u32, as well as
@@ -3065,9 +3469,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
     ip.limbs.deinit(gpa);
     ip.string_bytes.deinit(gpa);
 
-    ip.structs_free_list.deinit(gpa);
-    ip.allocated_structs.deinit(gpa);
-
     ip.decls_free_list.deinit(gpa);
     ip.allocated_decls.deinit(gpa);
 
@@ -3149,24 +3550,43 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
 
         .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
-        .type_struct => {
-            const struct_index: Module.Struct.OptionalIndex = @enumFromInt(data);
-            const namespace = if (struct_index.unwrap()) |i|
-                ip.structPtrConst(i).namespace.toOptional()
-            else
-                .none;
-            return .{ .struct_type = .{
-                .index = struct_index,
-                .namespace = namespace,
-            } };
-        },
+
+        .type_struct => .{ .struct_type = if (data == 0) .{
+            .extra_index = 0,
+            .namespace = .none,
+            .decl = .none,
+            .zir_index = @as(u32, undefined),
+            .layout = .Auto,
+            .field_names = .{ .start = 0, .len = 0 },
+            .field_types = .{ .start = 0, .len = 0 },
+            .field_inits = .{ .start = 0, .len = 0 },
+            .field_aligns = .{ .start = 0, .len = 0 },
+            .runtime_order = .{ .start = 0, .len = 0 },
+            .comptime_bits = .{ .start = 0, .len = 0 },
+            .offsets = .{ .start = 0, .len = 0 },
+            .names_map = undefined,
+        } else extraStructType(ip, data) },
+
         .type_struct_ns => .{ .struct_type = .{
-            .index = .none,
+            .extra_index = 0,
             .namespace = @as(Module.Namespace.Index, @enumFromInt(data)).toOptional(),
+            .decl = .none,
+            .zir_index = @as(u32, undefined),
+            .layout = .Auto,
+            .field_names = .{ .start = 0, .len = 0 },
+            .field_types = .{ .start = 0, .len = 0 },
+            .field_inits = .{ .start = 0, .len = 0 },
+            .field_aligns = .{ .start = 0, .len = 0 },
+            .runtime_order = .{ .start = 0, .len = 0 },
+            .comptime_bits = .{ .start = 0, .len = 0 },
+            .offsets = .{ .start = 0, .len = 0 },
+            .names_map = undefined,
         } },
 
         .type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
         .type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
+        .type_struct_packed => .{ .struct_type = extraPackedStructType(ip, data, false) },
+        .type_struct_packed_inits => .{ .struct_type = extraPackedStructType(ip, data, true) },
         .type_union => .{ .union_type = extraUnionType(ip, data) },
 
         .type_enum_auto => {
@@ -3476,10 +3896,15 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
                     const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
                     return .{ .aggregate = .{
                         .ty = ty,
-                        .storage = .{ .elems = @as([]const Index, @ptrCast(values)) },
+                        .storage = .{ .elems = @ptrCast(values) },
                     } };
                 },
 
+                .type_struct_packed, .type_struct_packed_inits => {
+                    // a packed struct has a 0-bit backing type
+                    @panic("TODO");
+                },
+
                 .type_enum_auto,
                 .type_enum_explicit,
                 .type_union,
@@ -3490,7 +3915,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .bytes => {
             const extra = ip.extraData(Bytes, data);
-            const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty)));
+            const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty));
             return .{ .aggregate = .{
                 .ty = extra.ty,
                 .storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] },
@@ -3498,8 +3923,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .aggregate => {
             const extra = ip.extraDataTrail(Tag.Aggregate, data);
-            const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty)));
-            const fields = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..len]));
+            const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
+            const fields: []const Index = @ptrCast(ip.extra.items[extra.end..][0..len]);
             return .{ .aggregate = .{
                 .ty = extra.data.ty,
                 .storage = .{ .elems = fields },
@@ -3603,6 +4028,44 @@ fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructTyp
     };
 }
 
+fn extraStructType(ip: *const InternPool, extra_index: u32) Key.StructType {
+    _ = ip;
+    _ = extra_index;
+    @panic("TODO");
+}
+
+fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) Key.StructType {
+    const type_struct_packed = ip.extraDataTrail(Tag.TypeStructPacked, extra_index);
+    const fields_len = type_struct_packed.data.fields_len;
+    return .{
+        .extra_index = extra_index,
+        .decl = type_struct_packed.data.decl.toOptional(),
+        .namespace = type_struct_packed.data.namespace,
+        .zir_index = type_struct_packed.data.zir_index,
+        .layout = .Packed,
+        .field_types = .{
+            .start = type_struct_packed.end,
+            .len = fields_len,
+        },
+        .field_names = .{
+            .start = type_struct_packed.end + fields_len,
+            .len = fields_len,
+        },
+        .field_inits = if (inits) .{
+            .start = type_struct_packed.end + fields_len + fields_len,
+            .len = fields_len,
+        } else .{
+            .start = 0,
+            .len = 0,
+        },
+        .field_aligns = .{ .start = 0, .len = 0 },
+        .runtime_order = .{ .start = 0, .len = 0 },
+        .comptime_bits = .{ .start = 0, .len = 0 },
+        .offsets = .{ .start = 0, .len = 0 },
+        .names_map = type_struct_packed.data.names_map,
+    };
+}
+
 fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
     const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
     var index: usize = type_function.end;
@@ -3831,8 +4294,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
         .error_set_type => |error_set_type| {
             assert(error_set_type.names_map == .none);
             assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan));
-            const names_map = try ip.addMap(gpa);
-            try addStringsToMap(ip, gpa, names_map, error_set_type.names.get(ip));
+            const names = error_set_type.names.get(ip);
+            const names_map = try ip.addMap(gpa, names.len);
+            addStringsToMap(ip, names_map, names);
             const names_len = error_set_type.names.len;
             try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len);
             ip.items.appendAssumeCapacity(.{
@@ -3877,21 +4341,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             });
         },
 
-        .struct_type => |struct_type| {
-            ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{
-                .tag = .type_struct,
-                .data = @intFromEnum(i),
-            } else if (struct_type.namespace.unwrap()) |i| .{
-                .tag = .type_struct_ns,
-                .data = @intFromEnum(i),
-            } else .{
-                .tag = .type_struct,
-                .data = @intFromEnum(Module.Struct.OptionalIndex.none),
-            });
-        },
-
+        .struct_type => unreachable, // use getStructType() instead
         .anon_struct_type => unreachable, // use getAnonStructType() instead
-
         .union_type => unreachable, // use getUnionType() instead
 
         .opaque_type => |opaque_type| {
@@ -3994,7 +4445,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                                         },
                                         .struct_type => |struct_type| {
                                             assert(ptr.addr == .field);
-                                            assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count());
+                                            assert(base_index.index < struct_type.field_types.len);
                                         },
                                         .union_type => |union_key| {
                                             const union_type = ip.loadUnionType(union_key);
@@ -4388,12 +4839,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         assert(ip.typeOf(elem) == child);
                     }
                 },
-                .struct_type => |struct_type| {
-                    for (
-                        aggregate.storage.values(),
-                        ip.structPtrUnwrapConst(struct_type.index).?.fields.values(),
-                    ) |elem, field| {
-                        assert(ip.typeOf(elem) == field.ty.toIntern());
+                .struct_type => |t| {
+                    for (aggregate.storage.values(), t.field_types.get(ip)) |elem, field_ty| {
+                        assert(ip.typeOf(elem) == field_ty);
                     }
                 },
                 .anon_struct_type => |anon_struct_type| {
@@ -4635,6 +5083,28 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
     return @enumFromInt(ip.items.len - 1);
 }
 
+pub const StructTypeInit = struct {
+    decl: Module.Decl.Index,
+    namespace: Module.Namespace.OptionalIndex,
+    layout: std.builtin.Type.ContainerLayout,
+    zir_index: Zir.Inst.Index,
+    fields_len: u32,
+    known_non_opv: bool,
+    requires_comptime: RequiresComptime,
+    is_tuple: bool,
+};
+
+pub fn getStructType(
+    ip: *InternPool,
+    gpa: Allocator,
+    ini: StructTypeInit,
+) Allocator.Error!Index {
+    _ = ip;
+    _ = gpa;
+    _ = ini;
+    @panic("TODO");
+}
+
 pub const AnonStructTypeInit = struct {
     types: []const Index,
     /// This may be empty, indicating this is a tuple.
@@ -4997,10 +5467,10 @@ pub fn getErrorSetType(
     });
     errdefer ip.items.len -= 1;
 
-    const names_map = try ip.addMap(gpa);
+    const names_map = try ip.addMap(gpa, names.len);
     errdefer _ = ip.maps.pop();
 
-    try addStringsToMap(ip, gpa, names_map, names);
+    addStringsToMap(ip, names_map, names);
 
     return @enumFromInt(ip.items.len - 1);
 }
@@ -5299,19 +5769,9 @@ pub const IncompleteEnumType = struct {
     pub fn addFieldName(
         self: @This(),
         ip: *InternPool,
-        gpa: Allocator,
         name: NullTerminatedString,
-    ) Allocator.Error!?u32 {
-        const map = &ip.maps.items[@intFromEnum(self.names_map)];
-        const field_index = map.count();
-        const strings = ip.extra.items[self.names_start..][0..field_index];
-        const adapter: NullTerminatedString.Adapter = .{
-            .strings = @as([]const NullTerminatedString, @ptrCast(strings)),
-        };
-        const gop = try map.getOrPutAdapted(gpa, name, adapter);
-        if (gop.found_existing) return @intCast(gop.index);
-        ip.extra.items[self.names_start + field_index] = @intFromEnum(name);
-        return null;
+    ) ?u32 {
+        return ip.addFieldName(self.names_map, self.names_start, name);
     }
 
     /// Returns the already-existing field with the same value, if any.
@@ -5319,17 +5779,14 @@ pub const IncompleteEnumType = struct {
     pub fn addFieldValue(
         self: @This(),
         ip: *InternPool,
-        gpa: Allocator,
         value: Index,
-    ) Allocator.Error!?u32 {
+    ) ?u32 {
         assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index])));
         const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)];
         const field_index = map.count();
         const indexes = ip.extra.items[self.values_start..][0..field_index];
-        const adapter: Index.Adapter = .{
-            .indexes = @as([]const Index, @ptrCast(indexes)),
-        };
-        const gop = try map.getOrPutAdapted(gpa, value, adapter);
+        const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) };
+        const gop = map.getOrPutAssumeCapacityAdapted(value, adapter);
         if (gop.found_existing) return @intCast(gop.index);
         ip.extra.items[self.values_start + field_index] = @intFromEnum(value);
         return null;
@@ -5370,7 +5827,7 @@ fn getIncompleteEnumAuto(
     const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
     assert(!gop.found_existing);
 
-    const names_map = try ip.addMap(gpa);
+    const names_map = try ip.addMap(gpa, enum_type.fields_len);
 
     const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len;
     try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len);
@@ -5390,7 +5847,7 @@ fn getIncompleteEnumAuto(
     });
     ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len);
     return .{
-        .index = @as(Index, @enumFromInt(ip.items.len - 1)),
+        .index = @enumFromInt(ip.items.len - 1),
         .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
         .names_map = names_map,
         .names_start = extra_index + extra_fields_len,
@@ -5412,9 +5869,9 @@ fn getIncompleteEnumExplicit(
     const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
     assert(!gop.found_existing);
 
-    const names_map = try ip.addMap(gpa);
+    const names_map = try ip.addMap(gpa, enum_type.fields_len);
     const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: {
-        const values_map = try ip.addMap(gpa);
+        const values_map = try ip.addMap(gpa, enum_type.fields_len);
         break :m values_map.toOptional();
     };
 
@@ -5441,7 +5898,7 @@ fn getIncompleteEnumExplicit(
     // This is both fields and values (if present).
     ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len);
     return .{
-        .index = @as(Index, @enumFromInt(ip.items.len - 1)),
+        .index = @enumFromInt(ip.items.len - 1),
         .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
         .names_map = names_map,
         .names_start = extra_index + extra_fields_len,
@@ -5484,8 +5941,8 @@ pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Erro
 
     switch (ini.tag_mode) {
         .auto => {
-            const names_map = try ip.addMap(gpa);
-            try addStringsToMap(ip, gpa, names_map, ini.names);
+            const names_map = try ip.addMap(gpa, ini.names.len);
+            addStringsToMap(ip, names_map, ini.names);
 
             const fields_len: u32 = @intCast(ini.names.len);
             try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
@@ -5514,12 +5971,12 @@ pub fn finishGetEnum(
     ini: GetEnumInit,
     tag: Tag,
 ) Allocator.Error!Index {
-    const names_map = try ip.addMap(gpa);
-    try addStringsToMap(ip, gpa, names_map, ini.names);
+    const names_map = try ip.addMap(gpa, ini.names.len);
+    addStringsToMap(ip, names_map, ini.names);
 
     const values_map: OptionalMapIndex = if (ini.values.len == 0) .none else m: {
-        const values_map = try ip.addMap(gpa);
-        try addIndexesToMap(ip, gpa, values_map, ini.values);
+        const values_map = try ip.addMap(gpa, ini.values.len);
+        addIndexesToMap(ip, values_map, ini.values);
         break :m values_map.toOptional();
     };
     const fields_len: u32 = @intCast(ini.names.len);
@@ -5553,35 +6010,35 @@ pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
 
 fn addStringsToMap(
     ip: *InternPool,
-    gpa: Allocator,
     map_index: MapIndex,
     strings: []const NullTerminatedString,
-) Allocator.Error!void {
+) void {
     const map = &ip.maps.items[@intFromEnum(map_index)];
     const adapter: NullTerminatedString.Adapter = .{ .strings = strings };
     for (strings) |string| {
-        const gop = try map.getOrPutAdapted(gpa, string, adapter);
+        const gop = map.getOrPutAssumeCapacityAdapted(string, adapter);
         assert(!gop.found_existing);
     }
 }
 
 fn addIndexesToMap(
     ip: *InternPool,
-    gpa: Allocator,
     map_index: MapIndex,
     indexes: []const Index,
-) Allocator.Error!void {
+) void {
     const map = &ip.maps.items[@intFromEnum(map_index)];
     const adapter: Index.Adapter = .{ .indexes = indexes };
     for (indexes) |index| {
-        const gop = try map.getOrPutAdapted(gpa, index, adapter);
+        const gop = map.getOrPutAssumeCapacityAdapted(index, adapter);
         assert(!gop.found_existing);
     }
 }
 
-fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex {
+fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex {
     const ptr = try ip.maps.addOne(gpa);
+    errdefer _ = ip.maps.pop();
     ptr.* = .{};
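+    // Reserving `cap` entries up front allows subsequent
+    // `getOrPutAssumeCapacityAdapted` calls to proceed without allocating.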
+    try ptr.ensureTotalCapacity(gpa, cap);
     return @enumFromInt(ip.maps.items.len - 1);
 }
 
@@ -5632,8 +6089,9 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
             Tag.TypePointer.Flags,
             Tag.TypeFunction.Flags,
             Tag.TypePointer.PackedOffset,
-            Tag.Variable.Flags,
             Tag.TypeUnion.Flags,
+            Tag.TypeStruct.Flags,
+            Tag.Variable.Flags,
             => @bitCast(@field(extra, field.name)),
 
             else => @compileError("bad field type: " ++ @typeName(field.type)),
@@ -5705,6 +6163,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
             Tag.TypeFunction.Flags,
             Tag.TypePointer.PackedOffset,
             Tag.TypeUnion.Flags,
+            Tag.TypeStruct.Flags,
             Tag.Variable.Flags,
             FuncAnalysis,
             => @bitCast(int32),
@@ -6093,8 +6552,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
                 const new_elem_ty = switch (ip.indexToKey(new_ty)) {
                     inline .array_type, .vector_type => |seq_type| seq_type.child,
                     .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
-                    .struct_type => |struct_type| ip.structPtr(struct_type.index.unwrap().?)
-                        .fields.values()[i].ty.toIntern(),
+                    .struct_type => |struct_type| struct_type.field_types.get(ip)[i],
                     else => unreachable,
                 };
                 elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
@@ -6206,25 +6664,6 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
     } });
 }
 
-pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex {
-    assert(val != .none);
-    const tags = ip.items.items(.tag);
-    if (tags[@intFromEnum(val)] != .type_struct) return .none;
-    const datas = ip.items.items(.data);
-    return @as(Module.Struct.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
-}
-
-pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex {
-    assert(val != .none);
-    const tags = ip.items.items(.tag);
-    switch (tags[@intFromEnum(val)]) {
-        .type_union => {},
-        else => return .none,
-    }
-    const datas = ip.items.items(.data);
-    return @as(Module.Union.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
-}
-
 pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
     assert(val != .none);
     const tags = ip.items.items(.tag);
@@ -6337,20 +6776,16 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
     const items_size = (1 + 4) * ip.items.len;
     const extra_size = 4 * ip.extra.items.len;
     const limbs_size = 8 * ip.limbs.items.len;
-    // TODO: fields size is not taken into account
-    const structs_size = ip.allocated_structs.len *
-        (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace));
     const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl);
 
     // TODO: map overhead size is not taken into account
-    const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + structs_size + decls_size;
+    const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size;
 
     std.debug.print(
         \\InternPool size: {d} bytes
         \\  {d} items: {d} bytes
         \\  {d} extra: {d} bytes
         \\  {d} limbs: {d} bytes
-        \\  {d} structs: {d} bytes
         \\  {d} decls: {d} bytes
         \\
     , .{
@@ -6361,8 +6796,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
         extra_size,
         ip.limbs.items.len,
         limbs_size,
-        ip.allocated_structs.len,
-        structs_size,
         ip.allocated_decls.len,
         decls_size,
     });
@@ -6399,17 +6832,40 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
             .type_enum_auto => @sizeOf(EnumAuto),
             .type_opaque => @sizeOf(Key.OpaqueType),
             .type_struct => b: {
-                const struct_index = @as(Module.Struct.Index, @enumFromInt(data));
-                const struct_obj = ip.structPtrConst(struct_index);
-                break :b @sizeOf(Module.Struct) +
-                    @sizeOf(Module.Namespace) +
-                    (struct_obj.fields.count() * @sizeOf(Module.Struct.Field));
+                const info = ip.extraData(Tag.TypeStruct, data);
+                var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len;
+                ints += info.fields_len; // types
+                if (!info.flags.is_tuple) {
+                    ints += 1; // names_map
+                    ints += info.fields_len; // names
+                }
+                if (info.flags.any_default_inits)
+                    ints += info.fields_len; // inits
+                ints += @intFromBool(info.flags.has_namespace); // namespace
+                if (info.flags.any_aligned_fields)
+                    ints += (info.fields_len + 3) / 4; // aligns
+                if (info.flags.any_comptime_fields)
+                    ints += (info.fields_len + 31) / 32; // comptime bits
+                if (info.flags.has_reordered_fields)
+                    ints += info.fields_len; // runtime order
+                ints += info.fields_len; // offsets
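+                // e.g. a non-tuple struct with 2 fields, default inits, and
+                // aligned fields: 5 (header) + 2 + (1 + 2) + 2 + 1 + 2 = 15 u32s.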
+                break :b @sizeOf(u32) * ints;
             },
             .type_struct_ns => @sizeOf(Module.Namespace),
             .type_struct_anon => b: {
                 const info = ip.extraData(TypeStructAnon, data);
                 break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
             },
+            .type_struct_packed => b: {
+                const info = ip.extraData(Tag.TypeStructPacked, data);
+                break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
+                    info.fields_len + info.fields_len);
+            },
+            .type_struct_packed_inits => b: {
+                const info = ip.extraData(Tag.TypeStructPacked, data);
+                break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
+                    info.fields_len + info.fields_len + info.fields_len);
+            },
             .type_tuple_anon => b: {
                 const info = ip.extraData(TypeStructAnon, data);
                 break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len);
@@ -6562,6 +7018,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
             .type_struct,
             .type_struct_ns,
             .type_struct_anon,
+            .type_struct_packed,
+            .type_struct_packed_inits,
             .type_tuple_anon,
             .type_union,
             .type_function,
@@ -6677,18 +7135,6 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
     try bw.flush();
 }
 
-pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct {
-    return ip.allocated_structs.at(@intFromEnum(index));
-}
-
-pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct {
-    return ip.allocated_structs.at(@intFromEnum(index));
-}
-
-pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct {
-    return structPtrConst(ip, index.unwrap() orelse return null);
-}
-
 pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl {
     return ip.allocated_decls.at(@intFromEnum(index));
 }
@@ -6701,28 +7147,6 @@ pub fn namespacePtr(ip: *InternPool, index: Module.Namespace.Index) *Module.Name
     return ip.allocated_namespaces.at(@intFromEnum(index));
 }
 
-pub fn createStruct(
-    ip: *InternPool,
-    gpa: Allocator,
-    initialization: Module.Struct,
-) Allocator.Error!Module.Struct.Index {
-    if (ip.structs_free_list.popOrNull()) |index| {
-        ip.allocated_structs.at(@intFromEnum(index)).* = initialization;
-        return index;
-    }
-    const ptr = try ip.allocated_structs.addOne(gpa);
-    ptr.* = initialization;
-    return @enumFromInt(ip.allocated_structs.len - 1);
-}
-
-pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void {
-    ip.structPtr(index).* = undefined;
-    ip.structs_free_list.append(gpa, index) catch {
-        // In order to keep `destroyStruct` a non-fallible function, we ignore memory
-        // allocation failures here, instead leaking the Struct until garbage collection.
-    };
-}
-
 pub fn createDecl(
     ip: *InternPool,
     gpa: Allocator,
@@ -6967,6 +7391,8 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
             .type_struct,
             .type_struct_ns,
             .type_struct_anon,
+            .type_struct_packed,
+            .type_struct_packed_inits,
             .type_tuple_anon,
             .type_union,
             .type_function,
@@ -7056,7 +7482,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
 
 pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
     return switch (ip.indexToKey(ty)) {
-        .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
+        .struct_type => |struct_type| struct_type.field_types.len,
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
         .array_type => |array_type| array_type.len,
         .vector_type => |vector_type| vector_type.len,
@@ -7066,7 +7492,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
 
 pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
     return switch (ip.indexToKey(ty)) {
-        .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
+        .struct_type => |struct_type| struct_type.field_types.len,
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
         .array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none),
         .vector_type => |vector_type| vector_type.len,
@@ -7301,6 +7727,8 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
             .type_struct,
             .type_struct_ns,
             .type_struct_anon,
+            .type_struct_packed,
+            .type_struct_packed_inits,
             .type_tuple_anon,
             => .Struct,
 
@@ -7526,6 +7954,40 @@ pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: In
             .data = @intFromEnum(SimpleValue.@"unreachable"),
         });
     } else {
-        // TODO: add the index to a free-list for reuse
+        // Here we could add the index to a free-list for reuse, but since
+        // there is so little garbage created this way it's not worth it.
     }
 }
+
+pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index {
+    return ip.indexToKey(i).anon_struct_type.types.get(ip);
+}
+
+pub fn anonStructFieldsLen(ip: *const InternPool, i: Index) u32 {
+    return @intCast(ip.indexToKey(i).anon_struct_type.types.len);
+}
+
+/// Asserts the type is a struct.
+pub fn structDecl(ip: *const InternPool, i: Index) Module.Decl.OptionalIndex {
+    return switch (ip.indexToKey(i)) {
+        .struct_type => |t| t.decl,
+        else => unreachable,
+    };
+}
+
+/// Returns the already-existing field with the same name, if any.
+pub fn addFieldName(
+    ip: *InternPool,
+    names_map: MapIndex,
+    names_start: u32,
+    name: NullTerminatedString,
+) ?u32 {
+    const map = &ip.maps.items[@intFromEnum(names_map)];
+    const field_index = map.count();
+    const strings = ip.extra.items[names_start..][0..field_index];
+    const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) };
+    const gop = map.getOrPutAssumeCapacityAdapted(name, adapter);
+    if (gop.found_existing) return @intCast(gop.index);
+    ip.extra.items[names_start + field_index] = @intFromEnum(name);
+    return null;
+}
src/Module.zig
@@ -105,8 +105,6 @@ comptime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternP
 
 /// To be eliminated in a future commit by moving more data into InternPool.
 /// Current uses that must be eliminated:
-/// * Struct comptime_args
-/// * Struct optimized_order
 /// * comptime pointer mutation
 /// This memory lives until the Module is destroyed.
 tmp_hack_arena: std.heap.ArenaAllocator,
@@ -678,14 +676,10 @@ pub const Decl = struct {
 
     /// If the Decl owns its value and it is a struct, return it,
     /// otherwise null.
-    pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
-        return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
-    }
-
-    pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
-        if (!decl.owns_tv) return .none;
-        if (decl.val.ip_index == .none) return .none;
-        return mod.intern_pool.indexToStructType(decl.val.toIntern());
+    pub fn getOwnedStruct(decl: Decl, mod: *Module) ?InternPool.Key.StructType {
+        if (!decl.owns_tv) return null;
+        if (decl.val.ip_index == .none) return null;
+        return mod.typeToStruct(decl.val.toType());
     }
 
     /// If the Decl owns its value and it is a union, return it,
@@ -795,9 +789,10 @@ pub const Decl = struct {
         return decl.getExternDecl(mod) != .none;
     }
 
-    pub fn getAlignment(decl: Decl, mod: *Module) u32 {
+    pub fn getAlignment(decl: Decl, mod: *Module) Alignment {
         assert(decl.has_tv);
-        return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)));
+        if (decl.alignment != .none) return decl.alignment;
+        return decl.ty.abiAlignment(mod);
     }
 };
 
@@ -806,218 +801,6 @@ pub const EmitH = struct {
     fwd_decl: ArrayListUnmanaged(u8) = .{},
 };
 
-pub const PropertyBoolean = enum { no, yes, unknown, wip };
-
-/// Represents the data that a struct declaration provides.
-pub const Struct = struct {
-    /// Set of field names in declaration order.
-    fields: Fields,
-    /// Represents the declarations inside this struct.
-    namespace: Namespace.Index,
-    /// The Decl that corresponds to the struct itself.
-    owner_decl: Decl.Index,
-    /// Index of the struct_decl ZIR instruction.
-    zir_index: Zir.Inst.Index,
-    /// Indexes into `fields` sorted to be most memory efficient.
-    optimized_order: ?[*]u32 = null,
-    layout: std.builtin.Type.ContainerLayout,
-    /// If the layout is not packed, this is the noreturn type.
-    /// If the layout is packed, this is the backing integer type of the packed struct.
-    /// Whether zig chooses this type or the user specifies it, it is stored here.
-    /// This will be set to the noreturn type until status is `have_layout`.
-    backing_int_ty: Type = Type.noreturn,
-    status: enum {
-        none,
-        field_types_wip,
-        have_field_types,
-        layout_wip,
-        have_layout,
-        fully_resolved_wip,
-        // The types and all its fields have had their layout resolved. Even through pointer,
-        // which `have_layout` does not ensure.
-        fully_resolved,
-    },
-    /// If true, has more than one possible value. However it may still be non-runtime type
-    /// if it is a comptime-only type.
-    /// If false, resolving the fields is necessary to determine whether the type has only
-    /// one possible value.
-    known_non_opv: bool,
-    requires_comptime: PropertyBoolean = .unknown,
-    have_field_inits: bool = false,
-    is_tuple: bool,
-    assumed_runtime_bits: bool = false,
-
-    pub const Index = enum(u32) {
-        _,
-
-        pub fn toOptional(i: Index) OptionalIndex {
-            return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
-        }
-    };
-
-    pub const OptionalIndex = enum(u32) {
-        none = std.math.maxInt(u32),
-        _,
-
-        pub fn init(oi: ?Index) OptionalIndex {
-            return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
-        }
-
-        pub fn unwrap(oi: OptionalIndex) ?Index {
-            if (oi == .none) return null;
-            return @as(Index, @enumFromInt(@intFromEnum(oi)));
-        }
-    };
-
-    pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
-
-    /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
-    pub const Field = struct {
-        /// Uses `noreturn` to indicate `anytype`.
-        /// undefined until `status` is >= `have_field_types`.
-        ty: Type,
-        /// Uses `none` to indicate no default.
-        default_val: InternPool.Index,
-        /// Zero means to use the ABI alignment of the type.
-        abi_align: Alignment,
-        /// undefined until `status` is `have_layout`.
-        offset: u32,
-        /// If true then `default_val` is the comptime field value.
-        is_comptime: bool,
-
-        /// Returns the field alignment. If the struct is packed, returns 0.
-        /// Keep implementation in sync with `Sema.structFieldAlignment`.
-        pub fn alignment(
-            field: Field,
-            mod: *Module,
-            layout: std.builtin.Type.ContainerLayout,
-        ) u32 {
-            if (field.abi_align.toByteUnitsOptional()) |abi_align| {
-                assert(layout != .Packed);
-                return @as(u32, @intCast(abi_align));
-            }
-
-            const target = mod.getTarget();
-
-            switch (layout) {
-                .Packed => return 0,
-                .Auto => {
-                    if (target.ofmt == .c) {
-                        return alignmentExtern(field, mod);
-                    } else {
-                        return field.ty.abiAlignment(mod);
-                    }
-                },
-                .Extern => return alignmentExtern(field, mod),
-            }
-        }
-
-        pub fn alignmentExtern(field: Field, mod: *Module) u32 {
-            // This logic is duplicated in Type.abiAlignmentAdvanced.
-            const ty_abi_align = field.ty.abiAlignment(mod);
-
-            if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
-                // The C ABI requires 128 bit integer fields of structs
-                // to be 16-bytes aligned.
-                return @max(ty_abi_align, 16);
-            }
-
-            return ty_abi_align;
-        }
-    };
-
-    /// Used in `optimized_order` to indicate field that is not present in the
-    /// runtime version of the struct.
-    pub const omitted_field = std.math.maxInt(u32);
-
-    pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString {
-        return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
-    }
-
-    pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
-        return mod.declPtr(s.owner_decl).srcLoc(mod);
-    }
-
-    pub fn haveFieldTypes(s: Struct) bool {
-        return switch (s.status) {
-            .none,
-            .field_types_wip,
-            => false,
-            .have_field_types,
-            .layout_wip,
-            .have_layout,
-            .fully_resolved_wip,
-            .fully_resolved,
-            => true,
-        };
-    }
-
-    pub fn haveLayout(s: Struct) bool {
-        return switch (s.status) {
-            .none,
-            .field_types_wip,
-            .have_field_types,
-            .layout_wip,
-            => false,
-            .have_layout,
-            .fully_resolved_wip,
-            .fully_resolved,
-            => true,
-        };
-    }
-
-    pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
-        assert(s.layout == .Packed);
-        assert(s.haveLayout());
-        var bit_sum: u64 = 0;
-        for (s.fields.values(), 0..) |field, i| {
-            if (i == index) {
-                return @as(u16, @intCast(bit_sum));
-            }
-            bit_sum += field.ty.bitSize(mod);
-        }
-        unreachable; // index out of bounds
-    }
-
-    pub const RuntimeFieldIterator = struct {
-        module: *Module,
-        struct_obj: *const Struct,
-        index: u32 = 0,
-
-        pub const FieldAndIndex = struct {
-            field: Field,
-            index: u32,
-        };
-
-        pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
-            const mod = it.module;
-            while (true) {
-                var i = it.index;
-                it.index += 1;
-                if (it.struct_obj.fields.count() <= i)
-                    return null;
-
-                if (it.struct_obj.optimized_order) |some| {
-                    i = some[i];
-                    if (i == Module.Struct.omitted_field) return null;
-                }
-                const field = it.struct_obj.fields.values()[i];
-
-                if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
-                    return FieldAndIndex{ .index = i, .field = field };
-                }
-            }
-        }
-    };
-
-    pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
-        return .{
-            .struct_obj = s,
-            .module = module,
-        };
-    }
-};
-
 pub const DeclAdapter = struct {
     mod: *Module,
 
@@ -2893,20 +2676,10 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
     return mod.intern_pool.namespacePtr(index);
 }
 
-pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
-    return mod.intern_pool.structPtr(index);
-}
-
 pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
     return mod.namespacePtr(index.unwrap() orelse return null);
 }
 
-/// This one accepts an index from the InternPool and asserts that it is not
-/// the anonymous empty struct type.
-pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
-    return mod.structPtr(index.unwrap() orelse return null);
-}
-
 /// Returns true if and only if the Decl is the top level struct associated with a File.
 pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
     const decl = mod.declPtr(decl_index);
@@ -3351,11 +3124,11 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
 
         if (!decl.owns_tv) continue;
 
-        if (decl.getOwnedStruct(mod)) |struct_obj| {
-            struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
+        if (decl.getOwnedStruct(mod)) |struct_type| {
+            struct_type.setZirIndex(ip, inst_map.get(struct_type.zir_index) orelse {
                 try file.deleted_decls.append(gpa, decl_index);
                 continue;
-            };
+            });
         }
 
         if (decl.getOwnedUnion(mod)) |union_type| {
@@ -3870,36 +3643,16 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
     const new_decl = mod.declPtr(new_decl_index);
     errdefer @panic("TODO error handling");
 
-    const struct_index = try mod.createStruct(.{
-        .owner_decl = new_decl_index,
-        .fields = .{},
-        .zir_index = undefined, // set below
-        .layout = .Auto,
-        .status = .none,
-        .known_non_opv = undefined,
-        .is_tuple = undefined, // set below
-        .namespace = new_namespace_index,
-    });
-    errdefer mod.destroyStruct(struct_index);
-
-    const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
-        .index = struct_index.toOptional(),
-        .namespace = new_namespace_index.toOptional(),
-    } });
-    // TODO: figure out InternPool removals for incremental compilation
-    //errdefer mod.intern_pool.remove(struct_ty);
-
-    new_namespace.ty = struct_ty.toType();
     file.root_decl = new_decl_index.toOptional();
 
     new_decl.name = try file.fullyQualifiedName(mod);
+    new_decl.name_fully_qualified = true;
     new_decl.src_line = 0;
     new_decl.is_pub = true;
     new_decl.is_exported = false;
     new_decl.has_align = false;
     new_decl.has_linksection_or_addrspace = false;
     new_decl.ty = Type.type;
-    new_decl.val = struct_ty.toValue();
     new_decl.alignment = .none;
     new_decl.@"linksection" = .none;
     new_decl.has_tv = true;
@@ -3907,75 +3660,76 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
     new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
     new_decl.analysis = .in_progress;
     new_decl.generation = mod.generation;
-    new_decl.name_fully_qualified = true;
 
-    if (file.status == .success_zir) {
-        assert(file.zir_loaded);
-        const main_struct_inst = Zir.main_struct_inst;
-        const struct_obj = mod.structPtr(struct_index);
-        struct_obj.zir_index = main_struct_inst;
-        const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
-        const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
-        struct_obj.is_tuple = small.is_tuple;
-
-        var sema_arena = std.heap.ArenaAllocator.init(gpa);
-        defer sema_arena.deinit();
-        const sema_arena_allocator = sema_arena.allocator();
-
-        var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
-        defer comptime_mutable_decls.deinit();
-
-        var sema: Sema = .{
-            .mod = mod,
-            .gpa = gpa,
-            .arena = sema_arena_allocator,
-            .code = file.zir,
-            .owner_decl = new_decl,
-            .owner_decl_index = new_decl_index,
-            .func_index = .none,
-            .func_is_naked = false,
-            .fn_ret_ty = Type.void,
-            .fn_ret_ty_ies = null,
-            .owner_func_index = .none,
-            .comptime_mutable_decls = &comptime_mutable_decls,
-        };
-        defer sema.deinit();
+    if (file.status != .success_zir) {
+        new_decl.analysis = .file_failure;
+        return;
+    }
+    assert(file.zir_loaded);
 
-        if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
-            for (comptime_mutable_decls.items) |decl_index| {
-                const decl = mod.declPtr(decl_index);
-                _ = try decl.internValue(mod);
-            }
-            new_decl.analysis = .complete;
-        } else |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-            error.AnalysisFail => {},
-        }
+    var sema_arena = std.heap.ArenaAllocator.init(gpa);
+    defer sema_arena.deinit();
+    const sema_arena_allocator = sema_arena.allocator();
 
-        if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
-            const source = file.getSource(gpa) catch |err| {
-                try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
-                return error.AnalysisFail;
-            };
+    var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
+    defer comptime_mutable_decls.deinit();
 
-            const resolved_path = std.fs.path.resolve(
-                gpa,
-                if (file.pkg.root_src_directory.path) |pkg_path|
-                    &[_][]const u8{ pkg_path, file.sub_file_path }
-                else
-                    &[_][]const u8{file.sub_file_path},
-            ) catch |err| {
-                try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
-                return error.AnalysisFail;
-            };
-            errdefer gpa.free(resolved_path);
+    var sema: Sema = .{
+        .mod = mod,
+        .gpa = gpa,
+        .arena = sema_arena_allocator,
+        .code = file.zir,
+        .owner_decl = new_decl,
+        .owner_decl_index = new_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_mutable_decls = &comptime_mutable_decls,
+    };
+    defer sema.deinit();
 
-            mod.comp.whole_cache_manifest_mutex.lock();
-            defer mod.comp.whole_cache_manifest_mutex.unlock();
-            try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
-        }
-    } else {
-        new_decl.analysis = .file_failure;
+    const main_struct_inst = Zir.main_struct_inst;
+    const struct_ty = sema.getStructType(
+        new_decl_index,
+        new_namespace_index,
+        main_struct_inst,
+    ) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+    };
+    // TODO: figure out InternPool removals for incremental compilation
+    //errdefer ip.remove(struct_ty);
+    for (comptime_mutable_decls.items) |decl_index| {
+        const decl = mod.declPtr(decl_index);
+        _ = try decl.internValue(mod);
+    }
+
+    new_namespace.ty = struct_ty.toType();
+    new_decl.val = struct_ty.toValue();
+    new_decl.analysis = .complete;
+
+    if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
+        const source = file.getSource(gpa) catch |err| {
+            try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
+            return error.AnalysisFail;
+        };
+
+        const resolved_path = std.fs.path.resolve(
+            gpa,
+            if (file.pkg.root_src_directory.path) |pkg_path|
+                &[_][]const u8{ pkg_path, file.sub_file_path }
+            else
+                &[_][]const u8{file.sub_file_path},
+        ) catch |err| {
+            try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
+            return error.AnalysisFail;
+        };
+        errdefer gpa.free(resolved_path);
+
+        mod.comp.whole_cache_manifest_mutex.lock();
+        defer mod.comp.whole_cache_manifest_mutex.unlock();
+        try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
     }
 }
 
@@ -4057,12 +3811,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
 
     if (mod.declIsRoot(decl_index)) {
         const main_struct_inst = Zir.main_struct_inst;
-        const struct_index = decl.getOwnedStructIndex(mod).unwrap().?;
-        const struct_obj = mod.structPtr(struct_index);
-        // This might not have gotten set in `semaFile` if the first time had
-        // a ZIR failure, so we set it here in case.
-        struct_obj.zir_index = main_struct_inst;
-        try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
+        const struct_type = decl.getOwnedStruct(mod).?;
+        assert(struct_type.zir_index == main_struct_inst);
+        if (true) @panic("TODO");
+        // why did the old code have this? I don't see how struct_type could have
+        // been created already without the analyzeStructDecl logic having run on it.
+        //try sema.analyzeStructDecl(decl, main_struct_inst, struct_type);
         decl.analysis = .complete;
         decl.generation = mod.generation;
         return false;
@@ -5241,14 +4995,6 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
     return mod.intern_pool.destroyNamespace(mod.gpa, index);
 }
 
-pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
-    return mod.intern_pool.createStruct(mod.gpa, initialization);
-}
-
-pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
-    return mod.intern_pool.destroyStruct(mod.gpa, index);
-}
-
 pub fn allocateNewDecl(
     mod: *Module,
     namespace: Namespace.Index,
@@ -6210,10 +5956,10 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
     // type, we change it to .none here. If this causes an assertion trip because the
     // pointee type needs to be resolved more, that needs to be done before calling
     // this ptr() function.
-    if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
-        if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
-            canon_info.flags.alignment = .none;
-        }
+    if (info.flags.alignment != .none and have_elem_layout and
+        info.flags.alignment == info.child.toType().abiAlignment(mod))
+    {
+        canon_info.flags.alignment = .none;
     }
 
     switch (info.flags.vector_index) {
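
The canonicalization above is what lets pointer-type interning deduplicate a redundant explicit alignment against the plain pointer type. The same rule is observable in the language itself; a minimal sketch, guarded so it only asserts on targets where 4 bytes is in fact u32's ABI alignment:

    const std = @import("std");

    comptime {
        if (@alignOf(u32) == 4) {
            // An explicit align(4) on a pointer to u32 carries no extra
            // information here, so both spellings are the same type.
            const Explicit = *align(4) u32;
            const Plain = *u32;
            std.debug.assert(Explicit == Plain);
        }
    }
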
@@ -6483,7 +6229,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
             return @as(u16, @intCast(big.bitCountTwosComp()));
         },
         .lazy_align => |lazy_ty| {
-            return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign);
+            return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
         },
         .lazy_size => |lazy_ty| {
             return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @intFromBool(sign);
@@ -6639,20 +6385,30 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
 /// * `@TypeOf(.{})`
 /// * A struct which has no fields (`struct {}`).
 /// * Not a struct.
-pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
+pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
     if (ty.ip_index == .none) return null;
-    const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null;
-    return mod.structPtr(struct_index);
+    return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+        .struct_type => |t| t,
+        else => null,
+    };
+}
+
+pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
+    if (ty.ip_index == .none) return null;
+    return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+        .struct_type => |t| if (t.layout == .Packed) t else null,
+        else => null,
+    };
 }
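
Both helpers now hand back the `InternPool.Key.StructType` by value instead of a `*Struct` heap pointer, so call sites read field data through the pool. A hypothetical call-site fragment (assumes `mod: *Module` and `ty: Type` are in scope; not standalone):

    if (mod.typeToPackedStruct(ty)) |packed_struct| {
        const ip = &mod.intern_pool;
        // Field data lives in InternPool arrays, reached through the
        // value-typed key rather than a heap-allocated Module.Struct.
        for (packed_struct.field_types.get(ip)) |field_ty| {
            _ = field_ty.toType();
        }
    }
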
 
 /// This asserts that the union's enum tag type has been resolved.
 pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
     if (ty.ip_index == .none) return null;
     const ip = &mod.intern_pool;
-    switch (ip.indexToKey(ty.ip_index)) {
-        .union_type => |k| return ip.loadUnionType(k),
-        else => return null,
-    }
+    return switch (ip.indexToKey(ty.ip_index)) {
+        .union_type => |k| ip.loadUnionType(k),
+        else => null,
+    };
 }
 
 pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
@@ -6741,13 +6497,13 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
 
 pub const UnionLayout = struct {
     abi_size: u64,
-    abi_align: u32,
+    abi_align: Alignment,
     most_aligned_field: u32,
     most_aligned_field_size: u64,
     biggest_field: u32,
     payload_size: u64,
-    payload_align: u32,
-    tag_align: u32,
+    payload_align: Alignment,
+    tag_align: Alignment,
     tag_size: u64,
     padding: u32,
 };
@@ -6759,35 +6515,37 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
     var most_aligned_field_size: u64 = undefined;
     var biggest_field: u32 = undefined;
     var payload_size: u64 = 0;
-    var payload_align: u32 = 0;
+    var payload_align: Alignment = .@"1";
     for (u.field_types.get(ip), 0..) |field_ty, i| {
         if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-        const field_align = u.fieldAlign(ip, @intCast(i)).toByteUnitsOptional() orelse
+        const explicit_align = u.fieldAlign(ip, @intCast(i));
+        const field_align = if (explicit_align != .none)
+            explicit_align
+        else
             field_ty.toType().abiAlignment(mod);
         const field_size = field_ty.toType().abiSize(mod);
         if (field_size > payload_size) {
             payload_size = field_size;
             biggest_field = @intCast(i);
         }
-        if (field_align > payload_align) {
-            payload_align = @intCast(field_align);
+        if (field_align.compare(.gte, payload_align)) {
+            payload_align = field_align;
             most_aligned_field = @intCast(i);
             most_aligned_field_size = field_size;
         }
     }
-    payload_align = @max(payload_align, 1);
     const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
     if (!have_tag or !u.enum_tag_ty.toType().hasRuntimeBits(mod)) {
         return .{
-            .abi_size = std.mem.alignForward(u64, payload_size, payload_align),
+            .abi_size = payload_align.forward(payload_size),
             .abi_align = payload_align,
             .most_aligned_field = most_aligned_field,
             .most_aligned_field_size = most_aligned_field_size,
             .biggest_field = biggest_field,
             .payload_size = payload_size,
             .payload_align = payload_align,
-            .tag_align = 0,
+            .tag_align = .none,
             .tag_size = 0,
             .padding = 0,
         };
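
All of the layout math above happens directly on the log2 `Alignment` representation, as the commit message describes. A minimal standalone model of the operations relied on here (`forward`, `max`), assuming the tag encodes log2 of the byte units with `.none` reserved as the maximum tag; names and details are illustrative, and `.none` must be ruled out before doing math, matching the asserts in this diff:

    const std = @import("std");

    const Alignment = enum(u6) {
        @"1" = 0,
        @"2" = 1,
        @"4" = 2,
        @"8" = 3,
        @"16" = 4,
        none = std.math.maxInt(u6),
        _,

        fn toByteUnits(a: Alignment) u64 {
            return @as(u64, 1) << @intFromEnum(a);
        }

        // Round `addr` up to this alignment without leaving the log2 form.
        fn forward(a: Alignment, addr: u64) u64 {
            const mask = a.toByteUnits() - 1;
            return (addr + mask) & ~mask;
        }

        // log2 is monotonic, so max on the tags is max on the byte units.
        fn max(a: Alignment, b: Alignment) Alignment {
            return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
        }
    };

    test "log2 alignment math agrees with byte-unit math" {
        try std.testing.expectEqual(std.mem.alignForward(u64, 13, 8), Alignment.@"8".forward(13));
        try std.testing.expectEqual(Alignment.@"16", Alignment.max(.@"8", .@"16"));
    }
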
@@ -6795,29 +6553,29 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
     // Put the tag before or after the payload depending on which one's
     // alignment is greater.
     const tag_size = u.enum_tag_ty.toType().abiSize(mod);
-    const tag_align = @max(1, u.enum_tag_ty.toType().abiAlignment(mod));
+    const tag_align = u.enum_tag_ty.toType().abiAlignment(mod).max(.@"1");
     var size: u64 = 0;
     var padding: u32 = undefined;
-    if (tag_align >= payload_align) {
+    if (tag_align.compare(.gte, payload_align)) {
         // {Tag, Payload}
         size += tag_size;
-        size = std.mem.alignForward(u64, size, payload_align);
+        size = payload_align.forward(size);
         size += payload_size;
         const prev_size = size;
-        size = std.mem.alignForward(u64, size, tag_align);
-        padding = @as(u32, @intCast(size - prev_size));
+        size = tag_align.forward(size);
+        padding = @intCast(size - prev_size);
     } else {
         // {Payload, Tag}
         size += payload_size;
-        size = std.mem.alignForward(u64, size, tag_align);
+        size = tag_align.forward(size);
         size += tag_size;
         const prev_size = size;
-        size = std.mem.alignForward(u64, size, payload_align);
-        padding = @as(u32, @intCast(size - prev_size));
+        size = payload_align.forward(size);
+        padding = @intCast(size - prev_size);
     }
     return .{
         .abi_size = size,
-        .abi_align = @max(tag_align, payload_align),
+        .abi_align = tag_align.max(payload_align),
         .most_aligned_field = most_aligned_field,
         .most_aligned_field_size = most_aligned_field_size,
         .biggest_field = biggest_field,
@@ -6834,17 +6592,16 @@ pub fn unionAbiSize(mod: *Module, u: InternPool.UnionType) u64 {
 }
 
 /// Returns .none if the union is represented with 0 bits at runtime.
-/// TODO: this returns alignment in byte units should should be a u64
-pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
+pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
     const ip = &mod.intern_pool;
     const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
-    var max_align: u32 = 0;
+    var max_align: Alignment = .none;
     if (have_tag) max_align = u.enum_tag_ty.toType().abiAlignment(mod);
     for (u.field_types.get(ip), 0..) |field_ty, field_index| {
         if (!field_ty.toType().hasRuntimeBits(mod)) continue;
 
         const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
-        max_align = @max(max_align, field_align);
+        max_align = max_align.max(field_align);
     }
     return max_align;
 }
@@ -6852,10 +6609,10 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
 /// Returns the field alignment, assuming the union is not packed.
 /// Keep implementation in sync with `Sema.unionFieldAlignment`.
 /// Prefer to call that function instead of this one during Sema.
-/// TODO: this returns alignment in byte units should should be a u64
-pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) u32 {
+pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
     const ip = &mod.intern_pool;
-    if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a);
+    const field_align = u.fieldAlign(ip, field_index);
+    if (field_align != .none) return field_align;
     const field_ty = u.field_types.get(ip)[field_index].toType();
     return field_ty.abiAlignment(mod);
 }
@@ -6866,3 +6623,64 @@ pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value
     const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
     return enum_type.tagValueIndex(ip, enum_tag.toIntern());
 }
+
+/// Returns the field alignment of a non-packed struct.
+/// Keep implementation in sync with `Sema.structFieldAlignment`.
+/// Asserts the layout is not packed.
+pub fn structFieldAlignment(
+    mod: *Module,
+    explicit_alignment: InternPool.Alignment,
+    field_ty: Type,
+    layout: std.builtin.Type.ContainerLayout,
+) Alignment {
+    assert(layout != .Packed);
+    if (explicit_alignment != .none) return explicit_alignment;
+    switch (layout) {
+        .Packed => unreachable,
+        .Auto => {
+            if (mod.getTarget().ofmt == .c) {
+                return structFieldAlignmentExtern(mod, field_ty);
+            } else {
+                return field_ty.abiAlignment(mod);
+            }
+        },
+        .Extern => return structFieldAlignmentExtern(mod, field_ty),
+    }
+}
+
+/// Returns the field alignment of an extern struct.
+/// This logic is duplicated in Type.abiAlignmentAdvanced.
+pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
+    const ty_abi_align = field_ty.abiAlignment(mod);
+
+    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
+        // The C ABI requires 128 bit integer fields of structs
+        // to be 16-bytes aligned.
+        return ty_abi_align.max(.@"16");
+    }
+
+    return ty_abi_align;
+}
+
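
A standalone model of the 128-bit rule above, with illustrative names; the real function derives the natural alignment from the type's ABI alignment:

    const std = @import("std");

    fn externIntFieldAlign(bits: u16, natural_align: u64) u64 {
        // Integer fields of 128 bits or more are raised to 16-byte
        // alignment for C ABI compatibility; smaller ones are untouched.
        return if (bits >= 128) @max(natural_align, 16) else natural_align;
    }

    test "extern 128-bit integer fields are 16-byte aligned" {
        try std.testing.expectEqual(@as(u64, 16), externIntFieldAlign(128, 8));
        try std.testing.expectEqual(@as(u64, 4), externIntFieldAlign(32, 4));
    }
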
+/// TODO: avoid linear search by storing these in trailing data of packed struct
+/// types; then packedStructFieldByteOffset can be expressed in terms of bits / 8,
+/// fixing that one too.
+/// https://github.com/ziglang/zig/issues/17178
+pub fn structPackedFieldBitOffset(
+    mod: *Module,
+    struct_type: InternPool.Key.StructType,
+    field_index: usize,
+) u16 {
+    const ip = &mod.intern_pool;
+    assert(struct_type.layout == .Packed);
+    assert(struct_type.haveLayout(ip));
+    var bit_sum: u64 = 0;
+    for (0..struct_type.field_types.len) |i| {
+        if (i == field_index) {
+            return @intCast(bit_sum);
+        }
+        const field_ty = struct_type.field_types.get(ip)[i].toType();
+        bit_sum += field_ty.bitSize(mod);
+    }
+    unreachable; // index out of bounds
+}
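
The linear accumulation above is easy to check in isolation: each field's bit offset is the sum of the bit sizes of the fields before it. A minimal sketch with made-up sizes:

    const std = @import("std");

    fn packedBitOffset(bit_sizes: []const u16, field_index: usize) u16 {
        var bit_sum: u16 = 0;
        for (bit_sizes, 0..) |size, i| {
            if (i == field_index) return bit_sum;
            bit_sum += size;
        }
        unreachable; // index out of bounds
    }

    test "packed field offsets accumulate" {
        // e.g. packed struct { a: u3, b: u5, c: u8 }
        const sizes = [_]u16{ 3, 5, 8 };
        try std.testing.expectEqual(@as(u16, 0), packedBitOffset(&sizes, 0));
        try std.testing.expectEqual(@as(u16, 3), packedBitOffset(&sizes, 1));
        try std.testing.expectEqual(@as(u16, 8), packedBitOffset(&sizes, 2));
    }
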
src/Sema.zig
@@ -2221,8 +2221,8 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS
         const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{});
         errdefer msg.destroy(sema.gpa);
 
-        const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg;
-        const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{
+        const struct_type = mod.typeToStruct(container_ty) orelse break :msg msg;
+        const default_value_src = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
             .index = field_index,
             .range = .value,
         });
@@ -2504,23 +2504,22 @@ fn analyzeAsAlign(
     const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, .{
         .needed_comptime_reason = "alignment must be comptime-known",
     });
-    const alignment: u32 = @intCast(alignment_big); // We coerce to u29 in the prev line.
-    try sema.validateAlign(block, src, alignment);
-    return Alignment.fromNonzeroByteUnits(alignment);
+    return sema.validateAlign(block, src, alignment_big);
 }
 
 fn validateAlign(
     sema: *Sema,
     block: *Block,
     src: LazySrcLoc,
-    alignment: u32,
-) !void {
+    alignment: u64,
+) !Alignment {
     if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{});
     if (!std.math.isPowerOfTwo(alignment)) {
         return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{
             alignment,
         });
     }
+    return Alignment.fromNonzeroByteUnits(alignment);
 }
 
 pub fn resolveAlign(
@@ -2801,26 +2800,26 @@ fn coerceResultPtr(
     }
 }
 
-pub fn analyzeStructDecl(
+pub fn getStructType(
     sema: *Sema,
-    new_decl: *Decl,
-    inst: Zir.Inst.Index,
-    struct_index: Module.Struct.Index,
-) SemaError!void {
+    decl: Module.Decl.Index,
+    namespace: Module.Namespace.Index,
+    zir_index: Zir.Inst.Index,
+) !InternPool.Index {
     const mod = sema.mod;
-    const struct_obj = mod.structPtr(struct_index);
-    const extended = sema.code.instructions.items(.data)[inst].extended;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
+    const extended = sema.code.instructions.items(.data)[zir_index].extended;
     assert(extended.opcode == .struct_decl);
     const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
 
-    struct_obj.known_non_opv = small.known_non_opv;
-    if (small.known_comptime_only) {
-        struct_obj.requires_comptime = .yes;
-    }
-
     var extra_index: usize = extended.operand;
     extra_index += @intFromBool(small.has_src_node);
-    extra_index += @intFromBool(small.has_fields_len);
+    const fields_len = if (small.has_fields_len) blk: {
+        const fields_len = sema.code.extra[extra_index];
+        extra_index += 1;
+        break :blk fields_len;
+    } else 0;
     const decls_len = if (small.has_decls_len) blk: {
         const decls_len = sema.code.extra[extra_index];
         extra_index += 1;
@@ -2837,7 +2836,20 @@ pub fn analyzeStructDecl(
         }
     }
 
-    _ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl);
+    extra_index = try mod.scanNamespace(namespace, extra_index, decls_len, mod.declPtr(decl));
+
+    const ty = try ip.getStructType(gpa, .{
+        .decl = decl,
+        .namespace = namespace.toOptional(),
+        .zir_index = zir_index,
+        .layout = small.layout,
+        .known_non_opv = small.known_non_opv,
+        .is_tuple = small.is_tuple,
+        .fields_len = fields_len,
+        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
+    });
+
+    return ty;
 }
 
 fn zirStructDecl(
@@ -2847,7 +2859,7 @@ fn zirStructDecl(
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
     const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
     const src: LazySrcLoc = if (small.has_src_node) blk: {
         const node_offset: i32 = @bitCast(sema.code.extra[extended.operand]);
@@ -2874,37 +2886,21 @@ fn zirStructDecl(
     const new_namespace = mod.namespacePtr(new_namespace_index);
     errdefer mod.destroyNamespace(new_namespace_index);
 
-    const struct_index = try mod.createStruct(.{
-        .owner_decl = new_decl_index,
-        .fields = .{},
-        .zir_index = inst,
-        .layout = small.layout,
-        .status = .none,
-        .known_non_opv = undefined,
-        .is_tuple = small.is_tuple,
-        .namespace = new_namespace_index,
-    });
-    errdefer mod.destroyStruct(struct_index);
-
     const struct_ty = ty: {
-        const ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
-            .index = struct_index.toOptional(),
-            .namespace = new_namespace_index.toOptional(),
-        } });
+        const ty = try sema.getStructType(new_decl_index, new_namespace_index, inst);
         if (sema.builtin_type_target_index != .none) {
-            mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty);
+            ip.resolveBuiltinType(sema.builtin_type_target_index, ty);
             break :ty sema.builtin_type_target_index;
         }
         break :ty ty;
     };
     // TODO: figure out InternPool removals for incremental compilation
-    //errdefer mod.intern_pool.remove(struct_ty);
+    //errdefer ip.remove(struct_ty);
 
     new_decl.ty = Type.type;
     new_decl.val = struct_ty.toValue();
     new_namespace.ty = struct_ty.toType();
 
-    try sema.analyzeStructDecl(new_decl, inst, struct_index);
     const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
     try mod.finalizeAnonDecl(new_decl_index);
     return decl_val;
@@ -3196,7 +3192,7 @@ fn zirEnumDecl(
         extra_index += 1;
 
         const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
-        if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| {
+        if (incomplete_enum.addFieldName(&mod.intern_pool, field_name)) |other_index| {
             const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
             const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
             const msg = msg: {
@@ -3227,7 +3223,7 @@ fn zirEnumDecl(
             };
             if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
             last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
-            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| {
+            if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
                 const value_src = mod.fieldSrcLoc(new_decl_index, .{
                     .index = field_i,
                     .range = .value,
@@ -3249,7 +3245,7 @@ fn zirEnumDecl(
             else
                 try mod.intValue(int_tag_ty, 0);
             if (overflow != null) break :overflow true;
-            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| {
+            if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
                 const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
                 const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                 const msg = msg: {
@@ -4723,10 +4719,11 @@ fn validateStructInit(
         }
 
         if (root_msg) |msg| {
-            if (mod.typeToStruct(struct_ty)) |struct_obj| {
-                const fqn = try struct_obj.getFullyQualifiedName(mod);
+            if (mod.typeToStruct(struct_ty)) |struct_type| {
+                const decl = mod.declPtr(struct_type.decl.unwrap().?);
+                const fqn = try decl.getFullyQualifiedName(mod);
                 try mod.errNoteNonLazy(
-                    struct_obj.srcLoc(mod),
+                    decl.srcLoc(mod),
                     msg,
                     "struct '{}' declared here",
                     .{fqn.fmt(ip)},
@@ -4853,10 +4850,11 @@ fn validateStructInit(
     }
 
     if (root_msg) |msg| {
-        if (mod.typeToStruct(struct_ty)) |struct_obj| {
-            const fqn = try struct_obj.getFullyQualifiedName(mod);
+        if (mod.typeToStruct(struct_ty)) |struct_type| {
+            const decl = mod.declPtr(struct_type.decl.unwrap().?);
+            const fqn = try decl.getFullyQualifiedName(mod);
             try mod.errNoteNonLazy(
-                struct_obj.srcLoc(mod),
+                decl.srcLoc(mod),
                 msg,
                 "struct '{}' declared here",
                 .{fqn.fmt(ip)},
@@ -5255,14 +5253,14 @@ fn failWithBadMemberAccess(
 fn failWithBadStructFieldAccess(
     sema: *Sema,
     block: *Block,
-    struct_obj: *Module.Struct,
+    struct_type: InternPool.Key.StructType,
     field_src: LazySrcLoc,
     field_name: InternPool.NullTerminatedString,
 ) CompileError {
     const mod = sema.mod;
     const gpa = sema.gpa;
-
-    const fqn = try struct_obj.getFullyQualifiedName(mod);
+    const decl = mod.declPtr(struct_type.decl.unwrap().?);
+    const fqn = try decl.getFullyQualifiedName(mod);
 
     const msg = msg: {
         const msg = try sema.errMsg(
@@ -5272,7 +5270,7 @@ fn failWithBadStructFieldAccess(
             .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) },
         );
         errdefer msg.destroy(gpa);
-        try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{});
+        try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "struct declared here", .{});
         break :msg msg;
     };
     return sema.failWithOwnedErrorMsg(block, msg);
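
With `*Module.Struct` gone, source locations and fully-qualified names are reached through the owner `Decl`, a pattern that repeats throughout this commit. A hypothetical fragment (assumes the struct type has an owner declaration; not standalone):

    const decl = mod.declPtr(struct_type.decl.unwrap().?);
    const fqn = try decl.getFullyQualifiedName(mod);
    const src_loc = decl.srcLoc(mod);
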
@@ -12953,9 +12951,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 }
             },
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
-                assert(struct_obj.haveFieldTypes());
-                break :hf struct_obj.fields.contains(field_name);
+                break :hf struct_type.nameIndex(ip, field_name) != null;
             },
             .union_type => |union_type| {
                 const union_obj = ip.loadUnionType(union_type);
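
Field lookups that used to query the old `fields` hash map now go through `StructType.nameIndex`, which maps an interned name to a position in the parallel field arrays. A linear-scan stand-in that shows only the contract (the real implementation may be backed by a precomputed map):

    const std = @import("std");

    fn nameIndex(field_names: []const u32, name: u32) ?u32 {
        for (field_names, 0..) |n, i| {
            if (n == name) return @as(u32, @intCast(i));
        }
        return null;
    }

    test "interned name to field index" {
        const names = [_]u32{ 11, 22, 33 }; // stand-ins for interned strings
        try std.testing.expectEqual(@as(?u32, 1), nameIndex(&names, 22));
        try std.testing.expectEqual(@as(?u32, null), nameIndex(&names, 44));
    }
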
@@ -16907,7 +16903,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // calling_convention: CallingConvention,
                 (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
                 // alignment: comptime_int,
-                (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(),
+                (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod).toByteUnits(0))).toIntern(),
                 // is_generic: bool,
                 Value.makeBool(func_ty_info.is_generic).toIntern(),
                 // is_var_args: bool,
@@ -17461,7 +17457,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
                 const alignment = switch (layout) {
                     .Auto, .Extern => try sema.unionFieldAlignment(union_obj, @intCast(i)),
-                    .Packed => 0,
+                    .Packed => .none,
                 };
 
                 const field_ty = union_obj.field_types.get(ip)[i];
@@ -17471,7 +17467,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     // type: type,
                     field_ty,
                     // alignment: comptime_int,
-                    (try mod.intValue(Type.comptime_int, alignment)).toIntern(),
+                    (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
                 };
                 field_val.* = try mod.intern(.{ .aggregate = .{
                     .ty = union_field_ty.toIntern(),
@@ -17578,7 +17574,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             };
 
             try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
-            const layout = ty.containerLayout(mod);
 
             var struct_field_vals: []InternPool.Index = &.{};
             defer gpa.free(struct_field_vals);
@@ -17633,7 +17628,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                                 // is_comptime: bool,
                                 Value.makeBool(is_comptime).toIntern(),
                                 // alignment: comptime_int,
-                                (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(),
+                                (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod).toByteUnits(0))).toIntern(),
                             };
                             struct_field_val.* = try mod.intern(.{ .aggregate = .{
                                 .ty = struct_field_ty.toIntern(),
@@ -17645,14 +17640,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     .struct_type => |s| s,
                     else => unreachable,
                 };
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv;
-                struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count());
-
-                for (
-                    struct_field_vals,
-                    struct_obj.fields.keys(),
-                    struct_obj.fields.values(),
-                ) |*field_val, name_nts, field| {
+                struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
+
+                for (struct_field_vals, 0..) |*field_val, i| {
+                    const name_nts = struct_type.fieldName(ip, i).unwrap().?;
+                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                    const field_init = struct_type.fieldInit(ip, i);
+                    const field_is_comptime = struct_type.fieldIsComptime(ip, i);
                     // TODO: write something like getCoercedInts to avoid needing to dupe
                     const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts));
                     const name_val = v: {
@@ -17677,24 +17671,25 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         } });
                     };
 
-                    const opt_default_val = if (field.default_val == .none)
-                        null
-                    else
-                        field.default_val.toValue();
-                    const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
-                    const alignment = field.alignment(mod, layout);
+                    const opt_default_val = if (field_init == .none) null else field_init.toValue();
+                    const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val);
+                    const alignment = mod.structFieldAlignment(
+                        struct_type.field_aligns.get(ip)[i],
+                        field_ty,
+                        struct_type.layout,
+                    );
 
                     const struct_field_fields = .{
                         // name: []const u8,
                         name_val,
                         // type: type,
-                        field.ty.toIntern(),
+                        field_ty.toIntern(),
                         // default_value: ?*const anyopaque,
                         default_val_ptr.toIntern(),
                         // is_comptime: bool,
-                        Value.makeBool(field.is_comptime).toIntern(),
+                        Value.makeBool(field_is_comptime).toIntern(),
                         // alignment: comptime_int,
-                        (try mod.intValue(Type.comptime_int, alignment)).toIntern(),
+                        (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
                     };
                     field_val.* = try mod.intern(.{ .aggregate = .{
                         .ty = struct_field_ty.toIntern(),
@@ -17733,11 +17728,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const backing_integer_val = try mod.intern(.{ .opt = .{
                 .ty = (try mod.optionalType(.type_type)).toIntern(),
-                .val = if (layout == .Packed) val: {
-                    const struct_obj = mod.typeToStruct(ty).?;
-                    assert(struct_obj.haveLayout());
-                    assert(struct_obj.backing_int_ty.isInt(mod));
-                    break :val struct_obj.backing_int_ty.toIntern();
+                .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
+                    assert(packed_struct.backingIntType(ip).toType().isInt(mod));
+                    break :val packed_struct.backingIntType(ip).*;
                 } else .none,
             } });
 
@@ -17754,6 +17747,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t decl.val.toType();
             };
 
+            const layout = ty.containerLayout(mod);
+
             const field_values = [_]InternPool.Index{
                 // layout: ContainerLayout,
                 (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
@@ -18924,9 +18919,8 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             },
             else => {},
         }
-        const abi_align: u32 = @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?);
-        try sema.validateAlign(block, align_src, abi_align);
-        break :blk Alignment.fromByteUnits(abi_align);
+        const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?;
+        break :blk try sema.validateAlign(block, align_src, align_bytes);
     } else .none;
 
     const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
@@ -19291,12 +19285,12 @@ fn finishStructInit(
             }
         },
         .struct_type => |struct_type| {
-            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-            for (struct_obj.fields.values(), 0..) |field, i| {
+            for (0..struct_type.field_types.len) |i| {
                 if (field_inits[i] != .none) continue;
 
-                if (field.default_val == .none) {
-                    const field_name = struct_obj.fields.keys()[i];
+                const field_init = struct_type.field_inits.get(ip)[i];
+                if (field_init == .none) {
+                    const field_name = struct_type.field_names.get(ip)[i];
                     const template = "missing struct field: {}";
                     const args = .{field_name.fmt(ip)};
                     if (root_msg) |msg| {
@@ -19305,7 +19299,7 @@ fn finishStructInit(
                         root_msg = try sema.errMsg(block, init_src, template, args);
                     }
                 } else {
-                    field_inits[i] = Air.internedToRef(field.default_val);
+                    field_inits[i] = Air.internedToRef(field_init);
                 }
             }
         },
@@ -19313,10 +19307,11 @@ fn finishStructInit(
     }
 
     if (root_msg) |msg| {
-        if (mod.typeToStruct(struct_ty)) |struct_obj| {
-            const fqn = try struct_obj.getFullyQualifiedName(mod);
+        if (mod.typeToStruct(struct_ty)) |struct_type| {
+            const decl = mod.declPtr(struct_type.decl.unwrap().?);
+            const fqn = try decl.getFullyQualifiedName(mod);
             try mod.errNoteNonLazy(
-                struct_obj.srcLoc(mod),
+                decl.srcLoc(mod),
                 msg,
                 "struct '{}' declared here",
                 .{fqn.fmt(ip)},
@@ -19848,10 +19843,10 @@ fn fieldType(
                     return Air.internedToRef(anon_struct.types.get(ip)[field_index]);
                 },
                 .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                    const field = struct_obj.fields.get(field_name) orelse
-                        return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
-                    return Air.internedToRef(field.ty.toIntern());
+                    const field_index = struct_type.nameIndex(ip, field_name) orelse
+                        return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
+                    const field_ty = struct_type.field_types.get(ip)[field_index];
+                    return Air.internedToRef(field_ty);
                 },
                 else => unreachable,
             },
@@ -20167,14 +20162,14 @@ fn zirReify(
         .AnyFrame => return sema.failWithUseOfAsync(block, src),
         .EnumLiteral => return .enum_literal_type,
         .Int => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
             const signedness_val = try union_val.val.toValue().fieldValue(
                 mod,
-                fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?,
+                struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?,
             );
             const bits_val = try union_val.val.toValue().fieldValue(
                 mod,
-                fields.getIndex(try ip.getOrPutString(gpa, "bits")).?,
+                struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits")).?,
             );
 
             const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
@@ -20183,11 +20178,13 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .Vector => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "len"),
             ).?);
-            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "child"),
             ).?);
 
@@ -20203,8 +20200,9 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .Float => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const bits_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "bits"),
             ).?);
 
@@ -20220,29 +20218,37 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .Pointer => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const size_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "size"),
             ).?);
-            const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_const_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_const"),
             ).?);
-            const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_volatile_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_volatile"),
             ).?);
-            const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "alignment"),
             ).?);
-            const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const address_space_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "address_space"),
             ).?);
-            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "child"),
             ).?);
-            const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_allowzero"),
             ).?);
-            const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "sentinel"),
             ).?);
 
@@ -20322,14 +20328,17 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .Array => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "len"),
             ).?);
-            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "child"),
             ).?);
-            const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "sentinel"),
             ).?);
 
@@ -20348,8 +20357,9 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .Optional => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "child"),
             ).?);
 
@@ -20359,11 +20369,13 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .ErrorUnion => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const error_set_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "error_set"),
             ).?);
-            const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const payload_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "payload"),
             ).?);
 
@@ -20386,8 +20398,9 @@ fn zirReify(
             try names.ensureUnusedCapacity(sema.arena, len);
             for (0..len) |i| {
                 const elem_val = try payload_val.elemValue(mod, i);
-                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
-                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+                const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "name"),
                 ).?);
 
@@ -20405,20 +20418,25 @@ fn zirReify(
             return Air.internedToRef(ty.toIntern());
         },
         .Struct => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "layout"),
             ).?);
-            const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const backing_integer_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "backing_integer"),
             ).?);
-            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "fields"),
             ).?);
-            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "decls"),
             ).?);
-            const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_tuple_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_tuple"),
             ).?);
 
@@ -20436,17 +20454,21 @@ fn zirReify(
             return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
         },
         .Enum => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "tag_type"),
             ).?);
-            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "fields"),
             ).?);
-            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "decls"),
             ).?);
-            const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_exhaustive"),
             ).?);
 
@@ -20496,11 +20518,13 @@ fn zirReify(
 
             for (0..fields_len) |field_i| {
                 const elem_val = try fields_val.elemValue(mod, field_i);
-                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
-                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+                const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "name"),
                 ).?);
-                const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "value"),
                 ).?);
 
@@ -20515,7 +20539,7 @@ fn zirReify(
                     });
                 }
 
-                if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| {
+                if (incomplete_enum.addFieldName(ip, field_name)) |other_index| {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
                             field_name.fmt(ip),
@@ -20528,7 +20552,7 @@ fn zirReify(
                     return sema.failWithOwnedErrorMsg(block, msg);
                 }
 
-                if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
+                if (incomplete_enum.addFieldValue(ip, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
                         errdefer msg.destroy(gpa);
@@ -20545,8 +20569,9 @@ fn zirReify(
             return decl_val;
         },
         .Opaque => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "decls"),
             ).?);
 
@@ -20594,17 +20619,21 @@ fn zirReify(
             return decl_val;
         },
         .Union => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "layout"),
             ).?);
-            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "tag_type"),
             ).?);
-            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "fields"),
             ).?);
-            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "decls"),
             ).?);
 
@@ -20644,14 +20673,17 @@ fn zirReify(
 
             for (0..fields_len) |i| {
                 const elem_val = try fields_val.elemValue(mod, i);
-                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
-                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+                const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "name"),
                 ).?);
-                const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "type"),
                 ).?);
-                const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "alignment"),
                 ).?);
 
@@ -20812,23 +20844,29 @@ fn zirReify(
             return decl_val;
         },
         .Fn => {
-            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
-            const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+            const calling_convention_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "calling_convention"),
             ).?);
-            const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "alignment"),
             ).?);
-            const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_generic_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_generic"),
             ).?);
-            const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const is_var_args_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "is_var_args"),
             ).?);
-            const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const return_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "return_type"),
             ).?);
-            const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+            const params_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+                ip,
                 try ip.getOrPutString(gpa, "params"),
             ).?);
 
@@ -20844,15 +20882,9 @@ fn zirReify(
             }
 
             const alignment = alignment: {
-                if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
-                    return sema.fail(block, src, "alignment must fit in 'u32'", .{});
-                }
-                const alignment: u29 = @intCast(alignment_val.toUnsignedInt(mod));
-                if (alignment == target_util.defaultFunctionAlignment(target)) {
-                    break :alignment .none;
-                } else {
-                    break :alignment Alignment.fromByteUnits(alignment);
-                }
+                const alignment = try sema.validateAlign(block, src, alignment_val.toUnsignedInt(mod));
+                const default = target_util.defaultFunctionAlignment(target);
+                break :alignment if (alignment == default) .none else alignment;
             };
             const return_type = return_type_val.optionalValue(mod) orelse
                 return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
@@ -20863,14 +20895,17 @@ fn zirReify(
             var noalias_bits: u32 = 0;
             for (param_types, 0..) |*param_type, i| {
                 const elem_val = try params_val.elemValue(mod, i);
-                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
-                const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+                const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "is_generic"),
                 ).?);
-                const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const param_is_noalias_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "is_noalias"),
                 ).?);
-                const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                const opt_param_type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+                    ip,
                     try ip.getOrPutString(gpa, "type"),
                 ).?);
 
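The field names fetched above (`calling_convention`, `alignment`, `is_generic`, `is_var_args`, `return_type`, `params`) are those of `std.builtin.Type.Fn`. A minimal round-trip sketch of the reification this branch services, seen from the user's side:

const std = @import("std");

// Take a function type apart with @typeInfo and rebuild it with @Type;
// zirReify's .Fn branch consumes exactly the payload produced here.
const Proto = fn (u32) callconv(.C) bool;
const F = @Type(@typeInfo(Proto));

comptime {
    std.debug.assert(F == Proto);
}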
@@ -20931,6 +20966,8 @@ fn reifyStruct(
         .Auto => {},
     };
 
+    const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
+
     // Because these three things each reference each other, `undefined`
     // placeholders are used here and are only set once the struct type gains
     // an InternPool index.
@@ -20946,58 +20983,45 @@ fn reifyStruct(
         mod.abortAnonDecl(new_decl_index);
     }
 
-    const new_namespace_index = try mod.createNamespace(.{
-        .parent = block.namespace.toOptional(),
-        .ty = undefined,
-        .file_scope = block.getFileScope(mod),
-    });
-    const new_namespace = mod.namespacePtr(new_namespace_index);
-    errdefer mod.destroyNamespace(new_namespace_index);
-
-    const struct_index = try mod.createStruct(.{
-        .owner_decl = new_decl_index,
-        .fields = .{},
+    const ty = try ip.getStructType(gpa, .{
+        .decl = new_decl_index,
+        .namespace = .none,
         .zir_index = inst,
         .layout = layout,
-        .status = .have_field_types,
         .known_non_opv = false,
+        .fields_len = fields_len,
+        .requires_comptime = .unknown,
         .is_tuple = is_tuple,
-        .namespace = new_namespace_index,
     });
-    const struct_obj = mod.structPtr(struct_index);
-    errdefer mod.destroyStruct(struct_index);
-
-    const struct_ty = try ip.get(gpa, .{ .struct_type = .{
-        .index = struct_index.toOptional(),
-        .namespace = new_namespace_index.toOptional(),
-    } });
     // TODO: figure out InternPool removals for incremental compilation
-    //errdefer ip.remove(struct_ty);
+    //errdefer ip.remove(ty);
+    const struct_type = ip.indexToKey(ty).struct_type;
 
     new_decl.ty = Type.type;
-    new_decl.val = struct_ty.toValue();
-    new_namespace.ty = struct_ty.toType();
+    new_decl.val = ty.toValue();
 
     // Fields
-    const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-    try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
-    var i: usize = 0;
-    while (i < fields_len) : (i += 1) {
+    for (0..fields_len) |i| {
         const elem_val = try fields_val.elemValue(mod, i);
-        const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
-        const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+        const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+        const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+            ip,
             try ip.getOrPutString(gpa, "name"),
         ).?);
-        const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+        const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+            ip,
             try ip.getOrPutString(gpa, "type"),
         ).?);
-        const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+        const default_value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+            ip,
             try ip.getOrPutString(gpa, "default_value"),
         ).?);
-        const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+        const is_comptime_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+            ip,
             try ip.getOrPutString(gpa, "is_comptime"),
         ).?);
-        const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+        const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
+            ip,
             try ip.getOrPutString(gpa, "alignment"),
         ).?);
 
@@ -21033,9 +21057,8 @@ fn reifyStruct(
                 );
             }
         }
-        const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
-        if (gop.found_existing) {
-            // TODO: better source location
+        if (struct_type.addFieldName(ip, field_name)) |prev_index| {
+            _ = prev_index; // TODO: better source location
             return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)});
         }
 
@@ -21051,13 +21074,11 @@ fn reifyStruct(
             return sema.fail(block, src, "comptime field without default initialization value", .{});
         }
 
-        gop.value_ptr.* = .{
-            .ty = field_ty,
-            .abi_align = Alignment.fromByteUnits(abi_align),
-            .default_val = default_val,
-            .is_comptime = is_comptime_val.toBool(),
-            .offset = undefined,
-        };
+        struct_type.field_types.get(ip)[i] = field_ty.toIntern();
+        struct_type.field_aligns.get(ip)[i] = Alignment.fromByteUnits(abi_align);
+        struct_type.field_inits.get(ip)[i] = default_val;
+        if (is_comptime_val.toBool())
+            struct_type.setFieldComptime(ip, i);
 
         if (field_ty.zigTypeTag(mod) == .Opaque) {
             const msg = msg: {
@@ -21079,7 +21100,7 @@ fn reifyStruct(
             };
             return sema.failWithOwnedErrorMsg(block, msg);
         }
-        if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
+        if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
                 errdefer msg.destroy(gpa);
@@ -21091,7 +21112,7 @@ fn reifyStruct(
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(block, msg);
-        } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
+        } else if (layout == .Packed and !(validatePackedType(field_ty, mod))) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
                 errdefer msg.destroy(gpa);
@@ -21107,13 +21128,12 @@ fn reifyStruct(
     }
 
     if (layout == .Packed) {
-        struct_obj.status = .layout_wip;
-
-        for (struct_obj.fields.values(), 0..) |field, index| {
-            sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
+        for (0..struct_type.field_types.len) |index| {
+            const field_ty = struct_type.field_types.get(ip)[index].toType();
+            sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
                 error.AnalysisFail => {
                     const msg = sema.err orelse return err;
-                    try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
+                    try sema.addFieldErrNote(ty.toType(), index, msg, "while checking this field", .{});
                     return err;
                 },
                 else => return err,
@@ -21121,19 +21141,18 @@ fn reifyStruct(
         }
 
         var fields_bit_sum: u64 = 0;
-        for (struct_obj.fields.values()) |field| {
-            fields_bit_sum += field.ty.bitSize(mod);
+        for (struct_type.field_types.get(ip)) |field_ty| {
+            fields_bit_sum += field_ty.toType().bitSize(mod);
         }
 
-        if (backing_int_val.optionalValue(mod)) |payload| {
-            const backing_int_ty = payload.toType();
+        if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| {
+            const backing_int_ty = backing_int_ty_val.toType();
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
-            struct_obj.backing_int_ty = backing_int_ty;
+            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
         } else {
-            struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
+            const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
+            struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
         }
-
-        struct_obj.status = .have_layout;
     }
 
     const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
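From the user's side, `reifyStruct` services `@Type` payloads whose per-field entries carry exactly the names read in the loop above (`name`, `type`, `default_value`, `is_comptime`, `alignment`). A sketch, assuming the `std.builtin.Type.Struct` layout of this era of the compiler:

const std = @import("std");

// Reify a plain two-field struct; decls must be empty for @Type.
const Point = @Type(.{ .Struct = .{
    .layout = .Auto,
    .fields = &[_]std.builtin.Type.StructField{
        .{ .name = "x", .type = i32, .default_value = null, .is_comptime = false, .alignment = @alignOf(i32) },
        .{ .name = "y", .type = i32, .default_value = null, .is_comptime = false, .alignment = @alignOf(i32) },
    },
    .decls = &.{},
    .is_tuple = false,
} });

comptime {
    std.debug.assert(@hasField(Point, "x") and @hasField(Point, "y"));
}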
@@ -21439,8 +21458,9 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
                 const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
                 try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
             }
-            if (ptr_align > 1) {
-                const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern());
+            if (ptr_align.compare(.gt, .@"1")) {
+                const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+                const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
                 const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
                 const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
                 try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
@@ -21458,8 +21478,9 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
                 const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
                 try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
             }
-            if (ptr_align > 1) {
-                const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern());
+            if (ptr_align.compare(.gt, .@"1")) {
+                const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+                const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
                 const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
                 const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
                 try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
@@ -21476,12 +21497,19 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     return block.addAggregateInit(dest_ty, new_elems);
 }
 
-fn ptrFromIntVal(sema: *Sema, block: *Block, operand_src: LazySrcLoc, operand_val: Value, ptr_ty: Type, ptr_align: u32) !Value {
+fn ptrFromIntVal(
+    sema: *Sema,
+    block: *Block,
+    operand_src: LazySrcLoc,
+    operand_val: Value,
+    ptr_ty: Type,
+    ptr_align: Alignment,
+) !Value {
     const mod = sema.mod;
     const addr = operand_val.toUnsignedInt(mod);
     if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
         return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
-    if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
+    if (addr != 0 and ptr_align != .none and !ptr_align.check(addr))
         return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
 
     return switch (ptr_ty.zigTypeTag(mod)) {
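`ptr_align.check(addr)` replaces the old `addr % ptr_align != 0` test. A minimal sketch of how such a check works directly on the log2 representation; the real `InternPool.Alignment` has more members, and the exact member set shown here is an assumption:

const std = @import("std");

const Alignment = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    none = std.math.maxInt(u6),
    _,

    // An address is a multiple of 2^n iff its n low bits are zero,
    // i.e. iff it has at least n trailing zero bits.
    fn check(a: Alignment, addr: u64) bool {
        return @ctz(addr) >= @intFromEnum(a);
    }
};

test "check" {
    try std.testing.expect(Alignment.@"8".check(0x1000));
    try std.testing.expect(!Alignment.@"8".check(0x1004));
}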
@@ -21795,10 +21823,18 @@ fn ptrCastFull(
         // TODO: vector index?
     }
 
-    const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse src_info.child.toType().abiAlignment(mod);
-    const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse dest_info.child.toType().abiAlignment(mod);
+    const src_align = if (src_info.flags.alignment != .none)
+        src_info.flags.alignment
+    else
+        src_info.child.toType().abiAlignment(mod);
+
+    const dest_align = if (dest_info.flags.alignment != .none)
+        dest_info.flags.alignment
+    else
+        dest_info.child.toType().abiAlignment(mod);
+
     if (!flags.align_cast) {
-        if (dest_align > src_align) {
+        if (dest_align.compare(.gt, src_align)) {
             return sema.failWithOwnedErrorMsg(block, msg: {
                 const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
                 errdefer msg.destroy(sema.gpa);
@@ -21891,10 +21927,13 @@ fn ptrCastFull(
             if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) {
                 return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
             }
-            if (dest_align > src_align) {
+            if (dest_align.compare(.gt, src_align)) {
                 if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| {
-                    if (addr % dest_align != 0) {
-                        return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
+                    if (!dest_align.check(addr)) {
+                        return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
+                            addr,
+                            dest_align.toByteUnitsOptional().?,
+                        });
                     }
                 }
             }
@@ -21928,8 +21967,12 @@ fn ptrCastFull(
         try sema.addSafetyCheck(block, src, ok, .cast_to_null);
     }
 
-    if (block.wantSafety() and dest_align > src_align and try sema.typeHasRuntimeBits(dest_info.child.toType())) {
-        const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, dest_align - 1)).toIntern());
+    if (block.wantSafety() and
+        dest_align.compare(.gt, src_align) and
+        try sema.typeHasRuntimeBits(dest_info.child.toType()))
+    {
+        const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
+        const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
         const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
         const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
         const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
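The runtime safety check built above needs no division: for a power-of-two alignment A, `addr % A == addr & (A - 1)`, so the emitted AIR is a single `bit_and` plus a compare against zero. The same check in plain Zig:

const std = @import("std");

// addr is aligned to a power-of-two align_bytes iff masking off the low
// bits leaves nothing, e.g. 0x1010 & 0xF == 0 but 0x1011 & 0xF == 1.
fn isAligned(addr: u64, align_bytes: u64) bool {
    std.debug.assert(std.math.isPowerOfTwo(align_bytes));
    return addr & (align_bytes - 1) == 0;
}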
@@ -22285,6 +22328,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     });
 
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     try sema.resolveTypeLayout(ty);
     switch (ty.zigTypeTag(mod)) {
         .Struct => {},
@@ -22300,7 +22344,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     }
 
     const field_index = if (ty.isTuple(mod)) blk: {
-        if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
+        if (ip.stringEqlSlice(field_name, "len")) {
             return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
         }
         break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
@@ -22313,12 +22357,13 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     switch (ty.containerLayout(mod)) {
         .Packed => {
             var bit_sum: u64 = 0;
-            const fields = ty.structFields(mod);
-            for (fields.values(), 0..) |field, i| {
+            const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
+            for (0..struct_type.field_types.len) |i| {
                 if (i == field_index) {
                     return bit_sum;
                 }
-                bit_sum += field.ty.bitSize(mod);
+                const field_ty = struct_type.field_types.get(ip)[i].toType();
+                bit_sum += field_ty.bitSize(mod);
             } else unreachable;
         },
         else => return ty.structFieldOffset(field_index, mod) * 8,
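The `.Packed` branch answers `@bitOffsetOf` by summing the bit sizes of all fields preceding the requested one, for example:

const std = @import("std");

const S = packed struct { a: u3, b: u5, c: u16 };

comptime {
    std.debug.assert(@bitOffsetOf(S, "a") == 0);
    std.debug.assert(@bitOffsetOf(S, "b") == 3); // after a: u3
    std.debug.assert(@bitOffsetOf(S, "c") == 8); // after a: u3 + b: u5
}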
@@ -23717,8 +23762,8 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
     } else {
         ptr_ty_data.flags.alignment = blk: {
-            if (mod.typeToStruct(parent_ty)) |struct_obj| {
-                break :blk struct_obj.fields.values()[field_index].abi_align;
+            if (mod.typeToStruct(parent_ty)) |struct_type| {
+                break :blk struct_type.field_aligns.get(ip)[field_index];
             } else if (mod.typeToUnion(parent_ty)) |union_obj| {
                 break :blk union_obj.fieldAlign(ip, field_index);
             } else {
@@ -24528,13 +24573,9 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.isGenericPoison()) {
             break :blk null;
         }
-        const alignment: u32 = @intCast(val.toUnsignedInt(mod));
-        try sema.validateAlign(block, align_src, alignment);
-        if (alignment == target_util.defaultFunctionAlignment(target)) {
-            break :blk .none;
-        } else {
-            break :blk Alignment.fromNonzeroByteUnits(alignment);
-        }
+        const alignment = try sema.validateAlign(block, align_src, val.toUnsignedInt(mod));
+        const default = target_util.defaultFunctionAlignment(target);
+        break :blk if (alignment == default) .none else alignment;
     } else if (extra.data.bits.has_align_ref) blk: {
         const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
         extra_index += 1;
@@ -24546,13 +24587,9 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        const alignment: u32 = @intCast(align_tv.val.toUnsignedInt(mod));
-        try sema.validateAlign(block, align_src, alignment);
-        if (alignment == target_util.defaultFunctionAlignment(target)) {
-            break :blk .none;
-        } else {
-            break :blk Alignment.fromNonzeroByteUnits(alignment);
-        }
+        const alignment = try sema.validateAlign(block, align_src, align_tv.val.toUnsignedInt(mod));
+        const default = target_util.defaultFunctionAlignment(target);
+        break :blk if (alignment == default) .none else alignment;
     } else .none;
 
     const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
@@ -25237,16 +25274,17 @@ fn explainWhyTypeIsComptimeInner(
         .Struct => {
             if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
 
-            if (mod.typeToStruct(ty)) |struct_obj| {
-                for (struct_obj.fields.values(), 0..) |field, i| {
-                    const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+            if (mod.typeToStruct(ty)) |struct_type| {
+                for (0..struct_type.field_types.len) |i| {
+                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                    const field_src_loc = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
                         .index = i,
                         .range = .type,
                     });
 
-                    if (try sema.typeRequiresComptime(field.ty)) {
+                    if (try sema.typeRequiresComptime(field_ty)) {
                         try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{});
-                        try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set);
+                        try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field_ty, type_set);
                     }
                 }
             }
@@ -26297,13 +26335,12 @@ fn fieldCallBind(
         switch (concrete_ty.zigTypeTag(mod)) {
             .Struct => {
                 try sema.resolveTypeFields(concrete_ty);
-                if (mod.typeToStruct(concrete_ty)) |struct_obj| {
-                    const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
+                if (mod.typeToStruct(concrete_ty)) |struct_type| {
+                    const field_index = struct_type.nameIndex(ip, field_name) orelse
                         break :find_field;
-                    const field_index: u32 = @intCast(field_index_usize);
-                    const field = struct_obj.fields.values()[field_index];
+                    const field_ty = struct_type.field_types.get(ip)[field_index].toType();
 
-                    return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
+                    return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
                 } else if (concrete_ty.isTuple(mod)) {
                     if (ip.stringEqlSlice(field_name, "len")) {
                         return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) };
@@ -26526,13 +26563,14 @@ fn structFieldPtr(
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     assert(struct_ty.zigTypeTag(mod) == .Struct);
 
     try sema.resolveTypeFields(struct_ty);
     try sema.resolveStructLayout(struct_ty);
 
     if (struct_ty.isTuple(mod)) {
-        if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
+        if (ip.stringEqlSlice(field_name, "len")) {
             const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod));
             return sema.analyzeRef(block, src, len_inst);
         }
@@ -26543,11 +26581,10 @@ fn structFieldPtr(
         return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
     }
 
-    const struct_obj = mod.typeToStruct(struct_ty).?;
+    const struct_type = mod.typeToStruct(struct_ty).?;
 
-    const field_index_big = struct_obj.fields.getIndex(field_name) orelse
-        return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
-    const field_index: u32 = @intCast(field_index_big);
+    const field_index = struct_type.nameIndex(ip, field_name) orelse
+        return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name);
 
     return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing);
 }
@@ -26563,17 +26600,18 @@ fn structFieldPtrByIndex(
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     if (struct_ty.isAnonStruct(mod)) {
         return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
     }
 
-    const struct_obj = mod.typeToStruct(struct_ty).?;
-    const field = struct_obj.fields.values()[field_index];
+    const struct_type = mod.typeToStruct(struct_ty).?;
+    const field_ty = struct_type.field_types.get(ip)[field_index];
     const struct_ptr_ty = sema.typeOf(struct_ptr);
     const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
 
     var ptr_ty_data: InternPool.Key.PtrType = .{
-        .child = field.ty.toIntern(),
+        .child = field_ty,
         .flags = .{
             .is_const = struct_ptr_ty_info.flags.is_const,
             .is_volatile = struct_ptr_ty_info.flags.is_volatile,
@@ -26583,20 +26621,23 @@ fn structFieldPtrByIndex(
 
     const target = mod.getTarget();
 
-    const parent_align = struct_ptr_ty_info.flags.alignment.toByteUnitsOptional() orelse
+    const parent_align = if (struct_ptr_ty_info.flags.alignment != .none)
+        struct_ptr_ty_info.flags.alignment
+    else
         try sema.typeAbiAlignment(struct_ptr_ty_info.child.toType());
 
-    if (struct_obj.layout == .Packed) {
+    if (struct_type.layout == .Packed) {
         comptime assert(Type.packed_struct_layout_version == 2);
 
         var running_bits: u16 = 0;
-        for (struct_obj.fields.values(), 0..) |f, i| {
-            if (!(try sema.typeHasRuntimeBits(f.ty))) continue;
+        for (0..struct_type.field_types.len) |i| {
+            const f_ty = struct_type.field_types.get(ip)[i].toType();
+            if (!(try sema.typeHasRuntimeBits(f_ty))) continue;
 
             if (i == field_index) {
                 ptr_ty_data.packed_offset.bit_offset = running_bits;
             }
-            running_bits += @intCast(f.ty.bitSize(mod));
+            running_bits += @intCast(f_ty.bitSize(mod));
         }
         ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8;
 
@@ -26607,7 +26648,7 @@ fn structFieldPtrByIndex(
             ptr_ty_data.packed_offset.bit_offset += struct_ptr_ty_info.packed_offset.bit_offset;
         }
 
-        ptr_ty_data.flags.alignment = Alignment.fromByteUnits(parent_align);
+        ptr_ty_data.flags.alignment = parent_align;
 
         // If the field happens to be byte-aligned, simplify the pointer type.
         // The pointee type bit size must match its ABI byte size so that loads and stores
@@ -26617,38 +26658,43 @@ fn structFieldPtrByIndex(
         // targets before adding the necessary complications to this code. This will not
         // cause miscompilations; it only means the field pointer uses bit masking when it
         // might not be strictly necessary.
-        if (parent_align != 0 and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
+        if (parent_align != .none and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
             target.cpu.arch.endian() == .Little)
         {
             const elem_size_bytes = ptr_ty_data.child.toType().abiSize(mod);
             const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod);
             if (elem_size_bytes * 8 == elem_size_bits) {
                 const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
-                const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align));
+                const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
                 assert(new_align != .none);
                 ptr_ty_data.flags.alignment = new_align;
                 ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
             }
         }
-    } else if (struct_obj.layout == .Extern) {
+    } else if (struct_type.layout == .Extern) {
        // For extern structs, field alignment might be bigger than the type's natural alignment. E.g., in
        // `extern struct { x: u32, y: u16 }` the second field is aligned as a u32.
         const field_offset = struct_ty.structFieldOffset(field_index, mod);
-        ptr_ty_data.flags.alignment = Alignment.fromByteUnits(
-            if (parent_align == 0) 0 else std.math.gcd(field_offset, parent_align),
-        );
+        ptr_ty_data.flags.alignment = if (parent_align == .none)
+            .none
+        else
+            @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
     } else {
         // Our alignment is capped at the field alignment
-        const field_align = try sema.structFieldAlignment(field, struct_obj.layout);
-        ptr_ty_data.flags.alignment = Alignment.fromByteUnits(@min(field_align, parent_align));
+        const field_align = try sema.structFieldAlignment(
+            struct_type.field_aligns.get(ip)[field_index],
+            field_ty.toType(),
+            struct_type.layout,
+        );
+        ptr_ty_data.flags.alignment = field_align.min(parent_align);
     }
 
     const ptr_field_ty = try mod.ptrType(ptr_ty_data);
 
-    if (field.is_comptime) {
+    if (struct_type.comptime_bits.getBit(ip, field_index)) {
         const val = try mod.intern(.{ .ptr = .{
             .ty = ptr_field_ty.toIntern(),
-            .addr = .{ .comptime_field = field.default_val },
+            .addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
         } });
         return Air.internedToRef(val);
     }
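The extern branch above is the log2 rewrite of the old `std.math.gcd(field_offset, parent_align)`: for powers of two, gcd is a min of exponents, and the largest power-of-two divisor of the offset is its trailing-zero count. A sketch of the arithmetic (the helper name is illustrative, not compiler API):

// parent aligned to 8 (2^3), field at offset 4 (@ctz(4) == 2)
// -> field pointer alignment 2^min(3, 2) == 4 bytes.
fn externFieldAlignLog2(parent_align_log2: u6, field_offset: u64) u6 {
    // An offset of 0 divides everything, so it keeps the parent alignment
    // (in the compiler's version, @ctz(0) == 64 and @min discards it).
    if (field_offset == 0) return parent_align_log2;
    return @min(parent_align_log2, @as(u6, @intCast(@ctz(field_offset))));
}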
@@ -26678,33 +26724,33 @@ fn structFieldVal(
     struct_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     assert(struct_ty.zigTypeTag(mod) == .Struct);
 
     try sema.resolveTypeFields(struct_ty);
-    switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
+    switch (ip.indexToKey(struct_ty.toIntern())) {
         .struct_type => |struct_type| {
-            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-            if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
-
-            const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
-                return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
-            const field_index: u32 = @intCast(field_index_usize);
-            const field = struct_obj.fields.values()[field_index];
+            if (struct_type.isTuple(ip))
+                return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
 
-            if (field.is_comptime) {
-                return Air.internedToRef(field.default_val);
+            const field_index = struct_type.nameIndex(ip, field_name) orelse
+                return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name);
+            if (struct_type.comptime_bits.getBit(ip, field_index)) {
+                return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
             }
 
+            const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+
             if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
-                if (struct_val.isUndef(mod)) return mod.undefRef(field.ty);
-                if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
+                if (struct_val.isUndef(mod)) return mod.undefRef(field_ty);
+                if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
                     return Air.internedToRef(opv.toIntern());
                 }
                 return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern());
             }
 
             try sema.requireRuntimeBlock(block, src, null);
-            return block.addStructFieldVal(struct_byval, field_index, field.ty);
+            return block.addStructFieldVal(struct_byval, field_index, field_ty);
         },
         .anon_struct_type => |anon_struct| {
             if (anon_struct.names.len == 0) {
@@ -26823,9 +26869,12 @@ fn unionFieldPtr(
             .is_volatile = union_ptr_info.flags.is_volatile,
             .address_space = union_ptr_info.flags.address_space,
             .alignment = if (union_obj.getLayout(ip) == .Auto) blk: {
-                const union_align = union_ptr_info.flags.alignment.toByteUnitsOptional() orelse try sema.typeAbiAlignment(union_ty);
+                const union_align = if (union_ptr_info.flags.alignment != .none)
+                    union_ptr_info.flags.alignment
+                else
+                    try sema.typeAbiAlignment(union_ty);
                 const field_align = try sema.unionFieldAlignment(union_obj, field_index);
-                break :blk InternPool.Alignment.fromByteUnits(@min(union_align, field_align));
+                break :blk union_align.min(field_align);
             } else union_ptr_info.flags.alignment,
         },
         .packed_offset = union_ptr_info.packed_offset,
@@ -28266,7 +28315,7 @@ const InMemoryCoercionResult = union(enum) {
     ptr_qualifiers: Qualifiers,
     ptr_allowzero: Pair,
     ptr_bit_range: BitRange,
-    ptr_alignment: IntPair,
+    ptr_alignment: AlignPair,
     double_ptr_to_anyopaque: Pair,
     slice_to_anyopaque: Pair,
 
@@ -28312,6 +28361,11 @@ const InMemoryCoercionResult = union(enum) {
         wanted: u64,
     };
 
+    const AlignPair = struct {
+        actual: Alignment,
+        wanted: Alignment,
+    };
+
     const Size = struct {
         actual: std.builtin.Type.Pointer.Size,
         wanted: std.builtin.Type.Pointer.Size,
@@ -29133,13 +29187,17 @@ fn coerceInMemoryAllowedPtrs(
     if (src_info.flags.alignment != .none or dest_info.flags.alignment != .none or
         dest_info.child != src_info.child)
     {
-        const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse
+        const src_align = if (src_info.flags.alignment != .none)
+            src_info.flags.alignment
+        else
             src_info.child.toType().abiAlignment(mod);
 
-        const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse
+        const dest_align = if (dest_info.flags.alignment != .none)
+            dest_info.flags.alignment
+        else
             dest_info.child.toType().abiAlignment(mod);
 
-        if (dest_align > src_align) {
+        if (dest_align.compare(.gt, src_align)) {
             return InMemoryCoercionResult{ .ptr_alignment = .{
                 .actual = src_align,
                 .wanted = dest_align,
@@ -30378,13 +30436,17 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
     if (inst_info.flags.alignment == .none and dest_info.flags.alignment == .none) return true;
     if (len0) return true;
 
-    const inst_align = inst_info.flags.alignment.toByteUnitsOptional() orelse
+    const inst_align = if (inst_info.flags.alignment != .none)
+        inst_info.flags.alignment
+    else
         inst_info.child.toType().abiAlignment(mod);
 
-    const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse
+    const dest_align = if (dest_info.flags.alignment != .none)
+        dest_info.flags.alignment
+    else
         dest_info.child.toType().abiAlignment(mod);
 
-    if (dest_align > inst_align) {
+    if (dest_align.compare(.gt, inst_align)) {
         in_memory_result.* = .{ .ptr_alignment = .{
             .actual = inst_align,
             .wanted = dest_align,
@@ -30598,7 +30660,7 @@ fn coerceAnonStructToUnion(
         else
             .{ .count = anon_struct_type.names.len },
         .struct_type => |struct_type| name: {
-            const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys();
+            const field_names = struct_type.field_names.get(ip);
             break :name if (field_names.len == 1)
                 .{ .name = field_names[0] }
             else
@@ -30869,8 +30931,8 @@ fn coerceTupleToStruct(
         return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
     }
 
-    const fields = struct_ty.structFields(mod);
-    const field_vals = try sema.arena.alloc(InternPool.Index, fields.count());
+    const struct_type = mod.typeToStruct(struct_ty).?;
+    const field_vals = try sema.arena.alloc(InternPool.Index, struct_type.field_types.len);
     const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
     @memset(field_refs, .none);
 
@@ -30878,10 +30940,7 @@ fn coerceTupleToStruct(
     var runtime_src: ?LazySrcLoc = null;
     const field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
-        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
-            struct_obj.fields.count()
-        else
-            0,
+        .struct_type => |s| s.field_types.len,
         else => unreachable,
     };
     for (0..field_count) |field_index_usize| {
@@ -30893,22 +30952,23 @@ fn coerceTupleToStruct(
                 anon_struct_type.names.get(ip)[field_i]
             else
                 try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
-            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
+            .struct_type => |s| s.field_names.get(ip)[field_i],
             else => unreachable,
         };
         const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
-        const field = fields.values()[field_index];
+        const field_ty = struct_type.field_types.get(ip)[field_index].toType();
         const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
-        const coerced = try sema.coerce(block, field.ty, elem_ref, field_src);
+        const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
         field_refs[field_index] = coerced;
-        if (field.is_comptime) {
+        if (struct_type.comptime_bits.getBit(ip, field_index)) {
             const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
                 return sema.failWithNeededComptime(block, field_src, .{
                     .needed_comptime_reason = "value stored in comptime field must be comptime-known",
                 });
             };
 
-            if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) {
+            const field_init = struct_type.field_inits.get(ip)[field_index].toValue();
+            if (!init_val.eql(field_init, field_ty, sema.mod)) {
                 return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
             }
         }
@@ -30928,10 +30988,10 @@ fn coerceTupleToStruct(
     for (field_refs, 0..) |*field_ref, i| {
         if (field_ref.* != .none) continue;
 
-        const field_name = fields.keys()[i];
-        const field = fields.values()[i];
+        const field_name = struct_type.field_names.get(ip)[i];
+        const field_default_val = struct_type.field_inits.get(ip)[i];
         const field_src = inst_src; // TODO better source location
-        if (field.default_val == .none) {
+        if (field_default_val == .none) {
             const template = "missing struct field: {}";
             const args = .{field_name.fmt(ip)};
             if (root_msg) |msg| {
@@ -30942,9 +31002,9 @@ fn coerceTupleToStruct(
             continue;
         }
         if (runtime_src == null) {
-            field_vals[i] = field.default_val;
+            field_vals[i] = field_default_val;
         } else {
-            field_ref.* = Air.internedToRef(field.default_val);
+            field_ref.* = Air.internedToRef(field_default_val);
         }
     }
 
@@ -30980,10 +31040,7 @@ fn coerceTupleToTuple(
     const ip = &mod.intern_pool;
     const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
-        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
-            struct_obj.fields.count()
-        else
-            0,
+        .struct_type => |struct_type| struct_type.field_types.len,
         else => unreachable,
     };
     const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count);
@@ -30993,10 +31050,7 @@ fn coerceTupleToTuple(
     const inst_ty = sema.typeOf(inst);
     const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
-        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
-            struct_obj.fields.count()
-        else
-            0,
+        .struct_type => |struct_type| struct_type.field_types.len,
         else => unreachable,
     };
     if (src_field_count > dest_field_count) return error.NotCoercible;
@@ -31011,7 +31065,7 @@ fn coerceTupleToTuple(
                 anon_struct_type.names.get(ip)[field_i]
             else
                 try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
-            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
+            .struct_type => |struct_type| struct_type.field_names.get(ip)[field_i],
             else => unreachable,
         };
 
@@ -31019,20 +31073,20 @@ fn coerceTupleToTuple(
             return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{});
 
         const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
-            .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize].toType(),
-            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty,
+            .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize],
+            .struct_type => |struct_type| struct_type.field_types.get(ip)[field_index_usize],
             else => unreachable,
         };
         const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
             .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize],
-            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val,
+            .struct_type => |struct_type| struct_type.field_inits.get(ip)[field_index_usize],
             else => unreachable,
         };
 
         const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);
 
         const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
-        const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
+        const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src);
         field_refs[field_index] = coerced;
         if (default_val != .none) {
             const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
@@ -31041,7 +31095,7 @@ fn coerceTupleToTuple(
                 });
             };
 
-            if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) {
+            if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) {
                 return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
             }
         }
@@ -31063,7 +31117,7 @@ fn coerceTupleToTuple(
 
         const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
             .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i],
-            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val,
+            .struct_type => |struct_type| struct_type.field_inits.get(ip)[i],
             else => unreachable,
         };
 
@@ -33181,12 +33235,17 @@ fn resolvePeerTypesInner(
                 }
 
                // Note that the align can always be non-zero; Module.ptrType will canonicalize it
-                ptr_info.flags.alignment = Alignment.fromByteUnits(@min(
-                    ptr_info.flags.alignment.toByteUnitsOptional() orelse
+                ptr_info.flags.alignment = InternPool.Alignment.min(
+                    if (ptr_info.flags.alignment != .none)
+                        ptr_info.flags.alignment
+                    else
                         ptr_info.child.toType().abiAlignment(mod),
-                    peer_info.flags.alignment.toByteUnitsOptional() orelse
+
+                    if (peer_info.flags.alignment != .none)
+                        peer_info.flags.alignment
+                    else
                         peer_info.child.toType().abiAlignment(mod),
-                ));
+                );
                 if (ptr_info.flags.address_space != peer_info.flags.address_space) {
                     return .{ .conflict = .{
                         .peer_idx_a = first_idx,
@@ -33260,12 +33319,17 @@ fn resolvePeerTypesInner(
                 } };
 
                // Note that the align can always be non-zero; Type.ptr will canonicalize it
-                ptr_info.flags.alignment = Alignment.fromByteUnits(@min(
-                    ptr_info.flags.alignment.toByteUnitsOptional() orelse
+                ptr_info.flags.alignment = Alignment.min(
+                    if (ptr_info.flags.alignment != .none)
+                        ptr_info.flags.alignment
+                    else
                         ptr_info.child.toType().abiAlignment(mod),
-                    peer_info.flags.alignment.toByteUnitsOptional() orelse
+
+                    if (peer_info.flags.alignment != .none)
+                        peer_info.flags.alignment
+                    else
                         peer_info.child.toType().abiAlignment(mod),
-                ));
+                );
 
                 if (ptr_info.flags.address_space != peer_info.flags.address_space) {
                     return generic_err;
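Both peer-resolution hunks above take the smaller of two alignments. On the log2 representation that is a plain integer `@min` on the exponents, which is presumably all `Alignment.min` amounts to:

// min(2^a, 2^b) == 2^min(a, b), so no conversion to byte units is needed.
fn alignMinLog2(a: u6, b: u6) u6 {
    return @min(a, b);
}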
@@ -34191,103 +34255,117 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
 }
 
 fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
-    const mod = sema.mod;
     try sema.resolveTypeFields(ty);
-    if (mod.typeToStruct(ty)) |struct_obj| {
-        switch (struct_obj.status) {
-            .none, .have_field_types => {},
-            .field_types_wip, .layout_wip => {
-                const msg = try Module.ErrorMsg.create(
-                    sema.gpa,
-                    struct_obj.srcLoc(mod),
-                    "struct '{}' depends on itself",
-                    .{ty.fmt(mod)},
-                );
-                return sema.failWithOwnedErrorMsg(null, msg);
+
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
+    const struct_type = mod.typeToStruct(ty) orelse return;
+
+    if (struct_type.haveLayout(ip))
+        return;
+
+    if (struct_type.layout == .Packed) {
+        try semaBackingIntType(mod, struct_type);
+        return;
+    }
+
+    if (struct_type.setLayoutWip(ip)) {
+        const msg = try Module.ErrorMsg.create(
+            sema.gpa,
+            mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
+            "struct '{}' depends on itself",
+            .{ty.fmt(mod)},
+        );
+        return sema.failWithOwnedErrorMsg(null, msg);
+    }
+
+    if (try sema.typeRequiresComptime(ty))
+        return;
+
+    const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len);
+    const sizes = try sema.arena.alloc(u64, struct_type.field_types.len);
+
+    for (aligns, sizes, 0..) |*field_align, *field_size, i| {
+        const field_ty = struct_type.field_types.get(ip)[i].toType();
+        field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) {
+            error.AnalysisFail => {
+                const msg = sema.err orelse return err;
+                try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
+                return err;
             },
-            .have_layout, .fully_resolved_wip, .fully_resolved => return,
-        }
-        const prev_status = struct_obj.status;
-        errdefer if (struct_obj.status == .layout_wip) {
-            struct_obj.status = prev_status;
+            else => return err,
         };
+        field_align.* = try sema.structFieldAlignment(
+            struct_type.fieldAlign(ip, i),
+            field_ty,
+            struct_type.layout,
+        );
+    }
 
-        struct_obj.status = .layout_wip;
-        for (struct_obj.fields.values(), 0..) |field, i| {
-            sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
-                error.AnalysisFail => {
-                    const msg = sema.err orelse return err;
-                    try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
-                    return err;
-                },
-                else => return err,
-            };
-        }
+    if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+        const msg = try Module.ErrorMsg.create(
+            sema.gpa,
+            mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
+            "struct layout depends on it having runtime bits",
+            .{},
+        );
+        return sema.failWithOwnedErrorMsg(null, msg);
+    }
 
-        if (struct_obj.layout == .Packed) {
-            try semaBackingIntType(mod, struct_obj);
+    if (struct_type.hasReorderedFields(ip)) {
+        for (sizes, struct_type.runtime_order.get(ip), 0..) |size, *ro, i| {
+            ro.* = if (size != 0) @enumFromInt(i) else .omitted;
         }
 
-        struct_obj.status = .have_layout;
-        _ = try sema.typeRequiresComptime(ty);
+        const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder;
 
-        if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
-            const msg = try Module.ErrorMsg.create(
-                sema.gpa,
-                struct_obj.srcLoc(mod),
-                "struct layout depends on it having runtime bits",
-                .{},
-            );
-            return sema.failWithOwnedErrorMsg(null, msg);
-        }
+        const AlignSortContext = struct {
+            aligns: []const Alignment,
 
-        if (struct_obj.layout == .Auto and !struct_obj.is_tuple and
-            mod.backendSupportsFeature(.field_reordering))
-        {
-            const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count());
-
-            for (struct_obj.fields.values(), 0..) |field, i| {
-                optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
-                    @intCast(i)
-                else
-                    Module.Struct.omitted_field;
+            fn lessThan(ctx: @This(), a: RuntimeOrder, b: RuntimeOrder) bool {
+                if (a == .omitted) return false;
+                if (b == .omitted) return true;
+                const a_align = ctx.aligns[@intFromEnum(a)];
+                const b_align = ctx.aligns[@intFromEnum(b)];
+                return a_align.compare(.gt, b_align);
             }
+        };
+        mem.sortUnstable(RuntimeOrder, struct_type.runtime_order.get(ip), AlignSortContext{
+            .aligns = aligns,
+        }, AlignSortContext.lessThan);
+    }
 
-            const AlignSortContext = struct {
-                struct_obj: *Module.Struct,
-                sema: *Sema,
-
-                fn lessThan(ctx: @This(), a: u32, b: u32) bool {
-                    const m = ctx.sema.mod;
-                    if (a == Module.Struct.omitted_field) return false;
-                    if (b == Module.Struct.omitted_field) return true;
-                    return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
-                        ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
-                }
-            };
-            mem.sort(u32, optimized_order, AlignSortContext{
-                .struct_obj = struct_obj,
-                .sema = sema,
-            }, AlignSortContext.lessThan);
-            struct_obj.optimized_order = optimized_order.ptr;
-        }
+    // Calculate size, alignment, and field offsets.
+    const offsets = struct_type.offsets.get(ip);
+    var it = struct_type.iterateRuntimeOrder(ip);
+    var offset: u64 = 0;
+    var big_align: Alignment = .none;
+    while (it.next()) |i| {
+        big_align = big_align.max(aligns[i]);
+        offsets[i] = @intCast(aligns[i].forward(offset));
+        offset = offsets[i] + sizes[i];
     }
-    // otherwise it's a tuple; no need to resolve anything
+    struct_type.size(ip).* = @intCast(big_align.forward(offset));
+    const flags = struct_type.flagsPtr(ip);
+    flags.alignment = big_align;
+    flags.layout_resolved = true;
 }
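All of the rounding in the offset loop above happens on log2 exponents. `forward` rounds an address up to the next multiple of the alignment; a sketch (the name mirrors `InternPool.Alignment.forward`, whose exact signature is assumed):

// Round addr up to the next multiple of 2^align_log2.
fn forward(align_log2: u6, addr: u64) u64 {
    const mask = (@as(u64, 1) << align_log2) - 1;
    return (addr + mask) & ~mask;
}

// e.g. two fields with (size, align) = (1, 1) then (4, 4):
//   field 0 at offset 0, field 1 at forward(2, 1) == 4,
//   struct size = forward(2, 4 + 4) == 8, struct alignment = 4.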
 
-fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
+fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) CompileError!void {
     const gpa = mod.gpa;
+    const ip = &mod.intern_pool;
 
     var fields_bit_sum: u64 = 0;
-    for (struct_obj.fields.values()) |field| {
-        fields_bit_sum += field.ty.bitSize(mod);
+    for (0..struct_type.field_types.len) |i| {
+        const field_ty = struct_type.field_types.get(ip)[i].toType();
+        fields_bit_sum += field_ty.bitSize(mod);
     }
 
-    const decl_index = struct_obj.owner_decl;
+    const decl_index = struct_type.decl.unwrap().?;
     const decl = mod.declPtr(decl_index);
 
-    const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
-    const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
+    const zir = mod.namespacePtr(struct_type.namespace.unwrap().?).file_scope.zir;
+    const extended = zir.instructions.items(.data)[struct_type.zir_index].extended;
     assert(extended.opcode == .struct_decl);
     const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
 
@@ -34326,7 +34404,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
             .parent = null,
             .sema = &sema,
             .src_decl = decl_index,
-            .namespace = struct_obj.namespace,
+            .namespace = struct_type.namespace.unwrap() orelse decl.src_namespace,
             .wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
             .instructions = .{},
             .inlining = null,
@@ -34341,13 +34419,13 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
                 break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
             } else {
                 const body = zir.extra[extra_index..][0..backing_int_body_len];
-                const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
+                const ty_ref = try sema.resolveBody(&block, body, struct_type.zir_index);
                 break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
             }
         };
 
         try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
-        struct_obj.backing_int_ty = backing_int_ty;
+        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
         for (comptime_mutable_decls.items) |ct_decl_index| {
             const ct_decl = mod.declPtr(ct_decl_index);
             _ = try ct_decl.internValue(mod);
@@ -34374,7 +34452,8 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
                 .parent = null,
                 .sema = &sema,
                 .src_decl = decl_index,
-                .namespace = struct_obj.namespace,
+                .namespace = struct_type.namespace.unwrap() orelse
+                    mod.declPtr(struct_type.decl.unwrap().?).src_namespace,
                 .wip_capture_scope = undefined,
                 .instructions = .{},
                 .inlining = null,
@@ -34382,7 +34461,8 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
             };
             return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
         }
-        struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
+        const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
+        struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
     }
 }
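
Concretely, the fallback branch gives a packed struct a backing integer exactly as wide as the sum of its field bit sizes, while an explicit `packed struct(T)` must match that sum. A small user-level illustration:

    const std = @import("std");

    const Flags = packed struct {
        a: u3,
        b: bool,
        c: u12,
    }; // 3 + 1 + 12 = 16 bits, so the backing integer defaults to u16

    const Explicit = packed struct(u32) {
        a: u3,
        b: bool,
        c: u12,
        _pad: u16 = 0,
    }; // an explicit backing integer must match the field bit sum exactly

    test "backing integer width" {
        try std.testing.expectEqual(16, @bitSizeOf(Flags));
        try std.testing.expectEqual(32, @bitSizeOf(Explicit));
    }
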
 
@@ -34532,30 +34612,20 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
     try sema.resolveStructLayout(ty);
 
     const mod = sema.mod;
-    try sema.resolveTypeFields(ty);
-    const struct_obj = mod.typeToStruct(ty).?;
+    const ip = &mod.intern_pool;
+    const struct_type = mod.typeToStruct(ty).?;
 
-    switch (struct_obj.status) {
-        .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
-        .fully_resolved_wip, .fully_resolved => return,
-    }
+    if (struct_type.setFullyResolved(ip)) return;
+    errdefer struct_type.clearFullyResolved(ip);
 
-    {
-        // After we have resolve struct layout we have to go over the fields again to
-        // make sure pointer fields get their child types resolved as well.
-        // See also similar code for unions.
-        const prev_status = struct_obj.status;
-        errdefer struct_obj.status = prev_status;
+    // After we have resolved the struct layout, we have to go over the fields
+    // again to make sure pointer fields get their child types resolved as well.
+    // See also similar code for unions.
 
-        struct_obj.status = .fully_resolved_wip;
-        for (struct_obj.fields.values()) |field| {
-            try sema.resolveTypeFully(field.ty);
-        }
-        struct_obj.status = .fully_resolved;
+    for (0..struct_type.field_types.len) |i| {
+        const field_ty = struct_type.field_types.get(ip)[i].toType();
+        try sema.resolveTypeFully(field_ty);
     }
-
-    // And let's not forget comptime-only status.
-    _ = try sema.typeRequiresComptime(ty);
 }
 
 fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
@@ -34591,8 +34661,10 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
 
 pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
+    const ty_ip = ty.toIntern();
 
-    switch (ty.toIntern()) {
+    switch (ty_ip) {
         .var_args_param_type => unreachable,
 
         .none => unreachable,
@@ -34673,20 +34745,15 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
         .empty_struct => unreachable,
         .generic_poison => unreachable,
 
-        else => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
+        else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
             .type_struct,
             .type_struct_ns,
-            .type_union,
-            .simple_type,
-            => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-                .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return;
-                    try sema.resolveTypeFieldsStruct(ty, struct_obj);
-                },
-                .union_type => |union_type| try sema.resolveTypeFieldsUnion(ty, union_type),
-                .simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
-                else => unreachable,
-            },
+            .type_struct_packed,
+            .type_struct_packed_inits,
+            => try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type),
+
+            .type_union => try sema.resolveTypeFieldsUnion(ty_ip.toType(), ip.indexToKey(ty_ip).union_type),
+            .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
             else => {},
         },
     }
@@ -34716,43 +34783,44 @@ fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileErr
 
 fn resolveTypeFieldsStruct(
     sema: *Sema,
-    ty: Type,
-    struct_obj: *Module.Struct,
+    ty: InternPool.Index,
+    struct_type: InternPool.Key.StructType,
 ) CompileError!void {
-    switch (sema.mod.declPtr(struct_obj.owner_decl).analysis) {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
+    // If there is no owner decl, it means the struct has no fields.
+    const owner_decl = struct_type.decl.unwrap() orelse return;
+
+    switch (mod.declPtr(owner_decl).analysis) {
         .file_failure,
         .dependency_failure,
         .sema_failure,
         .sema_failure_retryable,
         => {
             sema.owner_decl.analysis = .dependency_failure;
-            sema.owner_decl.generation = sema.mod.generation;
+            sema.owner_decl.generation = mod.generation;
             return error.AnalysisFail;
         },
         else => {},
     }
-    switch (struct_obj.status) {
-        .none => {},
-        .field_types_wip => {
-            const msg = try Module.ErrorMsg.create(
-                sema.gpa,
-                struct_obj.srcLoc(sema.mod),
-                "struct '{}' depends on itself",
-                .{ty.fmt(sema.mod)},
-            );
-            return sema.failWithOwnedErrorMsg(null, msg);
-        },
-        .have_field_types,
-        .have_layout,
-        .layout_wip,
-        .fully_resolved_wip,
-        .fully_resolved,
-        => return,
+
+    if (struct_type.haveFieldTypes(ip))
+        return;
+
+    if (struct_type.flagsPtr(ip).field_types_wip) {
+        const msg = try Module.ErrorMsg.create(
+            sema.gpa,
+            mod.declPtr(owner_decl).srcLoc(mod),
+            "struct '{}' depends on itself",
+            .{ty.toType().fmt(mod)},
+        );
+        return sema.failWithOwnedErrorMsg(null, msg);
     }
 
-    struct_obj.status = .field_types_wip;
-    errdefer struct_obj.status = .none;
-    try semaStructFields(sema.mod, struct_obj);
+    struct_type.flagsPtr(ip).field_types_wip = true;
+    errdefer struct_type.flagsPtr(ip).field_types_wip = false;
+
+    try semaStructFields(mod, sema.arena, struct_type);
 }
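
The `field_types_wip` flag is what turns a field-type dependency cycle into the "depends on itself" error rather than unbounded recursion. At the language level this is the error for a struct that contains itself by value; routing the cycle through a pointer is fine, because the pointer's size is known:

    const std = @import("std");

    // Would be a compile error: "struct 'S' depends on itself".
    // const S = struct {
    //     next: S,
    // };

    const Node = struct {
        next: ?*Node,
    };

    test "self-reference through a pointer" {
        var node: Node = .{ .next = null };
        node.next = &node; // a node may even point at itself
        try std.testing.expect(node.next.? == &node);
    }
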
 
 fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.UnionType) CompileError!void {
@@ -34936,12 +35004,19 @@ fn resolveInferredErrorSetTy(
     }
 }
 
-fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
+fn semaStructFields(
+    mod: *Module,
+    arena: Allocator,
+    struct_type: InternPool.Key.StructType,
+) CompileError!void {
     const gpa = mod.gpa;
     const ip = &mod.intern_pool;
-    const decl_index = struct_obj.owner_decl;
-    const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
-    const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
+    const decl_index = struct_type.decl.unwrap() orelse return;
+    const decl = mod.declPtr(decl_index);
+    const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
+    const zir = mod.namespacePtr(namespace_index).file_scope.zir;
+    const zir_index = struct_type.zir_index;
+    const extended = zir.instructions.items(.data)[zir_index].extended;
     assert(extended.opcode == .struct_decl);
     const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
     var extra_index: usize = extended.operand;
@@ -34977,18 +35052,16 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
     while (decls_it.next()) |_| {}
     extra_index = decls_it.extra_index;
 
-    if (fields_len == 0) {
-        if (struct_obj.layout == .Packed) {
-            try semaBackingIntType(mod, struct_obj);
-        }
-        struct_obj.status = .have_layout;
-        return;
-    }
-
-    const decl = mod.declPtr(decl_index);
-
-    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
-    defer analysis_arena.deinit();
+    if (fields_len == 0) switch (struct_type.layout) {
+        .Packed => {
+            try semaBackingIntType(mod, struct_type);
+            return;
+        },
+        .Auto, .Extern => {
+            struct_type.flagsPtr(ip).layout_resolved = true;
+            return;
+        },
+    };
 
     var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
     defer comptime_mutable_decls.deinit();
@@ -34996,7 +35069,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
     var sema: Sema = .{
         .mod = mod,
         .gpa = gpa,
-        .arena = analysis_arena.allocator(),
+        .arena = arena,
         .code = zir,
         .owner_decl = decl,
         .owner_decl_index = decl_index,
@@ -35013,7 +35086,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
         .parent = null,
         .sema = &sema,
         .src_decl = decl_index,
-        .namespace = struct_obj.namespace,
+        .namespace = namespace_index,
         .wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
         .instructions = .{},
         .inlining = null,
@@ -35021,9 +35094,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
     };
     defer assert(block_scope.instructions.items.len == 0);
 
-    struct_obj.fields = .{};
-    try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
-
     const Field = struct {
         type_body_len: u32 = 0,
         align_body_len: u32 = 0,
@@ -35031,7 +35101,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
         type_ref: Zir.Inst.Ref = .none,
     };
     const fields = try sema.arena.alloc(Field, fields_len);
+
     var any_inits = false;
+    var any_aligned = false;
 
     {
         const bits_per_field = 4;
@@ -35056,9 +35128,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
 
-            var field_name_zir: ?[:0]const u8 = null;
+            if (is_comptime) struct_type.setFieldComptime(ip, field_i);
+
+            var opt_field_name_zir: ?[:0]const u8 = null;
             if (!small.is_tuple) {
-                field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
+                opt_field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
                 extra_index += 1;
             }
             extra_index += 1; // doc_comment
@@ -35073,37 +35147,27 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             extra_index += 1;
 
             // This string needs to outlive the ZIR code.
-            const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s|
-                s
-            else
-                try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}));
-
-            const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
-            if (gop.found_existing) {
-                const msg = msg: {
-                    const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy;
-                    const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)});
-                    errdefer msg.destroy(gpa);
+            if (opt_field_name_zir) |field_name_zir| {
+                const field_name = try ip.getOrPutString(gpa, field_name_zir);
+                if (struct_type.addFieldName(ip, field_name)) |other_index| {
+                    const msg = msg: {
+                        const field_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i }).lazy;
+                        const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)});
+                        errdefer msg.destroy(gpa);
 
-                    const prev_field_index = struct_obj.fields.getIndex(field_name).?;
-                    const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index });
-                    try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
-                    try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
-                    break :msg msg;
-                };
-                return sema.failWithOwnedErrorMsg(&block_scope, msg);
+                        const prev_field_src = mod.fieldSrcLoc(decl_index, .{ .index = other_index });
+                        try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
+                        try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
+                        break :msg msg;
+                    };
+                    return sema.failWithOwnedErrorMsg(&block_scope, msg);
+                }
             }
-            gop.value_ptr.* = .{
-                .ty = Type.noreturn,
-                .abi_align = .none,
-                .default_val = .none,
-                .is_comptime = is_comptime,
-                .offset = undefined,
-            };
 
             if (has_align) {
                 fields[field_i].align_body_len = zir.extra[extra_index];
                 extra_index += 1;
+                any_aligned = true;
             }
             if (has_init) {
                 fields[field_i].init_body_len = zir.extra[extra_index];
@@ -35122,7 +35186,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             if (zir_field.type_ref != .none) {
                 break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
                     error.NeededSourceLocation => {
-                        const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                        const ty_src = mod.fieldSrcLoc(decl_index, .{
                             .index = field_i,
                             .range = .type,
                         }).lazy;
@@ -35135,10 +35199,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             assert(zir_field.type_body_len != 0);
             const body = zir.extra[extra_index..][0..zir_field.type_body_len];
             extra_index += body.len;
-            const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
+            const ty_ref = try sema.resolveBody(&block_scope, body, zir_index);
             break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) {
                 error.NeededSourceLocation => {
-                    const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                    const ty_src = mod.fieldSrcLoc(decl_index, .{
                         .index = field_i,
                         .range = .type,
                     }).lazy;
@@ -35152,12 +35216,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             return error.GenericPoison;
         }
 
-        const field = &struct_obj.fields.values()[field_i];
-        field.ty = field_ty;
+        struct_type.field_types.get(ip)[field_i] = field_ty.toIntern();
 
         if (field_ty.zigTypeTag(mod) == .Opaque) {
             const msg = msg: {
-                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                const ty_src = mod.fieldSrcLoc(decl_index, .{
                     .index = field_i,
                     .range = .type,
                 }).lazy;
@@ -35171,7 +35234,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
         }
         if (field_ty.zigTypeTag(mod) == .NoReturn) {
             const msg = msg: {
-                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                const ty_src = mod.fieldSrcLoc(decl_index, .{
                     .index = field_i,
                     .range = .type,
                 }).lazy;
@@ -35183,45 +35246,49 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             };
             return sema.failWithOwnedErrorMsg(&block_scope, msg);
         }
-        if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) {
-            const msg = msg: {
-                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
-                    .index = field_i,
-                    .range = .type,
-                });
-                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
-                errdefer msg.destroy(sema.gpa);
+        switch (struct_type.layout) {
+            .Extern => if (!try sema.validateExternType(field_ty, .struct_field)) {
+                const msg = msg: {
+                    const ty_src = mod.fieldSrcLoc(decl_index, .{
+                        .index = field_i,
+                        .range = .type,
+                    });
+                    const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+                    errdefer msg.destroy(sema.gpa);
 
-                try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field);
+                    try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field);
 
-                try sema.addDeclaredHereNote(msg, field.ty);
-                break :msg msg;
-            };
-            return sema.failWithOwnedErrorMsg(&block_scope, msg);
-        } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) {
-            const msg = msg: {
-                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
-                    .index = field_i,
-                    .range = .type,
-                });
-                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
-                errdefer msg.destroy(sema.gpa);
+                    try sema.addDeclaredHereNote(msg, field_ty);
+                    break :msg msg;
+                };
+                return sema.failWithOwnedErrorMsg(&block_scope, msg);
+            },
+            .Packed => if (!validatePackedType(field_ty, mod)) {
+                const msg = msg: {
+                    const ty_src = mod.fieldSrcLoc(decl_index, .{
+                        .index = field_i,
+                        .range = .type,
+                    });
+                    const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+                    errdefer msg.destroy(sema.gpa);
 
-                try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty);
+                    try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);
 
-                try sema.addDeclaredHereNote(msg, field.ty);
-                break :msg msg;
-            };
-            return sema.failWithOwnedErrorMsg(&block_scope, msg);
+                    try sema.addDeclaredHereNote(msg, field_ty);
+                    break :msg msg;
+                };
+                return sema.failWithOwnedErrorMsg(&block_scope, msg);
+            },
+            else => {},
         }
 
         if (zir_field.align_body_len > 0) {
             const body = zir.extra[extra_index..][0..zir_field.align_body_len];
             extra_index += body.len;
-            const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
-            field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
+            const align_ref = try sema.resolveBody(&block_scope, body, zir_index);
+            const field_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
                 error.NeededSourceLocation => {
-                    const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                    const align_src = mod.fieldSrcLoc(decl_index, .{
                         .index = field_i,
                         .range = .alignment,
                     }).lazy;
@@ -35230,36 +35297,38 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
                 },
                 else => |e| return e,
             };
+            struct_type.field_aligns.get(ip)[field_i] = field_align;
         }
 
         extra_index += zir_field.init_body_len;
     }
 
-    struct_obj.status = .have_field_types;
+    // TODO: there seems to be no mechanism to catch when an init depends on
+    // another init that hasn't been resolved.
 
     if (any_inits) {
         extra_index = bodies_index;
         for (fields, 0..) |zir_field, field_i| {
+            const field_ty = struct_type.field_types.get(ip)[field_i].toType();
             extra_index += zir_field.type_body_len;
             extra_index += zir_field.align_body_len;
             if (zir_field.init_body_len > 0) {
                 const body = zir.extra[extra_index..][0..zir_field.init_body_len];
                 extra_index += body.len;
-                const init = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
-                const field = &struct_obj.fields.values()[field_i];
-                const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) {
+                const init = try sema.resolveBody(&block_scope, body, zir_index);
+                const coerced = sema.coerce(&block_scope, field_ty, init, .unneeded) catch |err| switch (err) {
                     error.NeededSourceLocation => {
-                        const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                        const init_src = mod.fieldSrcLoc(decl_index, .{
                             .index = field_i,
                             .range = .value,
                         }).lazy;
-                        _ = try sema.coerce(&block_scope, field.ty, init, init_src);
+                        _ = try sema.coerce(&block_scope, field_ty, init, init_src);
                         unreachable;
                     },
                     else => |e| return e,
                 };
                 const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
-                    const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
+                    const init_src = mod.fieldSrcLoc(decl_index, .{
                         .index = field_i,
                         .range = .value,
                     }).lazy;
@@ -35267,7 +35336,8 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
                         .needed_comptime_reason = "struct field default value must be comptime-known",
                     });
                 };
-                field.default_val = try default_val.intern(field.ty, mod);
+                const field_init = try default_val.intern(field_ty, mod);
+                struct_type.field_inits.get(ip)[field_i] = field_init;
             }
         }
     }
@@ -35275,8 +35345,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
         const ct_decl = mod.declPtr(ct_decl_index);
         _ = try ct_decl.internValue(mod);
     }
-
-    struct_obj.have_field_inits = true;
 }
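
The init loop above is also where the rule comes from that a struct field default must be comptime-known: the coerced default value is interned alongside the field type. A user-level illustration, with the failing line left commented out:

    const std = @import("std");

    var runtime_value: u8 = 42; // mutable global: not comptime-known

    const Config = struct {
        retries: u8 = 3, // ok: comptime-known default
        // limit: u8 = runtime_value, // error: struct field default value
        //                            // must be comptime-known
    };

    test "defaults are filled in at initialization" {
        const c: Config = .{};
        try std.testing.expectEqual(@as(u8, 3), c.retries);
    }
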
 
 fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void {
@@ -36060,6 +36128,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             .type_struct,
             .type_struct_ns,
             .type_struct_anon,
+            .type_struct_packed,
+            .type_struct_packed_inits,
             .type_tuple_anon,
             .type_union,
             => switch (ip.indexToKey(ty.toIntern())) {
@@ -36081,41 +36151,46 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
 
                 .struct_type => |struct_type| {
                     try sema.resolveTypeFields(ty);
-                    if (mod.structPtrUnwrap(struct_type.index)) |s| {
-                        const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count());
-                        for (field_vals, s.fields.values(), 0..) |*field_val, field, i| {
-                            if (field.is_comptime) {
-                                field_val.* = field.default_val;
-                                continue;
-                            }
-                            if (field.ty.eql(ty, mod)) {
-                                const msg = try Module.ErrorMsg.create(
-                                    sema.gpa,
-                                    s.srcLoc(mod),
-                                    "struct '{}' depends on itself",
-                                    .{ty.fmt(mod)},
-                                );
-                                try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
-                                return sema.failWithOwnedErrorMsg(null, msg);
-                            }
-                            if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| {
-                                field_val.* = try field_opv.intern(field.ty, mod);
-                            } else return null;
-                        }
 
-                        // In this case the struct has no runtime-known fields and
+                    if (struct_type.field_types.len == 0) {
+                        // In this case the struct has no fields at all and
                         // therefore has one possible value.
                         return (try mod.intern(.{ .aggregate = .{
                             .ty = ty.toIntern(),
-                            .storage = .{ .elems = field_vals },
+                            .storage = .{ .elems = &.{} },
                         } })).toValue();
                     }
 
-                    // In this case the struct has no fields at all and
+                    const field_vals = try sema.arena.alloc(
+                        InternPool.Index,
+                        struct_type.field_types.len,
+                    );
+                    for (field_vals, 0..) |*field_val, i| {
+                        if (struct_type.comptime_bits.getBit(ip, i)) {
+                            field_val.* = struct_type.field_inits.get(ip)[i];
+                            continue;
+                        }
+                        const field_ty = struct_type.field_types.get(ip)[i].toType();
+                        if (field_ty.eql(ty, mod)) {
+                            const msg = try Module.ErrorMsg.create(
+                                sema.gpa,
+                                mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
+                                "struct '{}' depends on itself",
+                                .{ty.fmt(mod)},
+                            );
+                            try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
+                            return sema.failWithOwnedErrorMsg(null, msg);
+                        }
+                        if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
+                            field_val.* = try field_opv.intern(field_ty, mod);
+                        } else return null;
+                    }
+
+                    // In this case the struct has no runtime-known fields and
                     // therefore has one possible value.
                     return (try mod.intern(.{ .aggregate = .{
                         .ty = ty.toIntern(),
-                        .storage = .{ .elems = &.{} },
+                        .storage = .{ .elems = field_vals },
                     } })).toValue();
                 },
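
The reordered branches keep the two one-possible-value cases distinct: a struct with zero fields trivially has a single value, and a struct whose every field is comptime or itself single-valued collapses the same way. Both are observable from user code as zero-sized types:

    const std = @import("std");

    const Empty = struct {}; // no fields: exactly one value
    const Collapsed = struct {
        a: u0,   // only possible value is 0
        b: void, // only possible value is {}
    };

    test "one possible value means zero runtime size" {
        try std.testing.expectEqual(0, @sizeOf(Empty));
        try std.testing.expectEqual(0, @sizeOf(Collapsed));
    }
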
 
@@ -36574,25 +36649,32 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 => true,
             },
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                switch (struct_obj.requires_comptime) {
+                if (struct_type.layout == .Packed) {
+                    // packed structs cannot be comptime-only because they have a well-defined
+                    // memory layout and every field has a well-defined bit pattern.
+                    return false;
+                }
+                switch (struct_type.flagsPtr(ip).requires_comptime) {
                     .no, .wip => return false,
                     .yes => return true,
                     .unknown => {
-                        if (struct_obj.status == .field_types_wip)
+                        if (struct_type.flagsPtr(ip).field_types_wip)
                             return false;
 
-                        try sema.resolveTypeFieldsStruct(ty, struct_obj);
+                        try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
+
+                        struct_type.flagsPtr(ip).requires_comptime = .wip;
 
-                        struct_obj.requires_comptime = .wip;
-                        for (struct_obj.fields.values()) |field| {
-                            if (field.is_comptime) continue;
-                            if (try sema.typeRequiresComptime(field.ty)) {
-                                struct_obj.requires_comptime = .yes;
+                        for (0..struct_type.field_types.len) |i_usize| {
+                            const i: u32 = @intCast(i_usize);
+                            if (struct_type.fieldIsComptime(ip, i)) continue;
+                            const field_ty = struct_type.field_types.get(ip)[i];
+                            if (try sema.typeRequiresComptime(field_ty.toType())) {
+                                struct_type.setRequiresComptime(ip);
                                 return true;
                             }
                         }
-                        struct_obj.requires_comptime = .no;
+                        struct_type.flagsPtr(ip).requires_comptime = .no;
                         return false;
                     },
                 }
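
Two details worth noting: the `.wip` state breaks cycles the same way `field_types_wip` does (a struct being asked about itself answers "no" in the meantime), and packed structs now short-circuit to `false`, since every packed field has a defined bit pattern. At the user level, comptime-only-ness typically comes from fields of type `type` or `comptime_int`:

    const std = @import("std");

    const ComptimeOnly = struct {
        T: type, // a field of type 'type' makes the struct comptime-only
    };

    const RuntimeBits = packed struct {
        bits: u8, // packed: always has a runtime bit pattern
    };

    comptime {
        // Fine at comptime; a runtime `var x: ComptimeOnly` would not compile.
        var c: ComptimeOnly = .{ .T = u32 };
        c.T = u64;
    }

    test "packed structs are never comptime-only" {
        var r: RuntimeBits = .{ .bits = 0 };
        r.bits += 1;
        try std.testing.expectEqual(@as(u8, 1), r.bits);
    }
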
@@ -36673,40 +36755,41 @@ fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
     return ty.abiSize(sema.mod);
 }
 
-fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
+fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
     return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
 }
 
 /// Not valid to call for packed unions.
 /// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
-/// TODO: this returns alignment in byte units should should be a u64
-fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !u32 {
+fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !Alignment {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a);
+    const field_align = u.fieldAlign(ip, field_index);
+    if (field_align != .none) return field_align;
     const field_ty = u.field_types.get(ip)[field_index].toType();
-    if (field_ty.isNoReturn(sema.mod)) return 0;
-    return @intCast(try sema.typeAbiAlignment(field_ty));
+    if (field_ty.isNoReturn(sema.mod)) return .none;
+    return sema.typeAbiAlignment(field_ty);
 }
 
-/// Keep implementation in sync with `Module.Struct.Field.alignment`.
-fn structFieldAlignment(sema: *Sema, field: Module.Struct.Field, layout: std.builtin.Type.ContainerLayout) !u32 {
+/// Keep implementation in sync with `Module.structFieldAlignment`.
+fn structFieldAlignment(
+    sema: *Sema,
+    explicit_alignment: InternPool.Alignment,
+    field_ty: Type,
+    layout: std.builtin.Type.ContainerLayout,
+) !Alignment {
+    if (explicit_alignment != .none)
+        return explicit_alignment;
     const mod = sema.mod;
-    if (field.abi_align.toByteUnitsOptional()) |a| {
-        assert(layout != .Packed);
-        return @intCast(a);
-    }
     switch (layout) {
-        .Packed => return 0,
-        .Auto => if (mod.getTarget().ofmt != .c) {
-            return sema.typeAbiAlignment(field.ty);
-        },
+        .Packed => return .none,
+        .Auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
         .Extern => {},
     }
     // extern
-    const ty_abi_align = try sema.typeAbiAlignment(field.ty);
-    if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
-        return @max(ty_abi_align, 16);
+    const ty_abi_align = try sema.typeAbiAlignment(field_ty);
+    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
+        return ty_abi_align.max(.@"16");
     }
     return ty_abi_align;
 }
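
The precedence encoded here: an explicit `align(N)` on a field always wins; otherwise packed fields carry no individual alignment, auto-layout fields take the type's ABI alignment, and extern layout (or the C object format) additionally bumps 128-bit-or-wider integer fields to at least 16 bytes per the C ABI. A quick observable check; since the extern value is target-dependent, the test only asserts the lower bound:

    const std = @import("std");

    const Auto = struct { x: u64 };
    const Explicit = struct { x: u64 align(16) };
    const Ext = extern struct { x: u128 };

    test "field alignment precedence" {
        try std.testing.expectEqual(@alignOf(u64), @alignOf(Auto)); // ABI alignment
        try std.testing.expectEqual(16, @alignOf(Explicit)); // explicit align(N) wins
        try std.testing.expect(@alignOf(Ext) >= 16); // wide integer field in extern struct
    }
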
@@ -36752,14 +36835,14 @@ fn structFieldIndex(
     field_src: LazySrcLoc,
 ) !u32 {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     try sema.resolveTypeFields(struct_ty);
     if (struct_ty.isAnonStruct(mod)) {
         return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
     } else {
-        const struct_obj = mod.typeToStruct(struct_ty).?;
-        const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
-            return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
-        return @intCast(field_index_usize);
+        const struct_type = mod.typeToStruct(struct_ty).?;
+        return struct_type.nameIndex(ip, field_name) orelse
+            return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
     }
 }
 
@@ -36776,13 +36859,7 @@ fn anonStructFieldIndex(
         .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
             if (name == field_name) return @intCast(i);
         },
-        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
-            for (struct_obj.fields.keys(), 0..) |name, i| {
-                if (name == field_name) {
-                    return @intCast(i);
-                }
-            }
-        },
+        .struct_type => |struct_type| if (struct_type.nameIndex(ip, field_name)) |i| return i,
         else => unreachable,
     }
     return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
@@ -37167,8 +37244,8 @@ fn intFitsInType(
                     // If it is u16 or bigger we know the alignment fits without resolving it.
                     if (info.bits >= max_needed_bits) return true;
                     const x = try sema.typeAbiAlignment(lazy_ty.toType());
-                    if (x == 0) return true;
-                    const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
+                    if (x == .none) return true;
+                    const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed);
                     return info.bits >= actual_needed_bits;
                 },
                 .lazy_size => |lazy_ty| {
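
Worked through: if the lazily resolved alignment comes out to 8 bytes, `toLog2Units()` is 3, so representing the value 8 (0b1000) takes 3 + 1 = 4 bits unsigned, or 5 bits signed, and `info.bits` only has to meet that bound.
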
@@ -37381,7 +37458,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
 
     const vector_info: struct {
         host_size: u16 = 0,
-        alignment: u32 = 0,
+        alignment: Alignment = .none,
         vector_index: VI = .none,
     } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: {
         const elem_bits = elem_ty.bitSize(mod);
@@ -37391,7 +37468,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
 
         break :blk .{
             .host_size = @intCast(parent_ty.arrayLen(mod)),
-            .alignment = @intCast(parent_ty.abiAlignment(mod)),
+            .alignment = parent_ty.abiAlignment(mod),
             .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
         };
     } else .{};
@@ -37399,9 +37476,9 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
     const alignment: Alignment = a: {
         // Calculate the new pointer alignment.
         if (ptr_info.flags.alignment == .none) {
-            if (vector_info.alignment != 0) break :a Alignment.fromNonzeroByteUnits(vector_info.alignment);
-            // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
-            break :a .none;
+            // For a vector element pointer this is the parent vector's
+            // alignment; otherwise the pointer is ABI-aligned, and any
+            // pointer arithmetic maintains that ABI-alignedness.
+            break :a vector_info.alignment;
         }
         // If the addend is not a comptime-known value we can still count on
         // it being a multiple of the type size.
@@ -37413,7 +37490,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         // non zero).
         const new_align: Alignment = @enumFromInt(@min(
             @ctz(addend),
-            @intFromEnum(ptr_info.flags.alignment),
+            ptr_info.flags.alignment.toLog2Units(),
         ));
         assert(new_align != .none);
         break :a new_align;
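
The `@ctz` computation is the log2 form of "a sum is aligned to the largest power of two dividing both terms": the addend `elem_size * offset` contributes its trailing-zero count, capped by the pointer's own log2 alignment. A small check of the arithmetic, with plain integers standing in for `Alignment` values:

    const std = @import("std");

    test "alignment of ptr + addend is the min of the two log2 exponents" {
        const ptr_log2: u6 = 3; // pointer known to be 8-byte aligned
        const addend: u64 = 4 * 7; // elem_size * index: only the factor 4 is guaranteed
        const new_log2 = @min(@ctz(addend), ptr_log2);
        // 28 = 0b11100 has two trailing zeros, so the result is 4-byte aligned.
        try std.testing.expectEqual(@as(u64, 2), new_log2);
    }
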
src/target.zig
@@ -1,6 +1,7 @@
 const std = @import("std");
 const Type = @import("type.zig").Type;
 const AddressSpace = std.builtin.AddressSpace;
+const Alignment = @import("InternPool.zig").Alignment;
 
 pub const ArchOsAbi = struct {
     arch: std.Target.Cpu.Arch,
@@ -595,13 +596,13 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
 }
 
-/// This function returns 1 if function alignment is not observable or settable.
+/// This function returns `.@"1"` if function alignment is not observable or settable.
-pub fn defaultFunctionAlignment(target: std.Target) u32 {
+pub fn defaultFunctionAlignment(target: std.Target) Alignment {
     return switch (target.cpu.arch) {
-        .arm, .armeb => 4,
-        .aarch64, .aarch64_32, .aarch64_be => 4,
-        .sparc, .sparcel, .sparc64 => 4,
-        .riscv64 => 2,
-        else => 1,
+        .arm, .armeb => .@"4",
+        .aarch64, .aarch64_32, .aarch64_be => .@"4",
+        .sparc, .sparcel, .sparc64 => .@"4",
+        .riscv64 => .@"2",
+        else => .@"1",
     };
 }
 
src/type.zig
@@ -9,6 +9,7 @@ const target_util = @import("target.zig");
 const TypedValue = @import("TypedValue.zig");
 const Sema = @import("Sema.zig");
 const InternPool = @import("InternPool.zig");
+const Alignment = InternPool.Alignment;
 
 /// Both types and values are canonically represented by a single 32-bit integer
 /// which is an index into an `InternPool` data structure.
@@ -196,7 +197,9 @@ pub const Type = struct {
                     info.packed_offset.host_size != 0 or
                     info.flags.vector_index != .none)
                 {
-                    const alignment = info.flags.alignment.toByteUnitsOptional() orelse
+                    const alignment = if (info.flags.alignment != .none)
+                        info.flags.alignment
+                    else
                         info.child.toType().abiAlignment(mod);
                     try writer.print("align({d}", .{alignment});
 
@@ -315,8 +318,8 @@ pub const Type = struct {
                 .generic_poison => unreachable,
             },
             .struct_type => |struct_type| {
-                if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
-                    const decl = mod.declPtr(struct_obj.owner_decl);
+                if (struct_type.decl.unwrap()) |decl_index| {
+                    const decl = mod.declPtr(decl_index);
                     try decl.renderFullyQualifiedName(mod, writer);
                 } else if (struct_type.namespace.unwrap()) |namespace_index| {
                     const namespace = mod.namespacePtr(namespace_index);
@@ -561,24 +564,20 @@ pub const Type = struct {
                     .generic_poison => unreachable,
                 },
                 .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
-                        // This struct has no fields.
-                        return false;
-                    };
-                    if (struct_obj.status == .field_types_wip) {
+                    if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
                         // In this case, we guess that hasRuntimeBits() for this type is true,
                         // and then later if our guess was incorrect, we emit a compile error.
-                        struct_obj.assumed_runtime_bits = true;
                         return true;
                     }
                     switch (strat) {
                         .sema => |sema| _ = try sema.resolveTypeFields(ty),
-                        .eager => assert(struct_obj.haveFieldTypes()),
-                        .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy,
+                        .eager => assert(struct_type.haveFieldTypes(ip)),
+                        .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
                     }
-                    for (struct_obj.fields.values()) |field| {
-                        if (field.is_comptime) continue;
-                        if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+                    for (0..struct_type.field_types.len) |i| {
+                        if (struct_type.comptime_bits.getBit(ip, i)) continue;
+                        const field_ty = struct_type.field_types.get(ip)[i].toType();
+                        if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
                             return true;
                     } else {
                         return false;
@@ -728,11 +727,8 @@ pub const Type = struct {
                 => false,
             },
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
-                    // Struct with no fields has a well-defined layout of no bits.
-                    return true;
-                };
-                return struct_obj.layout != .Auto;
+                // Structs with no fields have a well-defined layout of no bits.
+                return struct_type.layout != .Auto or struct_type.field_types.len == 0;
             },
             .union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
                 .none, .safety => union_type.flagsPtr(ip).layout != .Auto,
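
For context on what "well-defined layout" buys at the language level: packed and extern structs (and now any zero-field struct) have a guaranteed in-memory representation, which is what makes operations like `@bitCast` legal on them, while auto layout leaves the compiler free to reorder fields, exactly as the runtime-order sorting earlier in this commit does. A small demonstration:

    const std = @import("std");

    const Pair = packed struct { lo: u8, hi: u8 }; // lo occupies the low bits

    test "packed layout is well-defined, so @bitCast is allowed" {
        const p: Pair = @bitCast(@as(u16, 0x1234));
        try std.testing.expectEqual(@as(u8, 0x34), p.lo);
        try std.testing.expectEqual(@as(u8, 0x12), p.hi);
    }
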
@@ -806,22 +802,23 @@ pub const Type = struct {
         return mod.intern_pool.isNoReturn(ty.toIntern());
     }
 
-    /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
-    pub fn ptrAlignment(ty: Type, mod: *Module) u32 {
+    /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
+    pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
         return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
     }
 
-    pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
+    pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| {
-                if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| {
-                    return @as(u32, @intCast(a));
-                } else if (opt_sema) |sema| {
+                if (ptr_type.flags.alignment != .none)
+                    return ptr_type.flags.alignment;
+
+                if (opt_sema) |sema| {
                     const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
                     return res.scalar;
-                } else {
-                    return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
                 }
+
+                return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
             },
             .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema),
             else => unreachable,
@@ -836,8 +833,8 @@ pub const Type = struct {
         };
     }
 
-    /// Returns 0 for 0-bit types.
-    pub fn abiAlignment(ty: Type, mod: *Module) u32 {
+    /// Returns `none` for 0-bit types.
+    pub fn abiAlignment(ty: Type, mod: *Module) Alignment {
         return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
     }
 
@@ -846,12 +843,12 @@ pub const Type = struct {
     pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
         switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
             .val => |val| return val,
-            .scalar => |x| return mod.intValue(Type.comptime_int, x),
+            .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnitsOptional().?),
         }
     }
 
     pub const AbiAlignmentAdvanced = union(enum) {
-        scalar: u32,
+        scalar: Alignment,
         val: Value,
     };
 
@@ -881,36 +878,36 @@ pub const Type = struct {
         };
 
         switch (ty.toIntern()) {
-            .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
+            .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .none },
             else => switch (ip.indexToKey(ty.toIntern())) {
                 .int_type => |int_type| {
-                    if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
-                    return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) };
+                    if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .none };
+                    return .{ .scalar = intAbiAlignment(int_type.bits, target) };
                 },
                 .ptr_type, .anyframe_type => {
-                    return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
+                    return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
                 },
                 .array_type => |array_type| {
                     return array_type.child.toType().abiAlignmentAdvanced(mod, strat);
                 },
                 .vector_type => |vector_type| {
                     const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
-                    const bits = @as(u32, @intCast(bits_u64));
+                    const bits: u32 = @intCast(bits_u64);
                     const bytes = ((bits * vector_type.len) + 7) / 8;
                     const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
-                    return AbiAlignmentAdvanced{ .scalar = alignment };
+                    return .{ .scalar = Alignment.fromByteUnits(alignment) };
                 },
 
                 .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
                 .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()),
 
                 // TODO revisit this when we have the concept of the error tag type
-                .error_set_type, .inferred_error_set_type => return AbiAlignmentAdvanced{ .scalar = 2 },
+                .error_set_type, .inferred_error_set_type => return .{ .scalar = .@"2" },
 
                 // represents machine code; not a pointer
-                .func_type => |func_type| return AbiAlignmentAdvanced{
-                    .scalar = if (func_type.alignment.toByteUnitsOptional()) |a|
-                        @as(u32, @intCast(a))
+                .func_type => |func_type| return .{
+                    .scalar = if (func_type.alignment != .none)
+                        func_type.alignment
                     else
                         target_util.defaultFunctionAlignment(target),
                 },
@@ -926,47 +923,49 @@ pub const Type = struct {
                     .call_modifier,
                     .prefetch_options,
                     .anyopaque,
-                    => return AbiAlignmentAdvanced{ .scalar = 1 },
+                    => return .{ .scalar = .@"1" },
 
                     .usize,
                     .isize,
                     .export_options,
                     .extern_options,
-                    => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
-
-                    .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) },
-                    .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
-                    .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) },
-                    .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) },
-                    .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) },
-                    .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) },
-                    .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) },
-                    .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) },
-                    .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) },
-                    .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
-
-                    .f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
-                    .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) },
+                    => return .{
+                        .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
+                    },
+
+                    .c_char => return .{ .scalar = cTypeAlign(target, .char) },
+                    .c_short => return .{ .scalar = cTypeAlign(target, .short) },
+                    .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) },
+                    .c_int => return .{ .scalar = cTypeAlign(target, .int) },
+                    .c_uint => return .{ .scalar = cTypeAlign(target, .uint) },
+                    .c_long => return .{ .scalar = cTypeAlign(target, .long) },
+                    .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) },
+                    .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) },
+                    .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) },
+                    .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) },
+
+                    .f16 => return .{ .scalar = .@"2" },
+                    .f32 => return .{ .scalar = cTypeAlign(target, .float) },
                     .f64 => switch (target.c_type_bit_size(.double)) {
-                        64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) },
-                        else => return AbiAlignmentAdvanced{ .scalar = 8 },
+                        64 => return .{ .scalar = cTypeAlign(target, .double) },
+                        else => return .{ .scalar = .@"8" },
                     },
                     .f80 => switch (target.c_type_bit_size(.longdouble)) {
-                        80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
+                        80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
                         else => {
                             const u80_ty: Type = .{ .ip_index = .u80_type };
-                            return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) };
+                            return .{ .scalar = abiAlignment(u80_ty, mod) };
                         },
                     },
                     .f128 => switch (target.c_type_bit_size(.longdouble)) {
-                        128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
-                        else => return AbiAlignmentAdvanced{ .scalar = 16 },
+                        128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
+                        else => return .{ .scalar = .@"16" },
                     },
 
                     // TODO revisit this when we have the concept of the error tag type
                     .anyerror,
                     .adhoc_inferred_error_set,
-                    => return AbiAlignmentAdvanced{ .scalar = 2 },
+                    => return .{ .scalar = .@"2" },
 
                     .void,
                     .type,
@@ -976,89 +975,57 @@ pub const Type = struct {
                     .undefined,
                     .enum_literal,
                     .type_info,
-                    => return AbiAlignmentAdvanced{ .scalar = 0 },
+                    => return .{ .scalar = .none },
 
                     .noreturn => unreachable,
                     .generic_poison => unreachable,
                 },
                 .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
-                        return AbiAlignmentAdvanced{ .scalar = 0 };
-
-                    if (opt_sema) |sema| {
-                        if (struct_obj.status == .field_types_wip) {
-                            // We'll guess "pointer-aligned", if the struct has an
-                            // underaligned pointer field then some allocations
-                            // might require explicit alignment.
-                            return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
-                        }
-                        _ = try sema.resolveTypeFields(ty);
-                    }
-                    if (!struct_obj.haveFieldTypes()) switch (strat) {
-                        .eager => unreachable, // struct layout not resolved
-                        .sema => unreachable, // handled above
-                        .lazy => return .{ .val = (try mod.intern(.{ .int = .{
-                            .ty = .comptime_int_type,
-                            .storage = .{ .lazy_align = ty.toIntern() },
-                        } })).toValue() },
-                    };
-                    if (struct_obj.layout == .Packed) {
+                    if (struct_type.layout == .Packed) {
                         switch (strat) {
                             .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
-                                .ty = .comptime_int_type,
-                                .storage = .{ .lazy_align = ty.toIntern() },
-                            } })).toValue() },
+                            .lazy => if (struct_type.backingIntType(ip).* == .none) return .{
+                                .val = (try mod.intern(.{ .int = .{
+                                    .ty = .comptime_int_type,
+                                    .storage = .{ .lazy_align = ty.toIntern() },
+                                } })).toValue(),
+                            },
                             .eager => {},
                         }
-                        assert(struct_obj.haveLayout());
-                        return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
+                        assert(struct_type.backingIntType(ip).* != .none);
+                        return .{ .scalar = struct_type.backingIntType(ip).toType().abiAlignment(mod) };
                     }
 
-                    const fields = ty.structFields(mod);
-                    var big_align: u32 = 0;
-                    for (fields.values()) |field| {
-                        if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
-                            error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
-                                .ty = .comptime_int_type,
-                                .storage = .{ .lazy_align = ty.toIntern() },
-                            } })).toValue() },
-                            else => |e| return e,
-                        })) continue;
+                    const flags = struct_type.flagsPtr(ip).*;
+                    if (flags.layout_resolved)
+                        return .{ .scalar = flags.alignment };
 
-                        const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse
-                            switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
-                            .scalar => |a| a,
-                            .val => switch (strat) {
-                                .eager => unreachable, // struct layout not resolved
-                                .sema => unreachable, // handled above
-                                .lazy => return .{ .val = (try mod.intern(.{ .int = .{
-                                    .ty = .comptime_int_type,
-                                    .storage = .{ .lazy_align = ty.toIntern() },
-                                } })).toValue() },
-                            },
-                        }));
-                        big_align = @max(big_align, field_align);
-
-                        // This logic is duplicated in Module.Struct.Field.alignment.
-                        if (struct_obj.layout == .Extern or target.ofmt == .c) {
-                            if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
-                                // The C ABI requires 128 bit integer fields of structs
-                                // to be 16-bytes aligned.
-                                big_align = @max(big_align, 16);
+                    switch (strat) {
+                        .eager => unreachable, // struct layout not resolved
+                        .sema => |sema| {
+                            if (flags.field_types_wip) {
+                                // We'll guess "pointer-aligned"; if the struct has an
+                                // underaligned pointer field, then some allocations
+                                // might require explicit alignment.
+                                return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
                             }
-                        }
+                            try sema.resolveTypeLayout(ty);
+                            return .{ .scalar = struct_type.flagsPtr(ip).alignment };
+                        },
+                        .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+                            .ty = .comptime_int_type,
+                            .storage = .{ .lazy_align = ty.toIntern() },
+                        } })).toValue() },
                     }
-                    return AbiAlignmentAdvanced{ .scalar = big_align };
                 },
                 .anon_struct_type => |tuple| {
-                    var big_align: u32 = 0;
+                    var big_align: Alignment = .none;
                     for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                         if (val != .none) continue; // comptime field
                         if (!(field_ty.toType().hasRuntimeBits(mod))) continue;
 
                         switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) {
-                            .scalar => |field_align| big_align = @max(big_align, field_align),
+                            .scalar => |field_align| big_align = big_align.max(field_align),
                             .val => switch (strat) {
                                 .eager => unreachable, // field type alignment not resolved
                                 .sema => unreachable, // passed to abiAlignmentAdvanced above
@@ -1069,7 +1036,7 @@ pub const Type = struct {
                             },
                         }
                     }
-                    return AbiAlignmentAdvanced{ .scalar = big_align };
+                    return .{ .scalar = big_align };
                 },
 
                 .union_type => |union_type| {
@@ -1078,7 +1045,7 @@ pub const Type = struct {
                             // We'll guess "pointer-aligned"; if the union has an
                             // underaligned pointer field, then some allocations
                             // might require explicit alignment.
-                            return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
+                            return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
                         }
                         _ = try sema.resolveTypeFields(ty);
                     }
@@ -1095,13 +1062,13 @@ pub const Type = struct {
                         if (union_obj.hasTag(ip)) {
                             return abiAlignmentAdvanced(union_obj.enum_tag_ty.toType(), mod, strat);
                         } else {
-                            return AbiAlignmentAdvanced{
-                                .scalar = @intFromBool(union_obj.flagsPtr(ip).layout == .Extern),
+                            return .{
+                                .scalar = Alignment.fromByteUnits(@intFromBool(union_obj.flagsPtr(ip).layout == .Extern)),
                             };
                         }
                     }
 
-                    var max_align: u32 = 0;
+                    var max_align: Alignment = .none;
                     if (union_obj.hasTag(ip)) max_align = union_obj.enum_tag_ty.toType().abiAlignment(mod);
                     for (0..union_obj.field_names.len) |field_index| {
                         const field_ty = union_obj.field_types.get(ip)[field_index].toType();
@@ -1117,8 +1084,9 @@ pub const Type = struct {
                             else => |e| return e,
                         })) continue;
 
-                        const field_align_bytes: u32 = @intCast(field_align.toByteUnitsOptional() orelse
-                            switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
+                        const resolved_align: Alignment = if (field_align != .none)
+                            field_align
+                        else switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
                             .scalar => |a| a,
                             .val => switch (strat) {
                                 .eager => unreachable, // struct layout not resolved
@@ -1128,13 +1096,15 @@ pub const Type = struct {
                                     .storage = .{ .lazy_align = ty.toIntern() },
                                 } })).toValue() },
                             },
-                        });
-                        max_align = @max(max_align, field_align_bytes);
+                        };
+                        max_align = max_align.max(resolved_align);
                     }
-                    return AbiAlignmentAdvanced{ .scalar = max_align };
+                    return .{ .scalar = max_align };
+                },
+                .opaque_type => return .{ .scalar = .@"1" },
+                .enum_type => |enum_type| return .{
+                    .scalar = enum_type.tag_ty.toType().abiAlignment(mod),
                 },
-                .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 },
-                .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) },
 
                 // values, not types
                 .undef,
@@ -1179,20 +1149,15 @@ pub const Type = struct {
                     } })).toValue() },
                     else => |e| return e,
                 })) {
-                    return AbiAlignmentAdvanced{ .scalar = code_align };
+                    return .{ .scalar = code_align };
                 }
-                return AbiAlignmentAdvanced{ .scalar = @max(
-                    code_align,
+                return .{ .scalar = code_align.max(
                     (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar,
                 ) };
             },
             .lazy => {
                 switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) {
-                    .scalar => |payload_align| {
-                        return AbiAlignmentAdvanced{
-                            .scalar = @max(code_align, payload_align),
-                        };
-                    },
+                    .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
                     .val => {},
                 }
                 return .{ .val = (try mod.intern(.{ .int = .{
@@ -1212,9 +1177,11 @@ pub const Type = struct {
         const child_type = ty.optionalChild(mod);
 
         switch (child_type.zigTypeTag(mod)) {
-            .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
+            .Pointer => return .{
+                .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
+            },
             .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
-            .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
+            .NoReturn => return .{ .scalar = .none },
             else => {},
         }
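
These hunks replace u32 byte units with InternPool.Alignment, which stores only
the log2 exponent, so forward/max/compare operate directly on exponents. Below
is a minimal sketch of such a type; the method names mirror the ones used in
the diff, but the exact definition (especially the .none handling) is
illustrative, not the compiler's:

    const std = @import("std");
    const assert = std.debug.assert;

    // Sketch only: a log2 alignment enum in the spirit of InternPool.Alignment.
    const Alignment = enum(u6) {
        @"1" = 0,
        @"2" = 1,
        @"4" = 2,
        @"8" = 3,
        @"16" = 4,
        none = std.math.maxInt(u6),
        _,

        fn fromByteUnits(n: u64) Alignment {
            if (n == 0) return .none;
            assert(std.math.isPowerOfTwo(n));
            return @enumFromInt(@ctz(n)); // the log2 of a power of two
        }

        fn toByteUnits(a: Alignment, default: u64) u64 {
            return if (a == .none) default else @as(u64, 1) << @intFromEnum(a);
        }

        // Rounding an address up never has to leave log2 space.
        fn forward(a: Alignment, addr: u64) u64 {
            assert(a != .none);
            const mask = (@as(u64, 1) << @intFromEnum(a)) - 1;
            return (addr + mask) & ~mask;
        }

        // The max of two alignments is the larger exponent; .none acts as
        // "unset" so accumulation loops can start from it.
        fn max(a: Alignment, b: Alignment) Alignment {
            if (a == .none) return b;
            if (b == .none) return a;
            return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
        }
    };

    test "log2 alignment arithmetic" {
        const a = Alignment.fromByteUnits(8);
        try std.testing.expectEqual(@as(u64, 16), a.forward(9));
        try std.testing.expectEqual(Alignment.@"8", a.max(.@"2"));
        try std.testing.expectEqual(@as(u64, 0), Alignment.none.toByteUnits(0));
    }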
 
@@ -1227,12 +1194,12 @@ pub const Type = struct {
                     } })).toValue() },
                     else => |e| return e,
                 })) {
-                    return AbiAlignmentAdvanced{ .scalar = 1 };
+                    return .{ .scalar = .@"1" };
                 }
                 return child_type.abiAlignmentAdvanced(mod, strat);
             },
             .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
-                .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
+                .scalar => |x| return .{ .scalar = x.max(.@"1") },
                 .val => return .{ .val = (try mod.intern(.{ .int = .{
                     .ty = .comptime_int_type,
                     .storage = .{ .lazy_align = ty.toIntern() },
@@ -1310,8 +1277,7 @@ pub const Type = struct {
                             .storage = .{ .lazy_size = ty.toIntern() },
                         } })).toValue() },
                     };
-                    const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
-                    const elem_bits = @as(u32, @intCast(elem_bits_u64));
+                    const elem_bits = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
                     const total_bits = elem_bits * vector_type.len;
                     const total_bytes = (total_bits + 7) / 8;
                     const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
@@ -1321,8 +1287,7 @@ pub const Type = struct {
                             .storage = .{ .lazy_size = ty.toIntern() },
                         } })).toValue() },
                     };
-                    const result = std.mem.alignForward(u32, total_bytes, alignment);
-                    return AbiSizeAdvanced{ .scalar = result };
+                    return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) };
                 },
 
                 .opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
@@ -1360,16 +1325,16 @@ pub const Type = struct {
                     };
 
                     var size: u64 = 0;
-                    if (code_align > payload_align) {
+                    if (code_align.compare(.gt, payload_align)) {
                         size += code_size;
-                        size = std.mem.alignForward(u64, size, payload_align);
+                        size = payload_align.forward(size);
                         size += payload_size;
-                        size = std.mem.alignForward(u64, size, code_align);
+                        size = code_align.forward(size);
                     } else {
                         size += payload_size;
-                        size = std.mem.alignForward(u64, size, code_align);
+                        size = code_align.forward(size);
                         size += code_size;
-                        size = std.mem.alignForward(u64, size, payload_align);
+                        size = payload_align.forward(size);
                     }
                     return AbiSizeAdvanced{ .scalar = size };
                 },
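
The error union branch above places whichever of the error code and the payload
has the stricter alignment first, then pads the total for the other. Worked
example, assuming anyerror is currently a 16-bit integer (2 bytes, 2-aligned)
and the payload is u64 (8 bytes, 8-aligned): payload first is 8, forwarding to
the 2-byte code alignment leaves 8, adding the code gives 10, and forwarding to
the 8-byte payload alignment yields 16.

    const std = @import("std");

    test "error union size under the assumed layout" {
        try std.testing.expectEqual(@as(usize, 16), @sizeOf(anyerror!u64));
        try std.testing.expectEqual(@as(usize, 8), @alignOf(anyerror!u64));
    }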
@@ -1435,41 +1400,43 @@ pub const Type = struct {
                     .noreturn => unreachable,
                     .generic_poison => unreachable,
                 },
-                .struct_type => |struct_type| switch (ty.containerLayout(mod)) {
-                    .Packed => {
-                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
-                            return AbiSizeAdvanced{ .scalar = 0 };
-
-                        switch (strat) {
-                            .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
-                                .ty = .comptime_int_type,
-                                .storage = .{ .lazy_size = ty.toIntern() },
-                            } })).toValue() },
-                            .eager => {},
-                        }
-                        assert(struct_obj.haveLayout());
-                        return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) };
-                    },
-                    else => {
-                        switch (strat) {
-                            .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => {
-                                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
-                                    return AbiSizeAdvanced{ .scalar = 0 };
-                                if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
-                                    .ty = .comptime_int_type,
-                                    .storage = .{ .lazy_size = ty.toIntern() },
-                                } })).toValue() };
+                .struct_type => |struct_type| {
+                    switch (strat) {
+                        .sema => |sema| try sema.resolveTypeLayout(ty),
+                        .lazy => switch (struct_type.layout) {
+                            .Packed => {
+                                if (struct_type.backingIntType(ip).* == .none) return .{
+                                    .val = (try mod.intern(.{ .int = .{
+                                        .ty = .comptime_int_type,
+                                        .storage = .{ .lazy_size = ty.toIntern() },
+                                    } })).toValue(),
+                                };
                             },
-                            .eager => {},
-                        }
-                        const field_count = ty.structFieldCount(mod);
-                        if (field_count == 0) {
-                            return AbiSizeAdvanced{ .scalar = 0 };
-                        }
-                        return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
-                    },
+                            .Auto, .Extern => {
+                                if (!struct_type.haveLayout(ip)) return .{
+                                    .val = (try mod.intern(.{ .int = .{
+                                        .ty = .comptime_int_type,
+                                        .storage = .{ .lazy_size = ty.toIntern() },
+                                    } })).toValue(),
+                                };
+                            },
+                        },
+                        .eager => {},
+                    }
+                    switch (struct_type.layout) {
+                        .Packed => {
+                            return .{
+                                .scalar = struct_type.backingIntType(ip).toType().abiSize(mod),
+                            };
+                        },
+                        .Auto, .Extern => {
+                            const field_count = ty.structFieldCount(mod);
+                            if (field_count == 0) {
+                                return .{ .scalar = 0 };
+                            }
+                            return .{ .scalar = ty.structFieldOffset(field_count, mod) };
+                        },
+                    }
                 },
                 .anon_struct_type => |tuple| {
                     switch (strat) {
@@ -1565,20 +1532,19 @@ pub const Type = struct {
         // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
         // to the child type's ABI alignment.
         return AbiSizeAdvanced{
-            .scalar = child_ty.abiAlignment(mod) + payload_size,
+            .scalar = child_ty.abiAlignment(mod).toByteUnits(0) + payload_size,
         };
     }
 
     fn intAbiSize(bits: u16, target: Target) u64 {
-        const alignment = intAbiAlignment(bits, target);
-        return std.mem.alignForward(u64, @as(u16, @intCast((@as(u17, bits) + 7) / 8)), alignment);
+        return intAbiAlignment(bits, target).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
     }
 
-    fn intAbiAlignment(bits: u16, target: Target) u32 {
-        return @min(
+    fn intAbiAlignment(bits: u16, target: Target) Alignment {
+        return Alignment.fromByteUnits(@min(
             std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
             target.maxIntAlignment(),
-        );
+        ));
     }
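
Concretely, these two helpers give a u24 four bytes: (24 + 7) / 8 = 3 bytes,
the alignment is min(ceilPowerOfTwo(3), maxIntAlignment) = 4, and the size is
3 rounded forward to that alignment. A quick check, assuming a target such as
x86_64 whose maximum integer alignment is at least 4:

    const std = @import("std");

    test "u24 ABI size and alignment" {
        try std.testing.expectEqual(@as(usize, 4), @alignOf(u24));
        try std.testing.expectEqual(@as(usize, 4), @sizeOf(u24));
    }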
 
     pub fn bitSize(ty: Type, mod: *Module) u64 {
@@ -1610,7 +1576,7 @@ pub const Type = struct {
                 const len = array_type.len + @intFromBool(array_type.sentinel != .none);
                 if (len == 0) return 0;
                 const elem_ty = array_type.child.toType();
-                const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
+                const elem_size = @max(elem_ty.abiAlignment(mod).toByteUnits(0), elem_ty.abiSize(mod));
                 if (elem_size == 0) return 0;
                 const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
                 return (len - 1) * 8 * elem_size + elem_bit_size;
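
To see the formula at work: for [4]u3 the element stride is
max(abiAlignment, abiSize) = max(1, 1) = 1 byte, so the bit size is
(4 - 1) * 8 * 1 + 3 = 27 bits; only the final element may be narrower than its
byte-padded stride.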
@@ -1675,26 +1641,24 @@ pub const Type = struct {
                 .enum_literal => unreachable,
                 .generic_poison => unreachable,
 
-                .atomic_order => unreachable, // missing call to resolveTypeFields
-                .atomic_rmw_op => unreachable, // missing call to resolveTypeFields
-                .calling_convention => unreachable, // missing call to resolveTypeFields
-                .address_space => unreachable, // missing call to resolveTypeFields
-                .float_mode => unreachable, // missing call to resolveTypeFields
-                .reduce_op => unreachable, // missing call to resolveTypeFields
-                .call_modifier => unreachable, // missing call to resolveTypeFields
-                .prefetch_options => unreachable, // missing call to resolveTypeFields
-                .export_options => unreachable, // missing call to resolveTypeFields
-                .extern_options => unreachable, // missing call to resolveTypeFields
-                .type_info => unreachable, // missing call to resolveTypeFields
+                .atomic_order => unreachable,
+                .atomic_rmw_op => unreachable,
+                .calling_convention => unreachable,
+                .address_space => unreachable,
+                .float_mode => unreachable,
+                .reduce_op => unreachable,
+                .call_modifier => unreachable,
+                .prefetch_options => unreachable,
+                .export_options => unreachable,
+                .extern_options => unreachable,
+                .type_info => unreachable,
             },
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
-                if (struct_obj.layout != .Packed) {
-                    return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                if (struct_type.layout == .Packed) {
+                    if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
+                    return try struct_type.backingIntType(ip).*.toType().bitSizeAdvanced(mod, opt_sema);
                 }
-                if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
-                assert(struct_obj.haveLayout());
-                return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
+                return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
             },
 
             .anon_struct_type => {
@@ -1749,13 +1713,7 @@ pub const Type = struct {
     pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
-                    return struct_obj.haveLayout();
-                } else {
-                    return true;
-                }
-            },
+            .struct_type => |struct_type| struct_type.haveLayout(ip),
             .union_type => |union_type| union_type.haveLayout(ip),
             .array_type => |array_type| {
                 if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
@@ -2020,10 +1978,7 @@ pub const Type = struct {
     pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
-                return struct_obj.layout;
-            },
+            .struct_type => |struct_type| struct_type.layout,
             .anon_struct_type => .Auto,
             .union_type => |union_type| union_type.flagsPtr(ip).layout,
             else => unreachable,
@@ -2136,10 +2091,6 @@ pub const Type = struct {
         return switch (ip.indexToKey(ty.toIntern())) {
             .vector_type => |vector_type| vector_type.len,
             .array_type => |array_type| array_type.len,
-            .struct_type => |struct_type| {
-                const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0;
-                return struct_obj.fields.count();
-            },
             .anon_struct_type => |tuple| tuple.types.len,
 
             else => unreachable,
@@ -2214,6 +2165,7 @@ pub const Type = struct {
 
     /// Asserts the type is an integer, enum, error set, or vector of one of them.
     pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
+        const ip = &mod.intern_pool;
         const target = mod.getTarget();
         var ty = starting_ty;
 
@@ -2233,13 +2185,9 @@ pub const Type = struct {
             .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
             .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
             .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
-            else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            else => switch (ip.indexToKey(ty.toIntern())) {
                 .int_type => |int_type| return int_type,
-                .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                    assert(struct_obj.layout == .Packed);
-                    ty = struct_obj.backing_int_ty;
-                },
+                .struct_type => |t| ty = t.backingIntType(ip).*.toType(),
                 .enum_type => |enum_type| ty = enum_type.tag_ty.toType(),
                 .vector_type => |vector_type| ty = vector_type.child.toType(),
 
@@ -2503,33 +2451,28 @@ pub const Type = struct {
                     .generic_poison => unreachable,
                 },
                 .struct_type => |struct_type| {
-                    if (mod.structPtrUnwrap(struct_type.index)) |s| {
-                        assert(s.haveFieldTypes());
-                        const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count());
-                        defer mod.gpa.free(field_vals);
-                        for (field_vals, s.fields.values()) |*field_val, field| {
-                            if (field.is_comptime) {
-                                field_val.* = field.default_val;
-                                continue;
-                            }
-                            if (try field.ty.onePossibleValue(mod)) |field_opv| {
-                                field_val.* = try field_opv.intern(field.ty, mod);
-                            } else return null;
+                    assert(struct_type.haveFieldTypes(ip));
+                    if (struct_type.knownNonOpv(ip))
+                        return null;
+                    const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len);
+                    defer mod.gpa.free(field_vals);
+                    for (field_vals, 0..) |*field_val, i_usize| {
+                        const i: u32 = @intCast(i_usize);
+                        if (struct_type.fieldIsComptime(ip, i)) {
+                            field_val.* = struct_type.field_inits.get(ip)[i];
+                            continue;
                         }
-
-                        // In this case the struct has no runtime-known fields and
-                        // therefore has one possible value.
-                        return (try mod.intern(.{ .aggregate = .{
-                            .ty = ty.toIntern(),
-                            .storage = .{ .elems = field_vals },
-                        } })).toValue();
+                        const field_ty = struct_type.field_types.get(ip)[i].toType();
+                        if (try field_ty.onePossibleValue(mod)) |field_opv| {
+                            field_val.* = try field_opv.intern(field_ty, mod);
+                        } else return null;
                     }
 
-                    // In this case the struct has no fields at all and
+                    // In this case the struct has no runtime-known fields and
                     // therefore has one possible value.
                     return (try mod.intern(.{ .aggregate = .{
                         .ty = ty.toIntern(),
-                        .storage = .{ .elems = &.{} },
+                        .storage = .{ .elems = field_vals },
                     } })).toValue();
                 },
 
@@ -2715,18 +2658,20 @@ pub const Type = struct {
                     => true,
                 },
                 .struct_type => |struct_type| {
+                    // Packed structs cannot be comptime-only because they have a well-defined
+                    // memory layout and every field has a well-defined bit pattern.
+                    if (struct_type.layout == .Packed)
+                        return false;
+
                     // A struct with no fields is not comptime-only.
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                    switch (struct_obj.requires_comptime) {
-                        .wip, .unknown => {
-                            // Return false to avoid incorrect dependency loops.
-                            // This will be handled correctly once merged with
-                            // `Sema.typeRequiresComptime`.
-                            return false;
-                        },
-                        .no => return false,
-                        .yes => return true,
-                    }
+                    return switch (struct_type.flagsPtr(ip).requires_comptime) {
+                        // Return false to avoid incorrect dependency loops.
+                        // This will be handled correctly once merged with
+                        // `Sema.typeRequiresComptime`.
+                        .wip, .unknown => false,
+                        .no => false,
+                        .yes => true,
+                    };
                 },
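
As the comment notes, a packed struct can never be comptime-only; it cannot
even contain a comptime-only field type. A normal struct with, say, a type
field is comptime-only and therefore zero-sized at runtime:

    const std = @import("std");

    test "comptime-only struct is zero-sized" {
        const C = struct { t: type };
        try std.testing.expectEqual(@as(usize, 0), @sizeOf(C));
    }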
 
                 .anon_struct_type => |tuple| {
@@ -2982,37 +2927,19 @@ pub const Type = struct {
         return enum_type.tagValueIndex(ip, int_tag);
     }
 
-    pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
-        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{};
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields;
-            },
-            else => unreachable,
-        }
-    }
-
     pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.keys()[field_index];
-            },
+            .struct_type => |struct_type| struct_type.field_names.get(ip)[field_index],
             .anon_struct_type => |anon_struct| anon_struct.names.get(ip)[field_index],
             else => unreachable,
         };
     }
 
     pub fn structFieldCount(ty: Type, mod: *Module) usize {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.count();
-            },
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| struct_type.field_types.len,
             .anon_struct_type => |anon_struct| anon_struct.types.len,
             else => unreachable,
         };
@@ -3022,11 +2949,7 @@ pub const Type = struct {
     pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.values()[index].ty;
-            },
+            .struct_type => |struct_type| struct_type.field_types.get(ip)[index].toType(),
             .union_type => |union_type| {
                 const union_obj = ip.loadUnionType(union_type);
                 return union_obj.field_types.get(ip)[index].toType();
@@ -3036,13 +2959,14 @@ pub const Type = struct {
         };
     }
 
-    pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
+    pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
         const ip = &mod.intern_pool;
         switch (ip.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.layout != .Packed);
-                return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
+                assert(struct_type.layout != .Packed);
+                const explicit_align = struct_type.field_aligns.get(ip)[index];
+                const field_ty = struct_type.field_types.get(ip)[index].toType();
+                return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
             },
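
Here mod.structFieldAlignment (introduced elsewhere in this commit) presumably
combines the explicit field alignment with the field type's natural alignment
and the layout-specific rules, such as the 16-byte minimum for
128-bit-or-wider integer fields under the C ABI, that the deleted loop in
abiAlignmentAdvanced used to duplicate inline.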
             .anon_struct_type => |anon_struct| {
                 return anon_struct.types.get(ip)[index].toType().abiAlignment(mod);
@@ -3059,8 +2983,7 @@ pub const Type = struct {
         const ip = &mod.intern_pool;
         switch (ip.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                const val = struct_obj.fields.values()[index].default_val;
+                const val = struct_type.field_inits.get(ip)[index];
                 // TODO: avoid using `unreachable` to indicate this.
                 if (val == .none) return Value.@"unreachable";
                 return val.toValue();
@@ -3079,12 +3002,10 @@ pub const Type = struct {
         const ip = &mod.intern_pool;
         switch (ip.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                const field = struct_obj.fields.values()[index];
-                if (field.is_comptime) {
-                    return field.default_val.toValue();
+                if (struct_type.comptime_bits.getBit(ip, index)) {
+                    return struct_type.field_inits.get(ip)[index].toValue();
                 } else {
-                    return field.ty.onePossibleValue(mod);
+                    return struct_type.field_types.get(ip)[index].toType().onePossibleValue(mod);
                 }
             },
             .anon_struct_type => |tuple| {
@@ -3102,30 +3023,25 @@ pub const Type = struct {
     pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                if (struct_obj.layout == .Packed) return false;
-                const field = struct_obj.fields.values()[index];
-                return field.is_comptime;
-            },
+            .struct_type => |struct_type| struct_type.fieldIsComptime(ip, index),
             .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
             else => unreachable,
         };
     }
 
     pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
-        const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
-        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-        assert(struct_obj.layout == .Packed);
+        const ip = &mod.intern_pool;
+        const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
+        assert(struct_type.layout == .Packed);
         comptime assert(Type.packed_struct_layout_version == 2);
 
         var bit_offset: u16 = undefined;
         var elem_size_bits: u16 = undefined;
         var running_bits: u16 = 0;
-        for (struct_obj.fields.values(), 0..) |f, i| {
-            if (!f.ty.hasRuntimeBits(mod)) continue;
+        for (struct_type.field_types.get(ip), 0..) |field_ty, i| {
+            if (!field_ty.toType().hasRuntimeBits(mod)) continue;
 
-            const field_bits = @as(u16, @intCast(f.ty.bitSize(mod)));
+            const field_bits: u16 = @intCast(field_ty.toType().bitSize(mod));
             if (i == field_index) {
                 bit_offset = running_bits;
                 elem_size_bits = field_bits;
@@ -3141,68 +3057,19 @@ pub const Type = struct {
         offset: u64,
     };
 
-    pub const StructOffsetIterator = struct {
-        field: usize = 0,
-        offset: u64 = 0,
-        big_align: u32 = 0,
-        struct_obj: *Module.Struct,
-        module: *Module,
-
-        pub fn next(it: *StructOffsetIterator) ?FieldOffset {
-            const mod = it.module;
-            var i = it.field;
-            if (it.struct_obj.fields.count() <= i)
-                return null;
-
-            if (it.struct_obj.optimized_order) |some| {
-                i = some[i];
-                if (i == Module.Struct.omitted_field) return null;
-            }
-            const field = it.struct_obj.fields.values()[i];
-            it.field += 1;
-
-            if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
-                return FieldOffset{ .field = i, .offset = it.offset };
-            }
-
-            const field_align = field.alignment(mod, it.struct_obj.layout);
-            it.big_align = @max(it.big_align, field_align);
-            const field_offset = std.mem.alignForward(u64, it.offset, field_align);
-            it.offset = field_offset + field.ty.abiSize(mod);
-            return FieldOffset{ .field = i, .offset = field_offset };
-        }
-    };
-
-    /// Get an iterator that iterates over all the struct field, returning the field and
-    /// offset of that field. Asserts that the type is a non-packed struct.
-    pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
-        const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
-        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-        assert(struct_obj.haveLayout());
-        assert(struct_obj.layout != .Packed);
-        return .{ .struct_obj = struct_obj, .module = mod };
-    }
-
     /// Supports structs and unions.
     pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
         const ip = &mod.intern_pool;
         switch (ip.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.haveLayout());
-                assert(struct_obj.layout != .Packed);
-                var it = ty.iterateStructOffsets(mod);
-                while (it.next()) |field_offset| {
-                    if (index == field_offset.field)
-                        return field_offset.offset;
-                }
-
-                return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1));
+                assert(struct_type.haveLayout(ip));
+                assert(struct_type.layout != .Packed);
+                return struct_type.offsets.get(ip)[index];
             },
 
             .anon_struct_type => |tuple| {
                 var offset: u64 = 0;
-                var big_align: u32 = 0;
+                var big_align: Alignment = .none;
 
                 for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
                     if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
@@ -3212,12 +3079,12 @@ pub const Type = struct {
                     }
 
                     const field_align = field_ty.toType().abiAlignment(mod);
-                    big_align = @max(big_align, field_align);
-                    offset = std.mem.alignForward(u64, offset, field_align);
+                    big_align = big_align.max(field_align);
+                    offset = field_align.forward(offset);
                     if (i == index) return offset;
                     offset += field_ty.toType().abiSize(mod);
                 }
-                offset = std.mem.alignForward(u64, offset, @max(big_align, 1));
+                offset = big_align.max(.@"1").forward(offset);
                 return offset;
             },
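
The tuple loop above is the standard align-forward walk: each runtime field is
placed at the next offset rounded up to its alignment, and the final offset is
padded to the largest alignment seen. The same walk is observable through an
extern struct, used here only because its layout is guaranteed:

    const std = @import("std");

    test "field offsets follow the align-forward walk" {
        const S = extern struct { a: u8, b: u32 };
        try std.testing.expectEqual(@as(usize, 4), @offsetOf(S, "b")); // forward(1, 4)
        try std.testing.expectEqual(@as(usize, 8), @sizeOf(S)); // forward(4 + 4, 4)
    }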
 
@@ -3226,9 +3093,9 @@ pub const Type = struct {
                     return 0;
                 const union_obj = ip.loadUnionType(union_type);
                 const layout = mod.getUnionLayout(union_obj);
-                if (layout.tag_align >= layout.payload_align) {
+                if (layout.tag_align.compare(.gte, layout.payload_align)) {
                     // {Tag, Payload}
-                    return std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
+                    return layout.payload_align.forward(layout.tag_size);
                 } else {
                     // {Payload, Tag}
                     return 0;
@@ -3246,8 +3113,7 @@ pub const Type = struct {
     pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                return struct_obj.srcLoc(mod);
+                return mod.declPtr(struct_type.decl.unwrap() orelse return null).srcLoc(mod);
             },
             .union_type => |union_type| {
                 return mod.declPtr(union_type.decl).srcLoc(mod);
@@ -3264,10 +3130,7 @@ pub const Type = struct {
 
     pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
-                return struct_obj.owner_decl;
-            },
+            .struct_type => |struct_type| struct_type.decl.unwrap(),
             .union_type => |union_type| union_type.decl,
             .opaque_type => |opaque_type| opaque_type.decl,
             .enum_type => |enum_type| enum_type.decl,
@@ -3280,10 +3143,12 @@ pub const Type = struct {
     }
 
     pub fn isTuple(ty: Type, mod: *Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                return struct_obj.is_tuple;
+                if (struct_type.layout == .Packed) return false;
+                if (struct_type.decl == .none) return false;
+                return struct_type.flagsPtr(ip).is_tuple;
             },
             .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
             else => false,
@@ -3299,10 +3164,12 @@ pub const Type = struct {
     }
 
     pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ty.toIntern())) {
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                return struct_obj.is_tuple;
+                if (struct_type.layout == .Packed) return false;
+                if (struct_type.decl == .none) return false;
+                return struct_type.flagsPtr(ip).is_tuple;
             },
             .anon_struct_type => true,
             else => false,
@@ -3391,3 +3258,7 @@ pub const Type = struct {
     /// to packed struct layout to find out all the places in the codebase you need to edit!
     pub const packed_struct_layout_version = 2;
 };
+
+fn cTypeAlign(target: Target, c_type: Target.CType) Alignment {
+    return Alignment.fromByteUnits(target.c_type_alignment(c_type));
+}
src/TypedValue.zig
@@ -432,7 +432,7 @@ fn printAggregate(
             if (i != 0) try writer.writeAll(", ");
 
             const field_name = switch (ip.indexToKey(ty.toIntern())) {
-                .struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(),
+                .struct_type => |x| x.field_names.get(ip)[i].toOptional(),
                 .anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
                 else => unreachable,
             };
src/value.zig
@@ -462,7 +462,7 @@ pub const Value = struct {
                         if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType());
                         const x = switch (int.storage) {
                             else => unreachable,
-                            .lazy_align => ty.toType().abiAlignment(mod),
+                            .lazy_align => ty.toType().abiAlignment(mod).toByteUnits(0),
                             .lazy_size => ty.toType().abiSize(mod),
                         };
                         return BigIntMutable.init(&space.limbs, x).toConst();
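
Here toByteUnits(0) collapses the log2 Alignment back to plain byte units, with
.none becoming 0 to match the old convention in which a zero byte-unit
alignment meant "unset"; lazy alignment values therefore stay numerically
identical to what the previous byte-unit code produced.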
@@ -523,9 +523,9 @@ pub const Value = struct {
                     .u64 => |x| x,
                     .i64 => |x| std.math.cast(u64, x),
                     .lazy_align => |ty| if (opt_sema) |sema|
-                        (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar
+                        (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
                     else
-                        ty.toType().abiAlignment(mod),
+                        ty.toType().abiAlignment(mod).toByteUnits(0),
                     .lazy_size => |ty| if (opt_sema) |sema|
                         (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar
                     else
@@ -569,9 +569,9 @@ pub const Value = struct {
                 .int => |int| switch (int.storage) {
                     .big_int => |big_int| big_int.to(i64) catch unreachable,
                     .i64 => |x| x,
-                    .u64 => |x| @as(i64, @intCast(x)),
-                    .lazy_align => |ty| @as(i64, @intCast(ty.toType().abiAlignment(mod))),
-                    .lazy_size => |ty| @as(i64, @intCast(ty.toType().abiSize(mod))),
+                    .u64 => |x| @intCast(x),
+                    .lazy_align => |ty| @intCast(ty.toType().abiAlignment(mod).toByteUnits(0)),
+                    .lazy_size => |ty| @intCast(ty.toType().abiSize(mod)),
                 },
                 else => unreachable,
             },
@@ -612,10 +612,11 @@ pub const Value = struct {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef(mod)) {
-            const size = @as(usize, @intCast(ty.abiSize(mod)));
+            const size: usize = @intCast(ty.abiSize(mod));
             @memset(buffer[0..size], 0xaa);
             return;
         }
+        const ip = &mod.intern_pool;
         switch (ty.zigTypeTag(mod)) {
             .Void => {},
             .Bool => {
@@ -656,40 +657,44 @@ pub const Value = struct {
                 const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
             },
-            .Struct => switch (ty.containerLayout(mod)) {
-                .Auto => return error.IllDefinedMemoryLayout,
-                .Extern => for (ty.structFields(mod).values(), 0..) |field, i| {
-                    const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
-                    const field_val = switch (val.ip_index) {
-                        .none => switch (val.tag()) {
-                            .bytes => {
-                                buffer[off] = val.castTag(.bytes).?.data[i];
-                                continue;
-                            },
-                            .aggregate => val.castTag(.aggregate).?.data[i],
-                            .repeated => val.castTag(.repeated).?.data,
-                            else => unreachable,
-                        },
-                        else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
-                            .bytes => |bytes| {
-                                buffer[off] = bytes[i];
-                                continue;
+            .Struct => {
+                const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
+                switch (struct_type.layout) {
+                    .Auto => return error.IllDefinedMemoryLayout,
+                    .Extern => for (0..struct_type.field_types.len) |i| {
+                        const off: usize = @intCast(ty.structFieldOffset(i, mod));
+                        const field_val = switch (val.ip_index) {
+                            .none => switch (val.tag()) {
+                                .bytes => {
+                                    buffer[off] = val.castTag(.bytes).?.data[i];
+                                    continue;
+                                },
+                                .aggregate => val.castTag(.aggregate).?.data[i],
+                                .repeated => val.castTag(.repeated).?.data,
+                                else => unreachable,
                             },
-                            .elems => |elems| elems[i],
-                            .repeated_elem => |elem| elem,
-                        }.toValue(),
-                    };
-                    try writeToMemory(field_val, field.ty, mod, buffer[off..]);
-                },
-                .Packed => {
-                    const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
-                    return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
-                },
+                            else => switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
+                                .bytes => |bytes| {
+                                    buffer[off] = bytes[i];
+                                    continue;
+                                },
+                                .elems => |elems| elems[i],
+                                .repeated_elem => |elem| elem,
+                            }.toValue(),
+                        };
+                        const field_ty = struct_type.field_types.get(ip)[i].toType();
+                        try writeToMemory(field_val, field_ty, mod, buffer[off..]);
+                    },
+                    .Packed => {
+                        const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
+                        return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+                    },
+                }
             },
             .ErrorSet => {
                 // TODO revisit this when we have the concept of the error tag type
                 const Int = u16;
-                const name = switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                const name = switch (ip.indexToKey(val.toIntern())) {
                     .err => |err| err.name,
                     .error_union => |error_union| error_union.val.err_name,
                     else => unreachable,
@@ -790,24 +795,24 @@ pub const Value = struct {
                     bits += elem_bit_size;
                 }
             },
-            .Struct => switch (ty.containerLayout(mod)) {
-                .Auto => unreachable, // Sema is supposed to have emitted a compile error already
-                .Extern => unreachable, // Handled in non-packed writeToMemory
-                .Packed => {
-                    var bits: u16 = 0;
-                    const fields = ty.structFields(mod).values();
-                    const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
-                    for (fields, 0..) |field, i| {
-                        const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
-                        const field_val = switch (storage) {
-                            .bytes => unreachable,
-                            .elems => |elems| elems[i],
-                            .repeated_elem => |elem| elem,
-                        };
-                        try field_val.toValue().writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
-                        bits += field_bits;
-                    }
-                },
+            .Struct => {
+                const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
+                // Sema is supposed to have emitted a compile error already in the case of Auto,
+                // and Extern is handled in non-packed writeToMemory.
+                assert(struct_type.layout == .Packed);
+                var bits: u16 = 0;
+                const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
+                for (0..struct_type.field_types.len) |i| {
+                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                    const field_bits: u16 = @intCast(field_ty.bitSize(mod));
+                    const field_val = switch (storage) {
+                        .bytes => unreachable,
+                        .elems => |elems| elems[i],
+                        .repeated_elem => |elem| elem,
+                    };
+                    try field_val.toValue().writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
+                    bits += field_bits;
+                }
             },
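
The bits accumulator walks fields in declaration order, so each field lands at
the running bit offset, starting from the least significant bit of the backing
integer. A small end-to-end check of that layout:

    const std = @import("std");

    test "packed struct fields are bit-packed in declaration order" {
        const P = packed struct { a: u3, b: u5, c: u8 };
        const p: P = .{ .a = 0b101, .b = 0b10010, .c = 0xab };
        const bits: u16 = @bitCast(p);
        // a occupies bits 0..2, b bits 3..7, c bits 8..15.
        try std.testing.expectEqual(@as(u16, 0xab95), bits);
    }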
             .Union => {
                 const union_obj = mod.typeToUnion(ty).?;
@@ -852,6 +857,7 @@ pub const Value = struct {
         buffer: []const u8,
         arena: Allocator,
     ) Allocator.Error!Value {
+        const ip = &mod.intern_pool;
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         switch (ty.zigTypeTag(mod)) {
@@ -926,25 +932,29 @@ pub const Value = struct {
                 const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
             },
-            .Struct => switch (ty.containerLayout(mod)) {
-                .Auto => unreachable, // Sema is supposed to have emitted a compile error already
-                .Extern => {
-                    const fields = ty.structFields(mod).values();
-                    const field_vals = try arena.alloc(InternPool.Index, fields.len);
-                    for (field_vals, fields, 0..) |*field_val, field, i| {
-                        const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
-                        const sz = @as(usize, @intCast(field.ty.abiSize(mod)));
-                        field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod);
-                    }
-                    return (try mod.intern(.{ .aggregate = .{
-                        .ty = ty.toIntern(),
-                        .storage = .{ .elems = field_vals },
-                    } })).toValue();
-                },
-                .Packed => {
-                    const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
-                    return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
-                },
+            .Struct => {
+                const struct_type = mod.typeToStruct(ty).?;
+                switch (struct_type.layout) {
+                    .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+                    .Extern => {
+                        const field_types = struct_type.field_types;
+                        const field_vals = try arena.alloc(InternPool.Index, field_types.len);
+                        for (field_vals, 0..) |*field_val, i| {
+                            const field_ty = field_types.get(ip)[i].toType();
+                            const off: usize = @intCast(ty.structFieldOffset(i, mod));
+                            const sz: usize = @intCast(field_ty.abiSize(mod));
+                            field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod);
+                        }
+                        return (try mod.intern(.{ .aggregate = .{
+                            .ty = ty.toIntern(),
+                            .storage = .{ .elems = field_vals },
+                        } })).toValue();
+                    },
+                    .Packed => {
+                        const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
+                        return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
+                    },
+                }
             },
             .ErrorSet => {
                 // TODO revisit this when we have the concept of the error tag type
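
Aside, not part of this patch: the Extern branch above reads each field back
from its ABI byte offset. A small sketch of the layout fact it depends on,
with a made-up type `E` (the offset of `b` assumes a target where u32 has
4-byte alignment, which holds on common targets):

    const std = @import("std");

    test "extern struct fields live at their ABI byte offsets" {
        const E = extern struct { a: u8, b: u32 };
        try std.testing.expectEqual(0, @offsetOf(E, "a"));
        // 3 padding bytes after `a` so `b` meets u32 alignment.
        try std.testing.expectEqual(4, @offsetOf(E, "b"));
    }
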
@@ -992,6 +1002,7 @@ pub const Value = struct {
         bit_offset: usize,
         arena: Allocator,
     ) Allocator.Error!Value {
+        const ip = &mod.intern_pool;
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         switch (ty.zigTypeTag(mod)) {
@@ -1070,23 +1081,22 @@ pub const Value = struct {
                     .storage = .{ .elems = elems },
                 } })).toValue();
             },
-            .Struct => switch (ty.containerLayout(mod)) {
-                .Auto => unreachable, // Sema is supposed to have emitted a compile error already
-                .Extern => unreachable, // Handled by non-packed readFromMemory
-                .Packed => {
-                    var bits: u16 = 0;
-                    const fields = ty.structFields(mod).values();
-                    const field_vals = try arena.alloc(InternPool.Index, fields.len);
-                    for (fields, 0..) |field, i| {
-                        const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
-                        field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod);
-                        bits += field_bits;
-                    }
-                    return (try mod.intern(.{ .aggregate = .{
-                        .ty = ty.toIntern(),
-                        .storage = .{ .elems = field_vals },
-                    } })).toValue();
-                },
+            .Struct => {
+                // Sema is supposed to have emitted a compile error already for Auto layout structs,
+                // and Extern is handled by non-packed readFromMemory.
+                const struct_type = mod.typeToPackedStruct(ty).?;
+                var bits: u16 = 0;
+                const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
+                for (field_vals, 0..) |*field_val, i| {
+                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                    const field_bits: u16 = @intCast(field_ty.bitSize(mod));
+                    field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod);
+                    bits += field_bits;
+                }
+                return (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = field_vals },
+                } })).toValue();
             },
             .Pointer => {
                 assert(!ty.isSlice(mod)); // No well defined layout.
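
Aside, not part of this patch: the packed read path is the mirror of
writeToPackedMemory earlier in this diff; fields come back from the same
accumulated bit offsets they were written at. A round-trip sketch with a
made-up type:

    const std = @import("std");

    test "packed struct round-trips through its backing integer" {
        const P = packed struct { a: u3, b: u5, c: u8 };
        const p = P{ .a = 5, .b = 17, .c = 200 };
        const bits: u16 = @bitCast(p); // 3 + 5 + 8 = 16 bits total
        const q: P = @bitCast(bits);
        try std.testing.expectEqual(p, q);
    }
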
@@ -1105,18 +1115,18 @@ pub const Value = struct {
     pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
         return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .int => |int| switch (int.storage) {
-                .big_int => |big_int| @as(T, @floatCast(bigIntToFloat(big_int.limbs, big_int.positive))),
+                .big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)),
                 inline .u64, .i64 => |x| {
                     if (T == f80) {
                         @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
                     }
-                    return @as(T, @floatFromInt(x));
+                    return @floatFromInt(x);
                 },
-                .lazy_align => |ty| @as(T, @floatFromInt(ty.toType().abiAlignment(mod))),
-                .lazy_size => |ty| @as(T, @floatFromInt(ty.toType().abiSize(mod))),
+                .lazy_align => |ty| @floatFromInt(ty.toType().abiAlignment(mod).toByteUnits(0)),
+                .lazy_size => |ty| @floatFromInt(ty.toType().abiSize(mod)),
             },
             .float => |float| switch (float.storage) {
-                inline else => |x| @as(T, @floatCast(x)),
+                inline else => |x| @floatCast(x),
             },
             else => unreachable,
         };
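
Aside, not part of this patch: the dropped `@as` wrappers are redundant
because cast builtins like `@floatCast` and `@floatFromInt` infer their
result type from the destination; the switch result then coerces to `T`.
The same cleanup is applied to floatFromIntInner below. A tiny illustration:

    const std = @import("std");

    test "cast builtins infer their result type from the destination" {
        const x: u64 = 3;
        const f: f32 = @floatFromInt(x); // no @as(f32, ...) needed
        try std.testing.expectEqual(@as(f32, 3.0), f);
    }
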
@@ -1875,9 +1885,9 @@ pub const Value = struct {
                 },
                 inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
                 .lazy_align => |ty| if (opt_sema) |sema| {
-                    return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
+                    return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
                 } else {
-                    return floatFromIntInner(ty.toType().abiAlignment(mod), float_ty, mod);
+                    return floatFromIntInner(ty.toType().abiAlignment(mod).toByteUnits(0), float_ty, mod);
                 },
                 .lazy_size => |ty| if (opt_sema) |sema| {
                     return floatFromIntInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
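
Aside, not part of this patch: `toByteUnits(0)` converts the log2 Alignment
back into byte units, with the argument standing in for `.none`. A minimal
sketch of the representation, with names mirroring InternPool.Alignment for
illustration only (this is not the actual definition):

    const std = @import("std");

    const Alignment = enum(u6) {
        none = std.math.maxInt(u6),
        _,

        fn fromNonzeroByteUnits(n: u64) Alignment {
            // n must be a nonzero power of two.
            return @enumFromInt(std.math.log2_int(u64, n));
        }

        fn toByteUnits(a: Alignment, default: u64) u64 {
            return if (a == .none) default else @as(u64, 1) << @intFromEnum(a);
        }
    };

    test "alignments round-trip through their log2 representation" {
        const a = Alignment.fromNonzeroByteUnits(16);
        try std.testing.expectEqual(@as(u64, 16), a.toByteUnits(0));
        try std.testing.expectEqual(@as(u64, 0), Alignment.none.toByteUnits(0));
    }
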
@@ -1892,11 +1902,11 @@ pub const Value = struct {
     fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
         const target = mod.getTarget();
         const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
-            16 => .{ .f16 = @as(f16, @floatFromInt(x)) },
-            32 => .{ .f32 = @as(f32, @floatFromInt(x)) },
-            64 => .{ .f64 = @as(f64, @floatFromInt(x)) },
-            80 => .{ .f80 = @as(f80, @floatFromInt(x)) },
-            128 => .{ .f128 = @as(f128, @floatFromInt(x)) },
+            16 => .{ .f16 = @floatFromInt(x) },
+            32 => .{ .f32 = @floatFromInt(x) },
+            64 => .{ .f64 = @floatFromInt(x) },
+            80 => .{ .f80 = @floatFromInt(x) },
+            128 => .{ .f128 = @floatFromInt(x) },
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{