Commit e1f3fc6ce2

Jacob Young <jacobly0@users.noreply.github.com>
2025-09-22 05:14:28
Coff2: create a new linker from scratch
1 parent d5f09f5
lib/std/array_hash_map.zig
@@ -50,7 +50,7 @@ pub fn eqlString(a: []const u8, b: []const u8) bool {
 }
 
 pub fn hashString(s: []const u8) u32 {
-    return @as(u32, @truncate(std.hash.Wyhash.hash(0, s)));
+    return @truncate(std.hash.Wyhash.hash(0, s));
 }
 
 /// Deprecated in favor of `ArrayHashMapWithAllocator` (no code changes needed)
lib/std/coff.zig
@@ -2,70 +2,9 @@ const std = @import("std.zig");
 const assert = std.debug.assert;
 const mem = std.mem;
 
-pub const CoffHeaderFlags = packed struct {
-    /// Image only, Windows CE, and Microsoft Windows NT and later.
-    /// This indicates that the file does not contain base relocations
-    /// and must therefore be loaded at its preferred base address.
-    /// If the base address is not available, the loader reports an error.
-    /// The default behavior of the linker is to strip base relocations
-    /// from executable (EXE) files.
-    RELOCS_STRIPPED: u1 = 0,
-
-    /// Image only. This indicates that the image file is valid and can be run.
-    /// If this flag is not set, it indicates a linker error.
-    EXECUTABLE_IMAGE: u1 = 0,
-
-    /// COFF line numbers have been removed. This flag is deprecated and should be zero.
-    LINE_NUMS_STRIPPED: u1 = 0,
-
-    /// COFF symbol table entries for local symbols have been removed.
-    /// This flag is deprecated and should be zero.
-    LOCAL_SYMS_STRIPPED: u1 = 0,
-
-    /// Obsolete. Aggressively trim working set.
-    /// This flag is deprecated for Windows 2000 and later and must be zero.
-    AGGRESSIVE_WS_TRIM: u1 = 0,
-
-    /// Application can handle > 2-GB addresses.
-    LARGE_ADDRESS_AWARE: u1 = 0,
-
-    /// This flag is reserved for future use.
-    RESERVED: u1 = 0,
-
-    /// Little endian: the least significant bit (LSB) precedes the
-    /// most significant bit (MSB) in memory. This flag is deprecated and should be zero.
-    BYTES_REVERSED_LO: u1 = 0,
-
-    /// Machine is based on a 32-bit-word architecture.
-    @"32BIT_MACHINE": u1 = 0,
-
-    /// Debugging information is removed from the image file.
-    DEBUG_STRIPPED: u1 = 0,
-
-    /// If the image is on removable media, fully load it and copy it to the swap file.
-    REMOVABLE_RUN_FROM_SWAP: u1 = 0,
-
-    /// If the image is on network media, fully load it and copy it to the swap file.
-    NET_RUN_FROM_SWAP: u1 = 0,
-
-    /// The image file is a system file, not a user program.
-    SYSTEM: u1 = 0,
-
-    /// The image file is a dynamic-link library (DLL).
-    /// Such files are considered executable files for almost all purposes,
-    /// although they cannot be directly run.
-    DLL: u1 = 0,
-
-    /// The file should be run only on a uniprocessor machine.
-    UP_SYSTEM_ONLY: u1 = 0,
-
-    /// Big endian: the MSB precedes the LSB in memory. This flag is deprecated and should be zero.
-    BYTES_REVERSED_HI: u1 = 0,
-};
-
-pub const CoffHeader = extern struct {
+pub const Header = extern struct {
     /// The number that identifies the type of target machine.
-    machine: MachineType,
+    machine: IMAGE.FILE.MACHINE,
 
     /// The number of sections. This indicates the size of the section table, which immediately follows the headers.
     number_of_sections: u16,
@@ -88,49 +27,110 @@ pub const CoffHeader = extern struct {
     size_of_optional_header: u16,
 
     /// The flags that indicate the attributes of the file.
-    flags: CoffHeaderFlags,
+    flags: Header.Flags,
+
+    pub const Flags = packed struct(u16) {
+        /// Image only, Windows CE, and Microsoft Windows NT and later.
+        /// This indicates that the file does not contain base relocations
+        /// and must therefore be loaded at its preferred base address.
+        /// If the base address is not available, the loader reports an error.
+        /// The default behavior of the linker is to strip base relocations
+        /// from executable (EXE) files.
+        RELOCS_STRIPPED: bool = false,
+
+        /// Image only. This indicates that the image file is valid and can be run.
+        /// If this flag is not set, it indicates a linker error.
+        EXECUTABLE_IMAGE: bool = false,
+
+        /// COFF line numbers have been removed. This flag is deprecated and should be zero.
+        LINE_NUMS_STRIPPED: bool = false,
+
+        /// COFF symbol table entries for local symbols have been removed.
+        /// This flag is deprecated and should be zero.
+        LOCAL_SYMS_STRIPPED: bool = false,
+
+        /// Obsolete. Aggressively trim working set.
+        /// This flag is deprecated for Windows 2000 and later and must be zero.
+        AGGRESSIVE_WS_TRIM: bool = false,
+
+        /// Application can handle > 2-GB addresses.
+        LARGE_ADDRESS_AWARE: bool = false,
+
+        /// This flag is reserved for future use.
+        RESERVED: bool = false,
+
+        /// Little endian: the least significant bit (LSB) precedes the
+        /// most significant bit (MSB) in memory. This flag is deprecated and should be zero.
+        BYTES_REVERSED_LO: bool = false,
+
+        /// Machine is based on a 32-bit-word architecture.
+        @"32BIT_MACHINE": bool = false,
+
+        /// Debugging information is removed from the image file.
+        DEBUG_STRIPPED: bool = false,
+
+        /// If the image is on removable media, fully load it and copy it to the swap file.
+        REMOVABLE_RUN_FROM_SWAP: bool = false,
+
+        /// If the image is on network media, fully load it and copy it to the swap file.
+        NET_RUN_FROM_SWAP: bool = false,
+
+        /// The image file is a system file, not a user program.
+        SYSTEM: bool = false,
+
+        /// The image file is a dynamic-link library (DLL).
+        /// Such files are considered executable files for almost all purposes,
+        /// although they cannot be directly run.
+        DLL: bool = false,
+
+        /// The file should be run only on a uniprocessor machine.
+        UP_SYSTEM_ONLY: bool = false,
+
+        /// Big endian: the MSB precedes the LSB in memory. This flag is deprecated and should be zero.
+        BYTES_REVERSED_HI: bool = false,
+    };
 };
 
 // OptionalHeader.magic values
 // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
-pub const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
-pub const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
+pub const IMAGE_NT_OPTIONAL_HDR32_MAGIC = @intFromEnum(OptionalHeader.Magic.PE32);
+pub const IMAGE_NT_OPTIONAL_HDR64_MAGIC = @intFromEnum(OptionalHeader.Magic.@"PE32+");
 
-pub const DllFlags = packed struct {
+pub const DllFlags = packed struct(u16) {
     _reserved_0: u5 = 0,
 
     /// Image can handle a high entropy 64-bit virtual address space.
-    HIGH_ENTROPY_VA: u1 = 0,
+    HIGH_ENTROPY_VA: bool = false,
 
     /// DLL can be relocated at load time.
-    DYNAMIC_BASE: u1 = 0,
+    DYNAMIC_BASE: bool = false,
 
     /// Code Integrity checks are enforced.
-    FORCE_INTEGRITY: u1 = 0,
+    FORCE_INTEGRITY: bool = false,
 
     /// Image is NX compatible.
-    NX_COMPAT: u1 = 0,
+    NX_COMPAT: bool = false,
 
     /// Isolation aware, but do not isolate the image.
-    NO_ISOLATION: u1 = 0,
+    NO_ISOLATION: bool = false,
 
     /// Does not use structured exception (SE) handling. No SE handler may be called in this image.
-    NO_SEH: u1 = 0,
+    NO_SEH: bool = false,
 
     /// Do not bind the image.
-    NO_BIND: u1 = 0,
+    NO_BIND: bool = false,
 
     /// Image must execute in an AppContainer.
-    APPCONTAINER: u1 = 0,
+    APPCONTAINER: bool = false,
 
     /// A WDM driver.
-    WDM_DRIVER: u1 = 0,
+    WDM_DRIVER: bool = false,
 
     /// Image supports Control Flow Guard.
-    GUARD_CF: u1 = 0,
+    GUARD_CF: bool = false,
 
     /// Terminal Server aware.
-    TERMINAL_SERVER_AWARE: u1 = 0,
+    TERMINAL_SERVER_AWARE: bool = false,
 };
 
 pub const Subsystem = enum(u16) {
@@ -180,7 +180,7 @@ pub const Subsystem = enum(u16) {
 };
 
 pub const OptionalHeader = extern struct {
-    magic: u16,
+    magic: OptionalHeader.Magic,
     major_linker_version: u8,
     minor_linker_version: u8,
     size_of_code: u32,
@@ -188,71 +188,63 @@ pub const OptionalHeader = extern struct {
     size_of_uninitialized_data: u32,
     address_of_entry_point: u32,
     base_of_code: u32,
-};
 
-pub const OptionalHeaderPE32 = extern struct {
-    magic: u16,
-    major_linker_version: u8,
-    minor_linker_version: u8,
-    size_of_code: u32,
-    size_of_initialized_data: u32,
-    size_of_uninitialized_data: u32,
-    address_of_entry_point: u32,
-    base_of_code: u32,
-    base_of_data: u32,
-    image_base: u32,
-    section_alignment: u32,
-    file_alignment: u32,
-    major_operating_system_version: u16,
-    minor_operating_system_version: u16,
-    major_image_version: u16,
-    minor_image_version: u16,
-    major_subsystem_version: u16,
-    minor_subsystem_version: u16,
-    win32_version_value: u32,
-    size_of_image: u32,
-    size_of_headers: u32,
-    checksum: u32,
-    subsystem: Subsystem,
-    dll_flags: DllFlags,
-    size_of_stack_reserve: u32,
-    size_of_stack_commit: u32,
-    size_of_heap_reserve: u32,
-    size_of_heap_commit: u32,
-    loader_flags: u32,
-    number_of_rva_and_sizes: u32,
-};
+    pub const Magic = enum(u16) {
+        PE32 = 0x10b,
+        @"PE32+" = 0x20b,
+        _,
+    };
 
-pub const OptionalHeaderPE64 = extern struct {
-    magic: u16,
-    major_linker_version: u8,
-    minor_linker_version: u8,
-    size_of_code: u32,
-    size_of_initialized_data: u32,
-    size_of_uninitialized_data: u32,
-    address_of_entry_point: u32,
-    base_of_code: u32,
-    image_base: u64,
-    section_alignment: u32,
-    file_alignment: u32,
-    major_operating_system_version: u16,
-    minor_operating_system_version: u16,
-    major_image_version: u16,
-    minor_image_version: u16,
-    major_subsystem_version: u16,
-    minor_subsystem_version: u16,
-    win32_version_value: u32,
-    size_of_image: u32,
-    size_of_headers: u32,
-    checksum: u32,
-    subsystem: Subsystem,
-    dll_flags: DllFlags,
-    size_of_stack_reserve: u64,
-    size_of_stack_commit: u64,
-    size_of_heap_reserve: u64,
-    size_of_heap_commit: u64,
-    loader_flags: u32,
-    number_of_rva_and_sizes: u32,
+    pub const PE32 = extern struct {
+        standard: OptionalHeader,
+        base_of_data: u32,
+        image_base: u32,
+        section_alignment: u32,
+        file_alignment: u32,
+        major_operating_system_version: u16,
+        minor_operating_system_version: u16,
+        major_image_version: u16,
+        minor_image_version: u16,
+        major_subsystem_version: u16,
+        minor_subsystem_version: u16,
+        win32_version_value: u32,
+        size_of_image: u32,
+        size_of_headers: u32,
+        checksum: u32,
+        subsystem: Subsystem,
+        dll_flags: DllFlags,
+        size_of_stack_reserve: u32,
+        size_of_stack_commit: u32,
+        size_of_heap_reserve: u32,
+        size_of_heap_commit: u32,
+        loader_flags: u32,
+        number_of_rva_and_sizes: u32,
+    };
+
+    pub const @"PE32+" = extern struct {
+        standard: OptionalHeader,
+        image_base: u64,
+        section_alignment: u32,
+        file_alignment: u32,
+        major_operating_system_version: u16,
+        minor_operating_system_version: u16,
+        major_image_version: u16,
+        minor_image_version: u16,
+        major_subsystem_version: u16,
+        minor_subsystem_version: u16,
+        win32_version_value: u32,
+        size_of_image: u32,
+        size_of_headers: u32,
+        checksum: u32,
+        subsystem: Subsystem,
+        dll_flags: DllFlags,
+        size_of_stack_reserve: u64,
+        size_of_stack_commit: u64,
+        size_of_heap_reserve: u64,
+        size_of_heap_commit: u64,
+        loader_flags: u32,
+        number_of_rva_and_sizes: u32,
+    };
 };
 
 pub const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
@@ -319,7 +311,7 @@ pub const BaseRelocationDirectoryEntry = extern struct {
     block_size: u32,
 };
 
-pub const BaseRelocation = packed struct {
+pub const BaseRelocation = packed struct(u16) {
     /// Stored in the remaining 12 bits of the WORD, an offset from the starting address that was specified in the Page RVA field for the block.
     /// This offset specifies where the base relocation is to be applied.
     offset: u12,
@@ -447,12 +439,12 @@ pub const ImportDirectoryEntry = extern struct {
 };
 
 pub const ImportLookupEntry32 = struct {
-    pub const ByName = packed struct {
+    pub const ByName = packed struct(u32) {
         name_table_rva: u31,
         flag: u1 = 0,
     };
 
-    pub const ByOrdinal = packed struct {
+    pub const ByOrdinal = packed struct(u32) {
         ordinal_number: u16,
         unused: u15 = 0,
         flag: u1 = 1,
@@ -472,13 +464,13 @@ pub const ImportLookupEntry32 = struct {
 };
 
 pub const ImportLookupEntry64 = struct {
-    pub const ByName = packed struct {
+    pub const ByName = packed struct(u64) {
         name_table_rva: u31,
         unused: u32 = 0,
         flag: u1 = 0,
     };
 
-    pub const ByOrdinal = packed struct {
+    pub const ByOrdinal = packed struct(u64) {
         ordinal_number: u16,
         unused: u47 = 0,
         flag: u1 = 1,
@@ -519,7 +511,7 @@ pub const SectionHeader = extern struct {
     pointer_to_linenumbers: u32,
     number_of_relocations: u16,
     number_of_linenumbers: u16,
-    flags: SectionHeaderFlags,
+    flags: SectionHeader.Flags,
 
     pub fn getName(self: *align(1) const SectionHeader) ?[]const u8 {
         if (self.name[0] == '/') return null;
@@ -546,109 +538,121 @@ pub const SectionHeader = extern struct {
     }
 
     pub fn isCode(self: SectionHeader) bool {
-        return self.flags.CNT_CODE == 0b1;
+        return self.flags.CNT_CODE;
     }
 
     pub fn isComdat(self: SectionHeader) bool {
-        return self.flags.LNK_COMDAT == 0b1;
+        return self.flags.LNK_COMDAT;
     }
-};
 
-pub const SectionHeaderFlags = packed struct {
-    _reserved_0: u3 = 0,
+    pub const Flags = packed struct(u32) {
+        SCALE_INDEX: bool = false,
+
+        unused1: u2 = 0,
 
-    /// The section should not be padded to the next boundary.
-    /// This flag is obsolete and is replaced by IMAGE_SCN_ALIGN_1BYTES.
-    /// This is valid only for object files.
-    TYPE_NO_PAD: u1 = 0,
+        /// The section should not be padded to the next boundary.
+        /// This flag is obsolete and is replaced by `.ALIGN = .@"1BYTES"`.
+        /// This is valid only for object files.
+        TYPE_NO_PAD: bool = false,
 
-    _reserved_1: u1 = 0,
+        unused4: u1 = 0,
 
-    /// The section contains executable code.
-    CNT_CODE: u1 = 0,
+        /// The section contains executable code.
+        CNT_CODE: bool = false,
 
-    /// The section contains initialized data.
-    CNT_INITIALIZED_DATA: u1 = 0,
+        /// The section contains initialized data.
+        CNT_INITIALIZED_DATA: bool = false,
 
-    /// The section contains uninitialized data.
-    CNT_UNINITIALIZED_DATA: u1 = 0,
+        /// The section contains uninitialized data.
+        CNT_UNINITIALIZED_DATA: bool = false,
 
-    /// Reserved for future use.
-    LNK_OTHER: u1 = 0,
+        /// Reserved for future use.
+        LNK_OTHER: bool = false,
 
-    /// The section contains comments or other information.
-    /// The .drectve section has this type.
-    /// This is valid for object files only.
-    LNK_INFO: u1 = 0,
+        /// The section contains comments or other information.
+        /// The .drectve section has this type.
+        /// This is valid for object files only.
+        LNK_INFO: bool = false,
 
-    _reserved_2: u1 = 0,
+        unused10: u1 = 0,
 
-    /// The section will not become part of the image.
-    /// This is valid only for object files.
-    LNK_REMOVE: u1 = 0,
+        /// The section will not become part of the image.
+        /// This is valid only for object files.
+        LNK_REMOVE: bool = false,
 
-    /// The section contains COMDAT data.
-    /// For more information, see COMDAT Sections (Object Only).
-    /// This is valid only for object files.
-    LNK_COMDAT: u1 = 0,
+        /// The section contains COMDAT data.
+        /// For more information, see COMDAT Sections (Object Only).
+        /// This is valid only for object files.
+        LNK_COMDAT: bool = false,
 
-    _reserved_3: u2 = 0,
+        unused13: u2 = 0,
 
-    /// The section contains data referenced through the global pointer (GP).
-    GPREL: u1 = 0,
+        union14: packed union {
+            mask: u1,
+            /// The section contains data referenced through the global pointer (GP).
+            GPREL: bool,
+            MEM_FARDATA: bool,
+        } = .{ .mask = 0 },
 
-    /// Reserved for future use.
-    MEM_PURGEABLE: u1 = 0,
+        unused15: u1 = 0,
 
-    /// Reserved for future use.
-    MEM_16BIT: u1 = 0,
+        union16: packed union {
+            mask: u1,
+            MEM_PURGEABLE: bool,
+            MEM_16BIT: bool,
+        } = .{ .mask = 0 },
 
-    /// Reserved for future use.
-    MEM_LOCKED: u1 = 0,
+        /// Reserved for future use.
+        MEM_LOCKED: bool = false,
 
-    /// Reserved for future use.
-    MEM_PRELOAD: u1 = 0,
+        /// Reserved for future use.
+        MEM_PRELOAD: bool = false,
 
-    /// Takes on multiple values according to flags:
-    /// pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x100000;
-    /// pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x200000;
-    /// pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x300000;
-    /// pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x400000;
-    /// pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x500000;
-    /// pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x600000;
-    /// pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x700000;
-    /// pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x800000;
-    /// pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x900000;
-    /// pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0xA00000;
-    /// pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0xB00000;
-    /// pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0xC00000;
-    /// pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0xD00000;
-    /// pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0xE00000;
-    ALIGN: u4 = 0,
+        ALIGN: SectionHeader.Flags.Align = .NONE,
 
-    /// The section contains extended relocations.
-    LNK_NRELOC_OVFL: u1 = 0,
+        /// The section contains extended relocations.
+        LNK_NRELOC_OVFL: bool = false,
 
-    /// The section can be discarded as needed.
-    MEM_DISCARDABLE: u1 = 0,
+        /// The section can be discarded as needed.
+        MEM_DISCARDABLE: bool = false,
 
-    /// The section cannot be cached.
-    MEM_NOT_CACHED: u1 = 0,
+        /// The section cannot be cached.
+        MEM_NOT_CACHED: bool = false,
 
-    /// The section is not pageable.
-    MEM_NOT_PAGED: u1 = 0,
+        /// The section is not pageable.
+        MEM_NOT_PAGED: bool = false,
 
-    /// The section can be shared in memory.
-    MEM_SHARED: u1 = 0,
+        /// The section can be shared in memory.
+        MEM_SHARED: bool = false,
 
-    /// The section can be executed as code.
-    MEM_EXECUTE: u1 = 0,
+        /// The section can be executed as code.
+        MEM_EXECUTE: bool = false,
 
-    /// The section can be read.
-    MEM_READ: u1 = 0,
+        /// The section can be read.
+        MEM_READ: bool = false,
 
-    /// The section can be written to.
-    MEM_WRITE: u1 = 0,
+        /// The section can be written to.
+        MEM_WRITE: bool = false,
+
+        pub const Align = enum(u4) {
+            NONE = 0,
+            @"1BYTES" = 1,
+            @"2BYTES" = 2,
+            @"4BYTES" = 3,
+            @"8BYTES" = 4,
+            @"16BYTES" = 5,
+            @"32BYTES" = 6,
+            @"64BYTES" = 7,
+            @"128BYTES" = 8,
+            @"256BYTES" = 9,
+            @"512BYTES" = 10,
+            @"1024BYTES" = 11,
+            @"2048BYTES" = 12,
+            @"4096BYTES" = 13,
+            @"8192BYTES" = 14,
+            _,
+        };
+    };
 };
 
 pub const Symbol = struct {
@@ -691,7 +695,7 @@ pub const SectionNumber = enum(u16) {
     _,
 };
 
-pub const SymType = packed struct {
+pub const SymType = packed struct(u16) {
     complex_type: ComplexType,
     base_type: BaseType,
 };
@@ -982,87 +986,7 @@ pub const DebugInfoDefinition = struct {
     unused_3: [2]u8,
 };
 
-pub const MachineType = enum(u16) {
-    UNKNOWN = 0x0,
-    /// Alpha AXP, 32-bit address space
-    ALPHA = 0x184,
-    /// Alpha 64, 64-bit address space
-    ALPHA64 = 0x284,
-    /// Matsushita AM33
-    AM33 = 0x1d3,
-    /// x64
-    X64 = 0x8664,
-    /// ARM little endian
-    ARM = 0x1c0,
-    /// ARM64 little endian
-    ARM64 = 0xaa64,
-    /// ARM64EC
-    ARM64EC = 0xa641,
-    /// ARM64X
-    ARM64X = 0xa64e,
-    /// ARM Thumb-2 little endian
-    ARMNT = 0x1c4,
-    /// CEE
-    CEE = 0xc0ee,
-    /// CEF
-    CEF = 0xcef,
-    /// Hybrid PE
-    CHPE_X86 = 0x3a64,
-    /// EFI byte code
-    EBC = 0xebc,
-    /// Intel 386 or later processors and compatible processors
-    I386 = 0x14c,
-    /// Intel Itanium processor family
-    IA64 = 0x200,
-    /// LoongArch32
-    LOONGARCH32 = 0x6232,
-    /// LoongArch64
-    LOONGARCH64 = 0x6264,
-    /// Mitsubishi M32R little endian
-    M32R = 0x9041,
-    /// MIPS16
-    MIPS16 = 0x266,
-    /// MIPS with FPU
-    MIPSFPU = 0x366,
-    /// MIPS16 with FPU
-    MIPSFPU16 = 0x466,
-    /// Power PC little endian
-    POWERPC = 0x1f0,
-    /// Power PC with floating point support
-    POWERPCFP = 0x1f1,
-    /// MIPS little endian
-    R3000 = 0x162,
-    /// MIPS little endian
-    R4000 = 0x166,
-    /// MIPS little endian
-    R10000 = 0x168,
-    /// RISC-V 32-bit address space
-    RISCV32 = 0x5032,
-    /// RISC-V 64-bit address space
-    RISCV64 = 0x5064,
-    /// RISC-V 128-bit address space
-    RISCV128 = 0x5128,
-    /// Hitachi SH3
-    SH3 = 0x1a2,
-    /// Hitachi SH3 DSP
-    SH3DSP = 0x1a3,
-    /// SH3E little-endian
-    SH3E = 0x1a4,
-    /// Hitachi SH4
-    SH4 = 0x1a6,
-    /// Hitachi SH5
-    SH5 = 0x1a8,
-    /// Thumb
-    THUMB = 0x1c2,
-    /// Infineon
-    TRICORE = 0x520,
-    /// MIPS little-endian WCE v2
-    WCEMIPSV2 = 0x169,
-
-    _,
-};
-
-pub const CoffError = error{
+pub const Error = error{
     InvalidPEMagic,
     InvalidPEHeader,
     InvalidMachine,
@@ -1104,7 +1028,7 @@ pub const Coff = struct {
 
         // Do some basic validation upfront
         if (is_image) {
-            const coff_header = coff.getCoffHeader();
+            const coff_header = coff.getHeader();
             if (coff_header.size_of_optional_header == 0) return error.MissingPEHeader;
         }
 
@@ -1161,31 +1085,31 @@ pub const Coff = struct {
         return self.data[start .. start + len];
     }
 
-    pub fn getCoffHeader(self: Coff) CoffHeader {
-        return @as(*align(1) const CoffHeader, @ptrCast(self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)])).*;
+    pub fn getHeader(self: Coff) Header {
+        return @as(*align(1) const Header, @ptrCast(self.data[self.coff_header_offset..][0..@sizeOf(Header)])).*;
     }
 
     pub fn getOptionalHeader(self: Coff) OptionalHeader {
         assert(self.is_image);
-        const offset = self.coff_header_offset + @sizeOf(CoffHeader);
+        const offset = self.coff_header_offset + @sizeOf(Header);
         return @as(*align(1) const OptionalHeader, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader)])).*;
     }
 
-    pub fn getOptionalHeader32(self: Coff) OptionalHeaderPE32 {
+    pub fn getOptionalHeader32(self: Coff) OptionalHeader.PE32 {
         assert(self.is_image);
-        const offset = self.coff_header_offset + @sizeOf(CoffHeader);
-        return @as(*align(1) const OptionalHeaderPE32, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE32)])).*;
+        const offset = self.coff_header_offset + @sizeOf(Header);
+        return @as(*align(1) const OptionalHeader.PE32, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader.PE32)])).*;
     }
 
-    pub fn getOptionalHeader64(self: Coff) OptionalHeaderPE64 {
+    pub fn getOptionalHeader64(self: Coff) OptionalHeader.@"PE32+" {
         assert(self.is_image);
-        const offset = self.coff_header_offset + @sizeOf(CoffHeader);
-        return @as(*align(1) const OptionalHeaderPE64, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE64)])).*;
+        const offset = self.coff_header_offset + @sizeOf(Header);
+        return @as(*align(1) const OptionalHeader.@"PE32+", @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader.@"PE32+")])).*;
     }
 
     pub fn getImageBase(self: Coff) u64 {
         const hdr = self.getOptionalHeader();
-        return switch (hdr.magic) {
+        return switch (@intFromEnum(hdr.magic)) {
             IMAGE_NT_OPTIONAL_HDR32_MAGIC => self.getOptionalHeader32().image_base,
             IMAGE_NT_OPTIONAL_HDR64_MAGIC => self.getOptionalHeader64().image_base,
             else => unreachable, // We assume we have validated the header already
@@ -1194,7 +1118,7 @@ pub const Coff = struct {
 
     pub fn getNumberOfDataDirectories(self: Coff) u32 {
         const hdr = self.getOptionalHeader();
-        return switch (hdr.magic) {
+        return switch (@intFromEnum(hdr.magic)) {
             IMAGE_NT_OPTIONAL_HDR32_MAGIC => self.getOptionalHeader32().number_of_rva_and_sizes,
             IMAGE_NT_OPTIONAL_HDR64_MAGIC => self.getOptionalHeader64().number_of_rva_and_sizes,
             else => unreachable, // We assume we have validated the header already
@@ -1203,17 +1127,17 @@ pub const Coff = struct {
 
     pub fn getDataDirectories(self: *const Coff) []align(1) const ImageDataDirectory {
         const hdr = self.getOptionalHeader();
-        const size: usize = switch (hdr.magic) {
-            IMAGE_NT_OPTIONAL_HDR32_MAGIC => @sizeOf(OptionalHeaderPE32),
-            IMAGE_NT_OPTIONAL_HDR64_MAGIC => @sizeOf(OptionalHeaderPE64),
+        const size: usize = switch (@intFromEnum(hdr.magic)) {
+            IMAGE_NT_OPTIONAL_HDR32_MAGIC => @sizeOf(OptionalHeader.PE32),
+            IMAGE_NT_OPTIONAL_HDR64_MAGIC => @sizeOf(OptionalHeader.@"PE32+"),
             else => unreachable, // We assume we have validated the header already
         };
-        const offset = self.coff_header_offset + @sizeOf(CoffHeader) + size;
+        const offset = self.coff_header_offset + @sizeOf(Header) + size;
         return @as([*]align(1) const ImageDataDirectory, @ptrCast(self.data[offset..]))[0..self.getNumberOfDataDirectories()];
     }
 
     pub fn getSymtab(self: *const Coff) ?Symtab {
-        const coff_header = self.getCoffHeader();
+        const coff_header = self.getHeader();
         if (coff_header.pointer_to_symbol_table == 0) return null;
 
         const offset = coff_header.pointer_to_symbol_table;
@@ -1222,7 +1146,7 @@ pub const Coff = struct {
     }
 
     pub fn getStrtab(self: *const Coff) error{InvalidStrtabSize}!?Strtab {
-        const coff_header = self.getCoffHeader();
+        const coff_header = self.getHeader();
         if (coff_header.pointer_to_symbol_table == 0) return null;
 
         const offset = coff_header.pointer_to_symbol_table + Symbol.sizeOf() * coff_header.number_of_symbols;
@@ -1238,8 +1162,8 @@ pub const Coff = struct {
     }
 
     pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader {
-        const coff_header = self.getCoffHeader();
-        const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header;
+        const coff_header = self.getHeader();
+        const offset = self.coff_header_offset + @sizeOf(Header) + coff_header.size_of_optional_header;
         return @as([*]align(1) const SectionHeader, @ptrCast(self.data.ptr + offset))[0..coff_header.number_of_sections];
     }
 
@@ -1414,14 +1338,14 @@ pub const Strtab = struct {
 };
 
 pub const ImportHeader = extern struct {
-    sig1: MachineType,
+    sig1: IMAGE.FILE.MACHINE,
     sig2: u16,
     version: u16,
-    machine: MachineType,
+    machine: IMAGE.FILE.MACHINE,
     time_date_stamp: u32,
     size_of_data: u32,
     hint: u16,
-    types: packed struct {
+    types: packed struct(u32) {
         type: ImportType,
         name_type: ImportNameType,
         reserved: u11,
@@ -1461,119 +1385,534 @@ pub const Relocation = extern struct {
     type: u16,
 };
 
-pub const ImageRelAmd64 = enum(u16) {
-    /// The relocation is ignored.
-    absolute = 0,
-
-    /// The 64-bit VA of the relocation target.
-    addr64 = 1,
-
-    /// The 32-bit VA of the relocation target.
-    addr32 = 2,
-
-    /// The 32-bit address without an image base.
-    addr32nb = 3,
-
-    /// The 32-bit relative address from the byte following the relocation.
-    rel32 = 4,
-
-    /// The 32-bit address relative to byte distance 1 from the relocation.
-    rel32_1 = 5,
-
-    /// The 32-bit address relative to byte distance 2 from the relocation.
-    rel32_2 = 6,
-
-    /// The 32-bit address relative to byte distance 3 from the relocation.
-    rel32_3 = 7,
-
-    /// The 32-bit address relative to byte distance 4 from the relocation.
-    rel32_4 = 8,
-
-    /// The 32-bit address relative to byte distance 5 from the relocation.
-    rel32_5 = 9,
-
-    /// The 16-bit section index of the section that contains the target.
-    /// This is used to support debugging information.
-    section = 10,
-
-    /// The 32-bit offset of the target from the beginning of its section.
-    /// This is used to support debugging information and static thread local storage.
-    secrel = 11,
-
-    /// A 7-bit unsigned offset from the base of the section that contains the target.
-    secrel7 = 12,
-
-    /// CLR tokens.
-    token = 13,
-
-    /// A 32-bit signed span-dependent value emitted into the object.
-    srel32 = 14,
-
-    /// A pair that must immediately follow every span-dependent value.
-    pair = 15,
-
-    /// A 32-bit signed span-dependent value that is applied at link time.
-    sspan32 = 16,
-
-    _,
-};
-
-pub const ImageRelArm64 = enum(u16) {
-    /// The relocation is ignored.
-    absolute = 0,
-
-    /// The 32-bit VA of the target.
-    addr32 = 1,
-
-    /// The 32-bit RVA of the target.
-    addr32nb = 2,
-
-    /// The 26-bit relative displacement to the target, for B and BL instructions.
-    branch26 = 3,
-
-    /// The page base of the target, for ADRP instruction.
-    pagebase_rel21 = 4,
-
-    /// The 21-bit relative displacement to the target, for instruction ADR.
-    rel21 = 5,
-
-    /// The 12-bit page offset of the target, for instructions ADD/ADDS (immediate) with zero shift.
-    pageoffset_12a = 6,
-
-    /// The 12-bit page offset of the target, for instruction LDR (indexed, unsigned immediate).
-    pageoffset_12l = 7,
-
-    /// The 32-bit offset of the target from the beginning of its section.
-    /// This is used to support debugging information and static thread local storage.
-    secrel = 8,
-
-    /// Bit 0:11 of section offset of the target for instructions ADD/ADDS (immediate) with zero shift.
-    low12a = 9,
+pub const IMAGE = struct {
+    pub const FILE = struct {
+        /// Machine Types
+        /// The Machine field has one of the following values, which specify the CPU type.
+        /// An image file can be run only on the specified machine or on a system that emulates the specified machine.
+        pub const MACHINE = enum(u16) {
+            /// The content of this field is assumed to be applicable to any machine type
+            UNKNOWN = 0x0,
+            /// Alpha AXP, 32-bit address space
+            ALPHA = 0x184,
+            /// Alpha 64, 64-bit address space
+            ALPHA64 = 0x284,
+            /// Matsushita AM33
+            AM33 = 0x1d3,
+            /// x64
+            AMD64 = 0x8664,
+            /// ARM little endian
+            ARM = 0x1c0,
+            /// ARM64 little endian
+            ARM64 = 0xaa64,
+            /// ABI that enables interoperability between native ARM64 and emulated x64 code.
+            ARM64EC = 0xA641,
+            /// Binary format that allows both native ARM64 and ARM64EC code to coexist in the same file.
+            ARM64X = 0xA64E,
+            /// ARM Thumb-2 little endian
+            ARMNT = 0x1c4,
+            /// EFI byte code
+            EBC = 0xebc,
+            /// Intel 386 or later processors and compatible processors
+            I386 = 0x14c,
+            /// Intel Itanium processor family
+            IA64 = 0x200,
+            /// LoongArch 32-bit processor family
+            LOONGARCH32 = 0x6232,
+            /// LoongArch 64-bit processor family
+            LOONGARCH64 = 0x6264,
+            /// Mitsubishi M32R little endian
+            M32R = 0x9041,
+            /// MIPS16
+            MIPS16 = 0x266,
+            /// MIPS with FPU
+            MIPSFPU = 0x366,
+            /// MIPS16 with FPU
+            MIPSFPU16 = 0x466,
+            /// Power PC little endian
+            POWERPC = 0x1f0,
+            /// Power PC with floating point support
+            POWERPCFP = 0x1f1,
+            /// MIPS I compatible 32-bit big endian
+            R3000BE = 0x160,
+            /// MIPS I compatible 32-bit little endian
+            R3000 = 0x162,
+            /// MIPS III compatible 64-bit little endian
+            R4000 = 0x166,
+            /// MIPS IV compatible 64-bit little endian
+            R10000 = 0x168,
+            /// RISC-V 32-bit address space
+            RISCV32 = 0x5032,
+            /// RISC-V 64-bit address space
+            RISCV64 = 0x5064,
+            /// RISC-V 128-bit address space
+            RISCV128 = 0x5128,
+            /// Hitachi SH3
+            SH3 = 0x1a2,
+            /// Hitachi SH3 DSP
+            SH3DSP = 0x1a3,
+            /// Hitachi SH4
+            SH4 = 0x1a6,
+            /// Hitachi SH5
+            SH5 = 0x1a8,
+            /// Thumb
+            THUMB = 0x1c2,
+            /// MIPS little-endian WCE v2
+            WCEMIPSV2 = 0x169,
+            _,
+            /// AXP 64 (Same as Alpha 64)
+            pub const AXP64: IMAGE.FILE.MACHINE = .ALPHA64;
+        };
+    };
 
-    /// Bit 12:23 of section offset of the target, for instructions ADD/ADDS (immediate) with zero shift.
-    high12a = 10,
+    pub const REL = struct {
+        /// x64 Processors
+        /// The following relocation type indicators are defined for x64 and compatible processors.
+        pub const AMD64 = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The 64-bit VA of the relocation target.
+            ADDR64 = 0x0001,
+            /// The 32-bit VA of the relocation target.
+            ADDR32 = 0x0002,
+            /// The 32-bit address without an image base (RVA).
+            ADDR32NB = 0x0003,
+            /// The 32-bit relative address from the byte following the relocation.
+            REL32 = 0x0004,
+            /// The 32-bit address relative to byte distance 1 from the relocation.
+            REL32_1 = 0x0005,
+            /// The 32-bit address relative to byte distance 2 from the relocation.
+            REL32_2 = 0x0006,
+            /// The 32-bit address relative to byte distance 3 from the relocation.
+            REL32_3 = 0x0007,
+            /// The 32-bit address relative to byte distance 4 from the relocation.
+            REL32_4 = 0x0008,
+            /// The 32-bit address relative to byte distance 5 from the relocation.
+            REL32_5 = 0x0009,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000A,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x000B,
+            /// A 7-bit unsigned offset from the base of the section that contains the target.
+            SECREL7 = 0x000C,
+            /// CLR tokens.
+            TOKEN = 0x000D,
+            /// A 32-bit signed span-dependent value emitted into the object.
+            SREL32 = 0x000E,
+            /// A pair that must immediately follow every span-dependent value.
+            PAIR = 0x000F,
+            /// A 32-bit signed span-dependent value that is applied at link time.
+            SSPAN32 = 0x0010,
+            _,
+        };
 
-    /// Bit 0:11 of section offset of the target, for instruction LDR (indexed, unsigned immediate).
-    low12l = 11,
+        /// ARM Processors
+        /// The following relocation type indicators are defined for ARM processors.
+        pub const ARM = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The 32-bit VA of the target.
+            ADDR32 = 0x0001,
+            /// The 32-bit RVA of the target.
+            ADDR32NB = 0x0002,
+            /// The 24-bit relative displacement to the target.
+            BRANCH24 = 0x0003,
+            /// The reference to a subroutine call.
+            /// The reference consists of two 16-bit instructions with 11-bit offsets.
+            BRANCH11 = 0x0004,
+            /// The 32-bit relative address from the byte following the relocation.
+            REL32 = 0x000A,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000E,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x000F,
+            /// The 32-bit VA of the target.
+            /// This relocation is applied using a MOVW instruction for the low 16 bits followed by a MOVT for the high 16 bits.
+            MOV32 = 0x0010,
+            /// The 32-bit VA of the target.
+            /// This relocation is applied using a MOVW instruction for the low 16 bits followed by a MOVT for the high 16 bits.
+            THUMB_MOV32 = 0x0011,
+            /// The instruction is fixed up with the 21-bit relative displacement to the 2-byte aligned target.
+            /// The least significant bit of the displacement is always zero and is not stored.
+            /// This relocation corresponds to a Thumb-2 32-bit conditional B instruction.
+            THUMB_BRANCH20 = 0x0012,
+            Unused = 0x0013,
+            /// The instruction is fixed up with the 25-bit relative displacement to the 2-byte aligned target.
+            /// The least significant bit of the displacement is zero and is not stored. This relocation corresponds to a Thumb-2 B instruction.
+            THUMB_BRANCH24 = 0x0014,
+            /// The instruction is fixed up with the 25-bit relative displacement to the 4-byte aligned target.
+            /// The low 2 bits of the displacement are zero and are not stored.
+            /// This relocation corresponds to a Thumb-2 BLX instruction.
+            THUMB_BLX23 = 0x0015,
+            /// The relocation is valid only when it immediately follows a ARM_REFHI or THUMB_REFHI.
+            /// Its SymbolTableIndex contains a displacement and not an index into the symbol table.
+            PAIR = 0x0016,
+            _,
+        };
 
-    /// CLR token.
-    token = 12,
+        /// ARM64 Processors
+        /// The following relocation type indicators are defined for ARM64 processors.
+        pub const ARM64 = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The 32-bit VA of the target.
+            ADDR32 = 0x0001,
+            /// The 32-bit RVA of the target.
+            ADDR32NB = 0x0002,
+            /// The 26-bit relative displacement to the target, for B and BL instructions.
+            BRANCH26 = 0x0003,
+            /// The page base of the target, for ADRP instruction.
+            PAGEBASE_REL21 = 0x0004,
+            /// The 21-bit relative displacement to the target, for instruction ADR.
+            REL21 = 0x0005,
+            /// The 12-bit page offset of the target, for instructions ADD/ADDS (immediate) with zero shift.
+            PAGEOFFSET_12A = 0x0006,
+            /// The 12-bit page offset of the target, for instruction LDR (indexed, unsigned immediate).
+            PAGEOFFSET_12L = 0x0007,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x0008,
+            /// Bit 0:11 of section offset of the target, for instructions ADD/ADDS (immediate) with zero shift.
+            SECREL_LOW12A = 0x0009,
+            /// Bit 12:23 of section offset of the target, for instructions ADD/ADDS (immediate) with zero shift.
+            SECREL_HIGH12A = 0x000A,
+            /// Bit 0:11 of section offset of the target, for instruction LDR (indexed, unsigned immediate).
+            SECREL_LOW12L = 0x000B,
+            /// CLR token.
+            TOKEN = 0x000C,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000D,
+            /// The 64-bit VA of the relocation target.
+            ADDR64 = 0x000E,
+            /// The 19-bit offset to the relocation target, for conditional B instruction.
+            BRANCH19 = 0x000F,
+            /// The 14-bit offset to the relocation target, for instructions TBZ and TBNZ.
+            BRANCH14 = 0x0010,
+            /// The 32-bit relative address from the byte following the relocation.
+            REL32 = 0x0011,
+            _,
+        };
 
-    /// The 16-bit section index of the section that contains the target.
-    /// This is used to support debugging information.
-    section = 13,
+        /// Hitachi SuperH Processors
+        /// The following relocation type indicators are defined for SH3 and SH4 processors.
+        /// SH5-specific relocations are noted as SHM (SH Media).
+        pub const SH = enum(u16) {
+            /// The relocation is ignored.
+            @"3_ABSOLUTE" = 0x0000,
+            /// A reference to the 16-bit location that contains the VA of the target symbol.
+            @"3_DIRECT16" = 0x0001,
+            /// The 32-bit VA of the target symbol.
+            @"3_DIRECT32" = 0x0002,
+            /// A reference to the 8-bit location that contains the VA of the target symbol.
+            @"3_DIRECT8" = 0x0003,
+            /// A reference to the 8-bit instruction that contains the effective 16-bit VA of the target symbol.
+            @"3_DIRECT8_WORD" = 0x0004,
+            /// A reference to the 8-bit instruction that contains the effective 32-bit VA of the target symbol.
+            @"3_DIRECT8_LONG" = 0x0005,
+            /// A reference to the 8-bit location whose low 4 bits contain the VA of the target symbol.
+            @"3_DIRECT4" = 0x0006,
+            /// A reference to the 8-bit instruction whose low 4 bits contain the effective 16-bit VA of the target symbol.
+            @"3_DIRECT4_WORD" = 0x0007,
+            /// A reference to the 8-bit instruction whose low 4 bits contain the effective 32-bit VA of the target symbol.
+            @"3_DIRECT4_LONG" = 0x0008,
+            /// A reference to the 8-bit instruction that contains the effective 16-bit relative offset of the target symbol.
+            @"3_PCREL8_WORD" = 0x0009,
+            /// A reference to the 8-bit instruction that contains the effective 32-bit relative offset of the target symbol.
+            @"3_PCREL8_LONG" = 0x000A,
+            /// A reference to the 16-bit instruction whose low 12 bits contain the effective 16-bit relative offset of the target symbol.
+            @"3_PCREL12_WORD" = 0x000B,
+            /// A reference to a 32-bit location that is the VA of the section that contains the target symbol.
+            @"3_STARTOF_SECTION" = 0x000C,
+            /// A reference to the 32-bit location that is the size of the section that contains the target symbol.
+            @"3_SIZEOF_SECTION" = 0x000D,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            @"3_SECTION" = 0x000E,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            @"3_SECREL" = 0x000F,
+            /// The 32-bit RVA of the target symbol.
+            @"3_DIRECT32_NB" = 0x0010,
+            /// GP relative.
+            @"3_GPREL4_LONG" = 0x0011,
+            /// CLR token.
+            @"3_TOKEN" = 0x0012,
+            /// The offset from the current instruction in longwords.
+            /// If the NOMODE bit is not set, insert the inverse of the low bit at bit 32 to select PTA or PTB.
+            M_PCRELPT = 0x0013,
+            /// The low 16 bits of the 32-bit address.
+            M_REFLO = 0x0014,
+            /// The high 16 bits of the 32-bit address.
+            M_REFHALF = 0x0015,
+            /// The low 16 bits of the relative address.
+            M_RELLO = 0x0016,
+            /// The high 16 bits of the relative address.
+            M_RELHALF = 0x0017,
+            /// The relocation is valid only when it immediately follows a REFHALF, RELHALF, or RELLO relocation.
+            /// The SymbolTableIndex field of the relocation contains a displacement and not an index into the symbol table.
+            M_PAIR = 0x0018,
+            /// The relocation ignores section mode.
+            M_NOMODE = 0x8000,
+            _,
+        };
 
-    /// The 64-bit VA of the relocation target.
-    addr64 = 14,
+        /// IBM PowerPC Processors
+        /// The following relocation type indicators are defined for PowerPC processors.
+        pub const PPC = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The 64-bit VA of the target.
+            ADDR64 = 0x0001,
+            /// The 32-bit VA of the target.
+            ADDR32 = 0x0002,
+            /// The low 24 bits of the VA of the target.
+            /// This is valid only when the target symbol is absolute and can be sign-extended to its original value.
+            ADDR24 = 0x0003,
+            /// The low 16 bits of the target's VA.
+            ADDR16 = 0x0004,
+            /// The low 14 bits of the target's VA.
+            /// This is valid only when the target symbol is absolute and can be sign-extended to its original value.
+            ADDR14 = 0x0005,
+            /// A 24-bit PC-relative offset to the symbol's location.
+            REL24 = 0x0006,
+            /// A 14-bit PC-relative offset to the symbol's location.
+            REL14 = 0x0007,
+            /// The 32-bit RVA of the target.
+            ADDR32NB = 0x000A,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x000B,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000C,
+            /// The 16-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL16 = 0x000F,
+            /// The high 16 bits of the target's 32-bit VA.
+            /// This is used for the first instruction in a two-instruction sequence that loads a full address.
+            /// This relocation must be immediately followed by a PAIR relocation whose SymbolTableIndex contains a signed 16-bit displacement that is added to the upper 16 bits that was taken from the location that is being relocated.
+            REFHI = 0x0010,
+            /// The low 16 bits of the target's VA.
+            REFLO = 0x0011,
+            /// A relocation that is valid only when it immediately follows a REFHI or SECRELHI relocation.
+            /// Its SymbolTableIndex contains a displacement and not an index into the symbol table.
+            PAIR = 0x0012,
+            /// The low 16 bits of the 32-bit offset of the target from the beginning of its section.
+            SECRELLO = 0x0013,
+            /// The 16-bit signed displacement of the target relative to the GP register.
+            GPREL = 0x0015,
+            /// The CLR token.
+            TOKEN = 0x0016,
+            _,
+        };
 
-    /// The 19-bit offset to the relocation target, for conditional B instruction.
-    branch19 = 15,
+        /// Intel 386 Processors
+        /// The following relocation type indicators are defined for Intel 386 and compatible processors.
+        pub const I386 = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// Not supported.
+            DIR16 = 0x0001,
+            /// Not supported.
+            REL16 = 0x0002,
+            /// The target's 32-bit VA.
+            DIR32 = 0x0006,
+            /// The target's 32-bit RVA.
+            DIR32NB = 0x0007,
+            /// Not supported.
+            SEG12 = 0x0009,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000A,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x000B,
+            /// The CLR token.
+            TOKEN = 0x000C,
+            /// A 7-bit offset from the base of the section that contains the target.
+            SECREL7 = 0x000D,
+            /// The 32-bit relative displacement to the target.
+            /// This supports the x86 relative branch and call instructions.
+            REL32 = 0x0014,
+            _,
+        };
 
-    /// The 14-bit offset to the relocation target, for instructions TBZ and TBNZ.
-    branch14 = 16,
+        /// Intel Itanium Processor Family (IPF)
+        /// The following relocation type indicators are defined for the Intel Itanium processor family and compatible processors.
+        /// Note that relocations on instructions use the bundle's offset and slot number for the relocation offset.
+        pub const IA64 = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The instruction relocation can be followed by an ADDEND relocation whose value is added to the target address before it is inserted into the specified slot in the IMM14 bundle.
+            /// The relocation target must be absolute or the image must be fixed.
+            IMM14 = 0x0001,
+            /// The instruction relocation can be followed by an ADDEND relocation whose value is added to the target address before it is inserted into the specified slot in the IMM22 bundle.
+            /// The relocation target must be absolute or the image must be fixed.
+            IMM22 = 0x0002,
+            /// The slot number of this relocation must be one (1).
+            /// The relocation can be followed by an ADDEND relocation whose value is added to the target address before it is stored in all three slots of the IMM64 bundle.
+            IMM64 = 0x0003,
+            /// The target's 32-bit VA.
+            /// This is supported only for /LARGEADDRESSAWARE:NO images.
+            DIR32 = 0x0004,
+            /// The target's 64-bit VA.
+            DIR64 = 0x0005,
+            /// The instruction is fixed up with the 25-bit relative displacement to the 16-bit aligned target.
+            /// The low 4 bits of the displacement are zero and are not stored.
+            PCREL21B = 0x0006,
+            /// The instruction is fixed up with the 25-bit relative displacement to the 16-bit aligned target.
+            /// The low 4 bits of the displacement, which are zero, are not stored.
+            PCREL21M = 0x0007,
+            /// The LSBs of this relocation's offset must contain the slot number whereas the rest is the bundle address.
+            /// The bundle is fixed up with the 25-bit relative displacement to the 16-bit aligned target.
+            /// The low 4 bits of the displacement are zero and are not stored.
+            PCREL21F = 0x0008,
+            /// The instruction relocation can be followed by an ADDEND relocation whose value is added to the target address and then a 22-bit GP-relative offset that is calculated and applied to the GPREL22 bundle.
+            GPREL22 = 0x0009,
+            /// The instruction is fixed up with the 22-bit GP-relative offset to the target symbol's literal table entry.
+            /// The linker creates this literal table entry based on this relocation and the ADDEND relocation that might follow.
+            LTOFF22 = 0x000A,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000B,
+            /// The instruction is fixed up with the 22-bit offset of the target from the beginning of its section.
+            /// This relocation can be followed immediately by an ADDEND relocation, whose Value field contains the 32-bit unsigned offset of the target from the beginning of the section.
+            SECREL22 = 0x000C,
+            /// The slot number for this relocation must be one (1).
+            /// The instruction is fixed up with the 64-bit offset of the target from the beginning of its section.
+            /// This relocation can be followed immediately by an ADDEND relocation whose Value field contains the 32-bit unsigned offset of the target from the beginning of the section.
+            SECREL64I = 0x000D,
+            /// The address of data to be fixed up with the 32-bit offset of the target from the beginning of its section.
+            SECREL32 = 0x000E,
+            /// The target's 32-bit RVA.
+            DIR32NB = 0x0010,
+            /// This is applied to a signed 14-bit immediate that contains the difference between two relocatable targets.
+            /// This is a declarative field for the linker that indicates that the compiler has already emitted this value.
+            SREL14 = 0x0011,
+            /// This is applied to a signed 22-bit immediate that contains the difference between two relocatable targets.
+            /// This is a declarative field for the linker that indicates that the compiler has already emitted this value.
+            SREL22 = 0x0012,
+            /// This is applied to a signed 32-bit immediate that contains the difference between two relocatable values.
+            /// This is a declarative field for the linker that indicates that the compiler has already emitted this value.
+            SREL32 = 0x0013,
+            /// This is applied to an unsigned 32-bit immediate that contains the difference between two relocatable values.
+            /// This is a declarative field for the linker that indicates that the compiler has already emitted this value.
+            UREL32 = 0x0014,
+            /// A 60-bit PC-relative fixup that always stays as a BRL instruction of an MLX bundle.
+            PCREL60X = 0x0015,
+            /// A 60-bit PC-relative fixup.
+            /// If the target displacement fits in a signed 25-bit field, convert the entire bundle to an MBB bundle with NOP.B in slot 1 and a 25-bit BR instruction (with the 4 lowest bits all zero and dropped) in slot 2.
+            PCREL60B = 0x0016,
+            /// A 60-bit PC-relative fixup.
+            /// If the target displacement fits in a signed 25-bit field, convert the entire bundle to an MFB bundle with NOP.F in slot 1 and a 25-bit (4 lowest bits all zero and dropped) BR instruction in slot 2.
+            PCREL60F = 0x0017,
+            /// A 60-bit PC-relative fixup.
+            /// If the target displacement fits in a signed 25-bit field, convert the entire bundle to an MIB bundle with NOP.I in slot 1 and a 25-bit (4 lowest bits all zero and dropped) BR instruction in slot 2.
+            PCREL60I = 0x0018,
+            /// A 60-bit PC-relative fixup.
+            /// If the target displacement fits in a signed 25-bit field, convert the entire bundle to an MMB bundle with NOP.M in slot 1 and a 25-bit (4 lowest bits all zero and dropped) BR instruction in slot 2.
+            PCREL60M = 0x0019,
+            /// A 64-bit GP-relative fixup.
+            IMMGPREL64 = 0x001a,
+            /// A CLR token.
+            TOKEN = 0x001b,
+            /// A 32-bit GP-relative fixup.
+            GPREL32 = 0x001c,
+            /// The relocation is valid only when it immediately follows one of the following relocations: IMM14, IMM22, IMM64, GPREL22, LTOFF22, LTOFF64, SECREL22, SECREL64I, or SECREL32.
+            /// Its value contains the addend to apply to instructions within a bundle, not for data.
+            ADDEND = 0x001F,
+            _,
+        };
 
-    /// The 32-bit relative address from the byte following the relocation.
-    rel32 = 17,
+        /// MIPS Processors
+        /// The following relocation type indicators are defined for MIPS processors.
+        pub const MIPS = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The high 16 bits of the target's 32-bit VA.
+            REFHALF = 0x0001,
+            /// The target's 32-bit VA.
+            REFWORD = 0x0002,
+            /// The low 26 bits of the target's VA.
+            /// This supports the MIPS J and JAL instructions.
+            JMPADDR = 0x0003,
+            /// The high 16 bits of the target's 32-bit VA.
+            /// This is used for the first instruction in a two-instruction sequence that loads a full address.
+            /// This relocation must be immediately followed by a PAIR relocation whose SymbolTableIndex contains a signed 16-bit displacement that is added to the upper 16 bits that are taken from the location that is being relocated.
+            REFHI = 0x0004,
+            /// The low 16 bits of the target's VA.
+            REFLO = 0x0005,
+            /// A 16-bit signed displacement of the target relative to the GP register.
+            GPREL = 0x0006,
+            /// The same as IMAGE_REL_MIPS_GPREL.
+            LITERAL = 0x0007,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000A,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x000B,
+            /// The low 16 bits of the 32-bit offset of the target from the beginning of its section.
+            SECRELLO = 0x000C,
+            /// The high 16 bits of the 32-bit offset of the target from the beginning of its section.
+            /// An IMAGE_REL_MIPS_PAIR relocation must immediately follow this one.
+            /// The SymbolTableIndex of the PAIR relocation contains a signed 16-bit displacement that is added to the upper 16 bits that are taken from the location that is being relocated.
+            SECRELHI = 0x000D,
+            /// The low 26 bits of the target's VA.
+            /// This supports the MIPS16 JAL instruction.
+            JMPADDR16 = 0x0010,
+            /// The target's 32-bit RVA.
+            REFWORDNB = 0x0022,
+            /// The relocation is valid only when it immediately follows a REFHI or SECRELHI relocation.
+            /// Its SymbolTableIndex contains a displacement and not an index into the symbol table.
+            PAIR = 0x0025,
+            _,
+        };
 
-    _,
+        /// Mitsubishi M32R
+        /// The following relocation type indicators are defined for the Mitsubishi M32R processors.
+        pub const M32R = enum(u16) {
+            /// The relocation is ignored.
+            ABSOLUTE = 0x0000,
+            /// The target's 32-bit VA.
+            ADDR32 = 0x0001,
+            /// The target's 32-bit RVA.
+            ADDR32NB = 0x0002,
+            /// The target's 24-bit VA.
+            ADDR24 = 0x0003,
+            /// The target's 16-bit offset from the GP register.
+            GPREL16 = 0x0004,
+            /// The target's 24-bit offset from the program counter (PC), shifted left by 2 bits and sign-extended
+            PCREL24 = 0x0005,
+            /// The target's 16-bit offset from the PC, shifted left by 2 bits and sign-extended
+            PCREL16 = 0x0006,
+            /// The target's 8-bit offset from the PC, shifted left by 2 bits and sign-extended
+            PCREL8 = 0x0007,
+            /// The 16 MSBs of the target VA.
+            REFHALF = 0x0008,
+            /// The 16 MSBs of the target VA, adjusted for LSB sign extension.
+            /// This is used for the first instruction in a two-instruction sequence that loads a full 32-bit address.
+            /// This relocation must be immediately followed by a PAIR relocation whose SymbolTableIndex contains a signed 16-bit displacement that is added to the upper 16 bits that are taken from the location that is being relocated.
+            REFHI = 0x0009,
+            /// The 16 LSBs of the target VA.
+            REFLO = 0x000A,
+            /// The relocation must follow the REFHI relocation.
+            /// Its SymbolTableIndex contains a displacement and not an index into the symbol table.
+            PAIR = 0x000B,
+            /// The 16-bit section index of the section that contains the target.
+            /// This is used to support debugging information.
+            SECTION = 0x000C,
+            /// The 32-bit offset of the target from the beginning of its section.
+            /// This is used to support debugging information and static thread local storage.
+            SECREL = 0x000D,
+            /// The CLR token.
+            TOKEN = 0x000E,
+            _,
+        };
+    };
 };
lib/std/heap.zig
@@ -78,13 +78,15 @@ pub fn defaultQueryPageSize() usize {
     };
     var size = global.cached_result.load(.unordered);
     if (size > 0) return size;
-    size = switch (builtin.os.tag) {
-        .linux => if (builtin.link_libc) @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE))) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
-        .driverkit, .ios, .macos, .tvos, .visionos, .watchos => blk: {
+    size = size: switch (builtin.os.tag) {
+        .linux => if (builtin.link_libc)
+            @max(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)), 0)
+        else
+            std.os.linux.getauxval(std.elf.AT_PAGESZ),
+        .driverkit, .ios, .macos, .tvos, .visionos, .watchos => {
             const task_port = std.c.mach_task_self();
             // mach_task_self may fail "if there are any resource failures or other errors".
-            if (task_port == std.c.TASK.NULL)
-                break :blk 0;
+            if (task_port == std.c.TASK.NULL) break :size 0;
             var info_count = std.c.TASK.VM.INFO_COUNT;
             var vm_info: std.c.task_vm_info_data_t = undefined;
             vm_info.page_size = 0;
@@ -94,21 +96,28 @@ pub fn defaultQueryPageSize() usize {
                 @as(std.c.task_info_t, @ptrCast(&vm_info)),
                 &info_count,
             );
-            assert(vm_info.page_size != 0);
-            break :blk @intCast(vm_info.page_size);
+            break :size @intCast(vm_info.page_size);
         },
-        .windows => blk: {
-            var info: std.os.windows.SYSTEM_INFO = undefined;
-            std.os.windows.kernel32.GetSystemInfo(&info);
-            break :blk info.dwPageSize;
+        .windows => {
+            var sbi: windows.SYSTEM_BASIC_INFORMATION = undefined;
+            switch (windows.ntdll.NtQuerySystemInformation(
+                .SystemBasicInformation,
+                &sbi,
+                @sizeOf(windows.SYSTEM_BASIC_INFORMATION),
+                null,
+            )) {
+                .SUCCESS => break :size sbi.PageSize,
+                else => break :size 0,
+            }
         },
         else => if (builtin.link_libc)
-            @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)))
+            @max(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)), 0)
         else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
             @compileError("unsupported target: freestanding/other")
         else
             @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc, using the default implementation"),
     };
+    if (size == 0) size = page_size_max;
 
     assert(size >= page_size_min);
     assert(size <= page_size_max);
lib/std/Target.zig
@@ -1082,7 +1082,7 @@ pub fn toElfMachine(target: *const Target) std.elf.EM {
     };
 }
 
-pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
+pub fn toCoffMachine(target: *const Target) std.coff.IMAGE.FILE.MACHINE {
     return switch (target.cpu.arch) {
         .arm => .ARM,
         .thumb => .ARMNT,
@@ -1092,7 +1092,7 @@ pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
         .riscv32 => .RISCV32,
         .riscv64 => .RISCV64,
         .x86 => .I386,
-        .x86_64 => .X64,
+        .x86_64 => .AMD64,
 
         .amdgcn,
         .arc,
src/codegen/x86_64/Emit.zig
@@ -89,6 +89,7 @@ pub fn emitMir(emit: *Emit) Error!void {
             }
             var reloc_info_buf: [2]RelocInfo = undefined;
             var reloc_info_index: usize = 0;
+            const ip = &emit.pt.zcu.intern_pool;
             while (lowered_relocs.len > 0 and
                 lowered_relocs[0].lowered_inst_index == lowered_index) : ({
                 lowered_relocs = lowered_relocs[1..];
@@ -114,7 +115,6 @@ pub fn emitMir(emit: *Emit) Error!void {
                                 return error.EmitFail;
                             },
                         };
-                        const ip = &emit.pt.zcu.intern_pool;
                         break :target switch (ip.getNav(nav).status) {
                             .unresolved => unreachable,
                             .type_resolved => |type_resolved| .{
@@ -175,6 +175,8 @@ pub fn emitMir(emit: *Emit) Error!void {
                                 coff_file.getAtom(atom).getSymbolIndex().?
                             else |err|
                                 return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
+                        else if (emit.bin_file.cast(.coff2)) |coff|
+                            @intFromEnum(try coff.lazySymbol(lazy_sym))
                         else
                             return emit.fail("lazy symbols unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
                         .is_extern = false,
@@ -190,8 +192,13 @@ pub fn emitMir(emit: *Emit) Error!void {
                             try macho_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
                         else if (emit.bin_file.cast(.coff)) |coff_file|
                             try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, "compiler_rt")
-                        else
-                            return emit.fail("external symbol unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
+                        else if (emit.bin_file.cast(.coff2)) |coff| @intFromEnum(try coff.globalSymbol(
+                            extern_func.toSlice(&emit.lower.mir).?,
+                            switch (comp.compiler_rt_strat) {
+                                .none, .lib, .obj, .zcu => null,
+                                .dyn_lib => "compiler_rt",
+                            },
+                        )) else return emit.fail("external symbol unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
                         .is_extern = true,
                         .type = .symbol,
                     },
@@ -314,6 +321,18 @@ pub fn emitMir(emit: *Emit) Error!void {
                             }, emit.lower.target), reloc_info),
                             else => unreachable,
                         }
+                    } else if (emit.bin_file.cast(.coff2)) |_| {
+                        switch (lowered_inst.encoding.mnemonic) {
+                            .lea => try emit.encodeInst(try .new(.none, .lea, &.{
+                                lowered_inst.ops[0],
+                                .{ .mem = .initRip(.none, 0) },
+                            }, emit.lower.target), reloc_info),
+                            .mov => try emit.encodeInst(try .new(.none, .mov, &.{
+                                lowered_inst.ops[0],
+                                .{ .mem = .initRip(lowered_inst.ops[reloc.op_index].mem.sib.ptr_size, 0) },
+                            }, emit.lower.target), reloc_info),
+                            else => unreachable,
+                        }
                     } else return emit.fail("TODO implement relocs for {s}", .{
                         @tagName(emit.bin_file.tag),
                     });
@@ -683,7 +702,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                 table_reloc.source_offset,
                 @enumFromInt(emit.atom_index),
                 @as(i64, table_offset) + table_reloc.target_offset,
-                .{ .x86_64 = .@"32" },
+                .{ .X86_64 = .@"32" },
             );
             for (emit.lower.mir.table) |entry| {
                 try elf.addReloc(
@@ -691,7 +710,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     table_offset,
                     @enumFromInt(emit.atom_index),
                     emit.code_offset_mapping.items[entry],
-                    .{ .x86_64 = .@"64" },
+                    .{ .X86_64 = .@"64" },
                 );
                 table_offset += ptr_size;
             }
@@ -800,7 +819,7 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
             end_offset - 4,
             @enumFromInt(reloc.target.index),
             reloc.off,
-            .{ .x86_64 = .@"32" },
+            .{ .X86_64 = .@"32" },
         ) else if (emit.bin_file.cast(.coff)) |coff_file| {
             const atom_index = coff_file.getAtomIndexForSymbol(
                 .{ .sym_index = emit.atom_index, .file = null },
@@ -816,7 +835,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
                 .pcrel = true,
                 .length = 2,
             });
-        } else unreachable,
+        } else if (emit.bin_file.cast(.coff2)) |coff| try coff.addReloc(
+            @enumFromInt(emit.atom_index),
+            end_offset - 4,
+            @enumFromInt(reloc.target.index),
+            reloc.off,
+            .{ .AMD64 = .REL32 },
+        ) else unreachable,
         .branch => if (emit.bin_file.cast(.elf)) |elf_file| {
             const zo = elf_file.zigObjectPtr().?;
             const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
@@ -831,7 +856,7 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
             end_offset - 4,
             @enumFromInt(reloc.target.index),
             reloc.off - 4,
-            .{ .x86_64 = .PC32 },
+            .{ .X86_64 = .PC32 },
         ) else if (emit.bin_file.cast(.macho)) |macho_file| {
             const zo = macho_file.getZigObject().?;
             const atom = zo.symbols.items[emit.atom_index].getAtom(macho_file).?;
@@ -863,7 +888,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
                 .pcrel = true,
                 .length = 2,
             });
-        } else return emit.fail("TODO implement {s} reloc for {s}", .{
+        } else if (emit.bin_file.cast(.coff2)) |coff| try coff.addReloc(
+            @enumFromInt(emit.atom_index),
+            end_offset - 4,
+            @enumFromInt(reloc.target.index),
+            reloc.off,
+            .{ .AMD64 = .REL32 },
+        ) else return emit.fail("TODO implement {s} reloc for {s}", .{
             @tagName(reloc.target.type), @tagName(emit.bin_file.tag),
         }),
         .tls => if (emit.bin_file.cast(.elf)) |elf_file| {
@@ -892,7 +923,7 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
             end_offset - 4,
             @enumFromInt(reloc.target.index),
             reloc.off,
-            .{ .x86_64 = .TPOFF32 },
+            .{ .X86_64 = .TPOFF32 },
         ) else if (emit.bin_file.cast(.macho)) |macho_file| {
             const zo = macho_file.getZigObject().?;
             const atom = zo.symbols.items[emit.atom_index].getAtom(macho_file).?;
src/Compilation/Config.zig
@@ -438,6 +438,8 @@ pub fn resolve(options: Options) ResolveError!Config {
 
         if (options.use_new_linker) |x| break :b x;
 
+        if (target.ofmt == .coff) break :b true;
+
         break :b options.incremental;
     };
 
src/link/Coff.zig
@@ -125,11 +125,11 @@ const UavTable = std.AutoHashMapUnmanaged(InternPool.Index, AvMetadata);
 const RelocTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
 const BaseRelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
 
-const default_file_alignment: u16 = 0x200;
-const default_size_of_stack_reserve: u32 = 0x1000000;
-const default_size_of_stack_commit: u32 = 0x1000;
-const default_size_of_heap_reserve: u32 = 0x100000;
-const default_size_of_heap_commit: u32 = 0x1000;
+pub const default_file_alignment: u16 = 0x200;
+pub const default_size_of_stack_reserve: u32 = 0x1000000;
+pub const default_size_of_stack_commit: u32 = 0x1000;
+pub const default_size_of_heap_reserve: u32 = 0x100000;
+pub const default_size_of_heap_commit: u32 = 0x1000;
 
 const Section = struct {
     header: coff_util.SectionHeader,
@@ -334,51 +334,51 @@ pub fn createEmpty(
     if (coff.text_section_index == null) {
         const file_size: u32 = @intCast(options.program_code_size_hint);
         coff.text_section_index = try coff.allocateSection(".text", file_size, .{
-            .CNT_CODE = 1,
-            .MEM_EXECUTE = 1,
-            .MEM_READ = 1,
+            .CNT_CODE = true,
+            .MEM_EXECUTE = true,
+            .MEM_READ = true,
         });
     }
 
     if (coff.got_section_index == null) {
         const file_size = @as(u32, @intCast(options.symbol_count_hint)) * coff.ptr_width.size();
         coff.got_section_index = try coff.allocateSection(".got", file_size, .{
-            .CNT_INITIALIZED_DATA = 1,
-            .MEM_READ = 1,
+            .CNT_INITIALIZED_DATA = true,
+            .MEM_READ = true,
         });
     }
 
     if (coff.rdata_section_index == null) {
         const file_size: u32 = coff.page_size;
         coff.rdata_section_index = try coff.allocateSection(".rdata", file_size, .{
-            .CNT_INITIALIZED_DATA = 1,
-            .MEM_READ = 1,
+            .CNT_INITIALIZED_DATA = true,
+            .MEM_READ = true,
         });
     }
 
     if (coff.data_section_index == null) {
         const file_size: u32 = coff.page_size;
         coff.data_section_index = try coff.allocateSection(".data", file_size, .{
-            .CNT_INITIALIZED_DATA = 1,
-            .MEM_READ = 1,
-            .MEM_WRITE = 1,
+            .CNT_INITIALIZED_DATA = true,
+            .MEM_READ = true,
+            .MEM_WRITE = true,
         });
     }
 
     if (coff.idata_section_index == null) {
         const file_size = @as(u32, @intCast(options.symbol_count_hint)) * coff.ptr_width.size();
         coff.idata_section_index = try coff.allocateSection(".idata", file_size, .{
-            .CNT_INITIALIZED_DATA = 1,
-            .MEM_READ = 1,
+            .CNT_INITIALIZED_DATA = true,
+            .MEM_READ = true,
         });
     }
 
     if (coff.reloc_section_index == null) {
         const file_size = @as(u32, @intCast(options.symbol_count_hint)) * @sizeOf(coff_util.BaseRelocation);
         coff.reloc_section_index = try coff.allocateSection(".reloc", file_size, .{
-            .CNT_INITIALIZED_DATA = 1,
-            .MEM_DISCARDABLE = 1,
-            .MEM_READ = 1,
+            .CNT_INITIALIZED_DATA = true,
+            .MEM_DISCARDABLE = true,
+            .MEM_READ = true,
         });
     }
 
@@ -477,7 +477,7 @@ pub fn deinit(coff: *Coff) void {
     coff.base_relocs.deinit(gpa);
 }
 
-fn allocateSection(coff: *Coff, name: []const u8, size: u32, flags: coff_util.SectionHeaderFlags) !u16 {
+fn allocateSection(coff: *Coff, name: []const u8, size: u32, flags: coff_util.SectionHeader.Flags) !u16 {
     const index = @as(u16, @intCast(coff.sections.slice().len));
     const off = coff.findFreeSpace(size, default_file_alignment);
     // Memory is always allocated in sequence
@@ -836,7 +836,7 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bo
                 try debugMem(gpa, handle, pvaddr, mem_code);
             }
 
-            if (section.header.flags.MEM_WRITE == 0) {
+            if (!section.header.flags.MEM_WRITE) {
                 writeMemProtected(handle, pvaddr, mem_code) catch |err| {
                     log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
                 };
@@ -2227,21 +2227,21 @@ fn writeHeader(coff: *Coff) !void {
     mem.writeInt(u32, buffer.writer.buffer[0x3c..][0..4], msdos_stub.len, .little);
 
     writer.writeAll("PE\x00\x00") catch unreachable;
-    var flags = coff_util.CoffHeaderFlags{
-        .EXECUTABLE_IMAGE = 1,
-        .DEBUG_STRIPPED = 1, // TODO
+    var flags: coff_util.Header.Flags = .{
+        .EXECUTABLE_IMAGE = true,
+        .DEBUG_STRIPPED = true, // TODO
     };
     switch (coff.ptr_width) {
-        .p32 => flags.@"32BIT_MACHINE" = 1,
-        .p64 => flags.LARGE_ADDRESS_AWARE = 1,
+        .p32 => flags.@"32BIT_MACHINE" = true,
+        .p64 => flags.LARGE_ADDRESS_AWARE = true,
     }
     if (coff.base.comp.config.output_mode == .Lib and coff.base.comp.config.link_mode == .dynamic) {
-        flags.DLL = 1;
+        flags.DLL = true;
     }
 
     const timestamp = if (coff.repro) 0 else std.time.timestamp();
     const size_of_optional_header = @as(u16, @intCast(coff.getOptionalHeaderSize() + coff.getDataDirectoryHeadersSize()));
-    var coff_header = coff_util.CoffHeader{
+    var coff_header: coff_util.Header = .{
         .machine = target.toCoffMachine(),
         .number_of_sections = @as(u16, @intCast(coff.sections.slice().len)), // TODO what if we prune a section
         .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))),
@@ -2254,10 +2254,10 @@ fn writeHeader(coff: *Coff) !void {
     writer.writeAll(mem.asBytes(&coff_header)) catch unreachable;
 
     const dll_flags: coff_util.DllFlags = .{
-        .HIGH_ENTROPY_VA = 1, // TODO do we want to permit non-PIE builds at all?
-        .DYNAMIC_BASE = 1,
-        .TERMINAL_SERVER_AWARE = 1, // We are not a legacy app
-        .NX_COMPAT = 1, // We are compatible with Data Execution Prevention
+        .HIGH_ENTROPY_VA = true, // TODO do we want to permit non-PIE builds at all?
+        .DYNAMIC_BASE = true,
+        .TERMINAL_SERVER_AWARE = true, // We are not a legacy app
+        .NX_COMPAT = true, // We are compatible with Data Execution Prevention
     };
     const subsystem: coff_util.Subsystem = .WINDOWS_CUI;
     const size_of_image: u32 = coff.getSizeOfImage();
@@ -2269,13 +2269,13 @@ fn writeHeader(coff: *Coff) !void {
     var size_of_initialized_data: u32 = 0;
     var size_of_uninitialized_data: u32 = 0;
     for (coff.sections.items(.header)) |header| {
-        if (header.flags.CNT_CODE == 1) {
+        if (header.flags.CNT_CODE) {
             size_of_code += header.size_of_raw_data;
         }
-        if (header.flags.CNT_INITIALIZED_DATA == 1) {
+        if (header.flags.CNT_INITIALIZED_DATA) {
             size_of_initialized_data += header.size_of_raw_data;
         }
-        if (header.flags.CNT_UNINITIALIZED_DATA == 1) {
+        if (header.flags.CNT_UNINITIALIZED_DATA) {
             size_of_uninitialized_data += header.size_of_raw_data;
         }
     }
@@ -2283,7 +2283,7 @@ fn writeHeader(coff: *Coff) !void {
     switch (coff.ptr_width) {
         .p32 => {
             var opt_header = coff_util.OptionalHeaderPE32{
-                .magic = coff_util.IMAGE_NT_OPTIONAL_HDR32_MAGIC,
+                .magic = .PE32,
                 .major_linker_version = 0,
                 .minor_linker_version = 0,
                 .size_of_code = size_of_code,
@@ -2318,7 +2318,7 @@ fn writeHeader(coff: *Coff) !void {
         },
         .p64 => {
             var opt_header = coff_util.OptionalHeaderPE64{
-                .magic = coff_util.IMAGE_NT_OPTIONAL_HDR64_MAGIC,
+                .magic = .@"PE32+",
                 .major_linker_version = 0,
                 .minor_linker_version = 0,
                 .size_of_code = size_of_code,
@@ -2422,7 +2422,7 @@ fn allocatedVirtualSize(coff: *Coff, start: u32) u32 {
 
 fn getSizeOfHeaders(coff: Coff) u32 {
     const msdos_hdr_size = msdos_stub.len + 4;
-    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff_util.CoffHeader) + coff.getOptionalHeaderSize() +
+    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff_util.Header) + coff.getOptionalHeaderSize() +
         coff.getDataDirectoryHeadersSize() + coff.getSectionHeadersSize()));
 }
 
@@ -2443,7 +2443,7 @@ fn getSectionHeadersSize(coff: Coff) u32 {
 
 fn getDataDirectoryHeadersOffset(coff: Coff) u32 {
     const msdos_hdr_size = msdos_stub.len + 4;
-    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff_util.CoffHeader) + coff.getOptionalHeaderSize()));
+    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff_util.Header) + coff.getOptionalHeaderSize()));
 }
 
 fn getSectionHeadersOffset(coff: Coff) u32 {
@@ -3116,7 +3116,7 @@ fn pwriteAll(coff: *Coff, bytes: []const u8, offset: u64) error{LinkFailure}!voi
 /// A "page" is 512 bytes.
 /// A "long" is 4 bytes.
 /// A "word" is 2 bytes.
-const msdos_stub: [120]u8 = .{
+pub const msdos_stub: [120]u8 = .{
     'M', 'Z', // Magic number. Stands for Mark Zbikowski (designer of the MS-DOS executable format).
     0x78, 0x00, // Number of bytes in the last page. This matches the size of this entire MS-DOS stub.
     0x01, 0x00, // Number of pages.
src/link/Coff2.zig
@@ -0,0 +1,2128 @@
+base: link.File,
+endian: std.builtin.Endian,
+mf: MappedFile,
+nodes: std.MultiArrayList(Node),
+import_table: ImportTable,
+strings: std.HashMapUnmanaged(
+    u32,
+    void,
+    std.hash_map.StringIndexContext,
+    std.hash_map.default_max_load_percentage,
+),
+string_bytes: std.ArrayList(u8),
+section_table: std.ArrayList(Symbol.Index),
+symbol_table: std.ArrayList(Symbol),
+globals: std.AutoArrayHashMapUnmanaged(GlobalName, Symbol.Index),
+global_pending_index: u32,
+navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Symbol.Index),
+uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
+lazy: std.EnumArray(link.File.LazySymbol.Kind, struct {
+    map: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
+    pending_index: u32,
+}),
+pending_uavs: std.AutoArrayHashMapUnmanaged(Node.UavMapIndex, struct {
+    alignment: InternPool.Alignment,
+    src_loc: Zcu.LazySrcLoc,
+}),
+relocs: std.ArrayList(Reloc),
+/// This is hiding actual bugs with global symbols! Reconsider once they are implemented correctly.
+entry_hack: Symbol.Index,
+
+pub const Node = union(enum) {
+    file,
+    header,
+    signature,
+    coff_header,
+    optional_header,
+    data_directories,
+    section_table,
+    section: Symbol.Index,
+    import_directory_table,
+    import_lookup_table: u32,
+    import_address_table: u32,
+    import_hint_name_table: u32,
+    global: GlobalMapIndex,
+    nav: NavMapIndex,
+    uav: UavMapIndex,
+    lazy_code: LazyMapRef.Index(.code),
+    lazy_const_data: LazyMapRef.Index(.const_data),
+
+    pub const GlobalMapIndex = enum(u32) {
+        _,
+
+        pub fn globalName(gmi: GlobalMapIndex, coff: *const Coff) GlobalName {
+            return coff.globals.keys()[@intFromEnum(gmi)];
+        }
+
+        pub fn symbol(gmi: GlobalMapIndex, coff: *const Coff) Symbol.Index {
+            return coff.globals.values()[@intFromEnum(gmi)];
+        }
+    };
+
+    pub const NavMapIndex = enum(u32) {
+        _,
+
+        pub fn navIndex(nmi: NavMapIndex, coff: *const Coff) InternPool.Nav.Index {
+            return coff.navs.keys()[@intFromEnum(nmi)];
+        }
+
+        pub fn symbol(nmi: NavMapIndex, coff: *const Coff) Symbol.Index {
+            return coff.navs.values()[@intFromEnum(nmi)];
+        }
+    };
+
+    pub const UavMapIndex = enum(u32) {
+        _,
+
+        pub fn uavValue(umi: UavMapIndex, coff: *const Coff) InternPool.Index {
+            return coff.uavs.keys()[@intFromEnum(umi)];
+        }
+
+        pub fn symbol(umi: UavMapIndex, coff: *const Coff) Symbol.Index {
+            return coff.uavs.values()[@intFromEnum(umi)];
+        }
+    };
+
+    pub const LazyMapRef = struct {
+        kind: link.File.LazySymbol.Kind,
+        index: u32,
+
+        pub fn Index(comptime kind: link.File.LazySymbol.Kind) type {
+            return enum(u32) {
+                _,
+
+                pub fn ref(lmi: @This()) LazyMapRef {
+                    return .{ .kind = kind, .index = @intFromEnum(lmi) };
+                }
+
+                pub fn lazySymbol(lmi: @This(), coff: *const Coff) link.File.LazySymbol {
+                    return lmi.ref().lazySymbol(coff);
+                }
+
+                pub fn symbol(lmi: @This(), coff: *const Coff) Symbol.Index {
+                    return lmi.ref().symbol(coff);
+                }
+            };
+        }
+
+        pub fn lazySymbol(lmr: LazyMapRef, coff: *const Coff) link.File.LazySymbol {
+            return .{ .kind = lmr.kind, .ty = coff.lazy.getPtrConst(lmr.kind).map.keys()[lmr.index] };
+        }
+
+        pub fn symbol(lmr: LazyMapRef, coff: *const Coff) Symbol.Index {
+            return coff.lazy.getPtrConst(lmr.kind).map.values()[lmr.index];
+        }
+    };
+
+    pub const Tag = @typeInfo(Node).@"union".tag_type.?;
+
+    const known_count = @typeInfo(@TypeOf(known)).@"struct".fields.len;
+    const known = known: {
+        const Known = enum {
+            file,
+            header,
+            signature,
+            coff_header,
+            optional_header,
+            data_directories,
+            section_table,
+        };
+        var mut_known: std.enums.EnumFieldStruct(Known, MappedFile.Node.Index, null) = undefined;
+        for (@typeInfo(Known).@"enum".fields) |field|
+            @field(mut_known, field.name) = @enumFromInt(field.value);
+        break :known mut_known;
+    };
+
+    comptime {
+        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Node) == 8);
+    }
+};
+
+pub const DataDirectory = enum {
+    export_table,
+    import_table,
+    resource_table,
+    exception_table,
+    certificate_table,
+    base_relocation_table,
+    debug,
+    architecture,
+    global_ptr,
+    tls_table,
+    load_config_table,
+    bound_import,
+    import_address_table,
+    delay_import_descriptor,
+    clr_runtime_header,
+    reserved,
+};
+
+pub const ImportTable = struct {
+    directory_table_ni: MappedFile.Node.Index,
+    dlls: std.AutoArrayHashMapUnmanaged(void, Dll),
+
+    pub const Dll = struct {
+        import_lookup_table_ni: MappedFile.Node.Index,
+        import_address_table_si: Symbol.Index,
+        import_hint_name_table_ni: MappedFile.Node.Index,
+        len: u32,
+        hint_name_len: u32,
+    };
+
+    const Adapter = struct {
+        coff: *Coff,
+
+        pub fn eql(adapter: Adapter, lhs_key: []const u8, _: void, rhs_index: usize) bool {
+            const coff = adapter.coff;
+            const dll_name = coff.import_table.dlls.values()[rhs_index]
+                .import_hint_name_table_ni.sliceConst(&coff.mf);
+            return std.mem.startsWith(u8, dll_name, lhs_key) and
+                std.mem.startsWith(u8, dll_name[lhs_key.len..], ".dll\x00");
+        }
+
+        pub fn hash(_: Adapter, key: []const u8) u32 {
+            assert(std.mem.indexOfScalar(u8, key, 0) == null);
+            return std.array_hash_map.hashString(key);
+        }
+    };
+};
+
+pub const String = enum(u32) {
+    _,
+
+    pub const Optional = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn unwrap(os: String.Optional) ?String {
+            return switch (os) {
+                else => |s| @enumFromInt(@intFromEnum(s)),
+                .none => null,
+            };
+        }
+
+        pub fn toSlice(os: String.Optional, coff: *Coff) ?[:0]const u8 {
+            return (os.unwrap() orelse return null).toSlice(coff);
+        }
+    };
+
+    pub fn toSlice(s: String, coff: *Coff) [:0]const u8 {
+        const slice = coff.string_bytes.items[@intFromEnum(s)..];
+        return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0];
+    }
+
+    pub fn toOptional(s: String) String.Optional {
+        return @enumFromInt(@intFromEnum(s));
+    }
+};
+
+pub const GlobalName = struct { name: String, lib_name: String.Optional };
+
+pub const Symbol = struct {
+    ni: MappedFile.Node.Index,
+    rva: u32,
+    size: u32,
+    /// Relocations contained within this symbol
+    loc_relocs: Reloc.Index,
+    /// Relocations targeting this symbol
+    target_relocs: Reloc.Index,
+    section_number: SectionNumber,
+    data_directory: ?DataDirectory,
+    unused0: u32 = 0,
+    unused1: u32 = 0,
+
+    pub const SectionNumber = enum(i16) {
+        UNDEFINED = 0,
+        ABSOLUTE = -1,
+        DEBUG = -2,
+        _,
+
+        fn toIndex(sn: SectionNumber) u15 {
+            return @intCast(@intFromEnum(sn) - 1);
+        }
+
+        pub fn symbol(sn: SectionNumber, coff: *const Coff) Symbol.Index {
+            return coff.section_table.items[sn.toIndex()];
+        }
+
+        pub fn header(sn: SectionNumber, coff: *Coff) *std.coff.SectionHeader {
+            return &coff.sectionTableSlice()[sn.toIndex()];
+        }
+    };
+
+    pub const Index = enum(u32) {
+        null,
+        data,
+        idata,
+        rdata,
+        text,
+        _,
+
+        const known_count = @typeInfo(Index).@"enum".fields.len;
+
+        pub fn get(si: Symbol.Index, coff: *Coff) *Symbol {
+            return &coff.symbol_table.items[@intFromEnum(si)];
+        }
+
+        pub fn node(si: Symbol.Index, coff: *Coff) MappedFile.Node.Index {
+            const ni = si.get(coff).ni;
+            assert(ni != .none);
+            return ni;
+        }
+
+        pub fn flushMoved(si: Symbol.Index, coff: *Coff) void {
+            const sym = si.get(coff);
+            sym.rva = coff.computeNodeRva(sym.ni);
+            if (si == coff.entry_hack)
+                coff.targetStore(&coff.optionalHeaderStandardPtr().address_of_entry_point, sym.rva);
+            si.applyLocationRelocs(coff);
+            si.applyTargetRelocs(coff);
+        }
+
+        pub fn applyLocationRelocs(si: Symbol.Index, coff: *Coff) void {
+            for (coff.relocs.items[@intFromEnum(si.get(coff).loc_relocs)..]) |*reloc| {
+                if (reloc.loc != si) break;
+                reloc.apply(coff);
+            }
+        }
+
+        pub fn applyTargetRelocs(si: Symbol.Index, coff: *Coff) void {
+            var ri = si.get(coff).target_relocs;
+            while (ri != .none) {
+                const reloc = ri.get(coff);
+                assert(reloc.target == si);
+                reloc.apply(coff);
+                ri = reloc.next;
+            }
+        }
+
+        pub fn deleteLocationRelocs(si: Symbol.Index, coff: *Coff) void {
+            const sym = si.get(coff);
+            for (coff.relocs.items[@intFromEnum(sym.loc_relocs)..]) |*reloc| {
+                if (reloc.loc != si) break;
+                reloc.delete(coff);
+            }
+            sym.loc_relocs = .none;
+        }
+    };
+
+    comptime {
+        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Symbol) == 32);
+    }
+};
+
+/// One relocation edge: the bytes at `loc` + `offset` must be patched to
+/// refer to `target` (+ `addend`). Relocations sharing a target form a
+/// doubly-linked list through `prev`/`next`, headed by the target symbol's
+/// `target_relocs` field.
+pub const Reloc = extern struct {
+    type: Reloc.Type,
+    prev: Reloc.Index,
+    next: Reloc.Index,
+    loc: Symbol.Index,
+    target: Symbol.Index,
+    /// Padding; keeps `offset`/`addend` 8-byte aligned in this extern layout.
+    unused: u32,
+    offset: u64,
+    addend: i64,
+
+    /// Machine-specific relocation kind; the active field is selected at
+    /// apply time from the COFF header's `machine` value.
+    pub const Type = extern union {
+        AMD64: std.coff.IMAGE.REL.AMD64,
+        ARM: std.coff.IMAGE.REL.ARM,
+        ARM64: std.coff.IMAGE.REL.ARM64,
+        SH: std.coff.IMAGE.REL.SH,
+        PPC: std.coff.IMAGE.REL.PPC,
+        I386: std.coff.IMAGE.REL.I386,
+        IA64: std.coff.IMAGE.REL.IA64,
+        MIPS: std.coff.IMAGE.REL.MIPS,
+        M32R: std.coff.IMAGE.REL.M32R,
+    };
+
+    /// Index into `coff.relocs`; `none` doubles as the list terminator.
+    pub const Index = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn get(si: Reloc.Index, coff: *Coff) *Reloc {
+            return &coff.relocs.items[@intFromEnum(si)];
+        }
+    };
+
+    /// Patch the bytes at `loc` + `offset` to point at `target` + `addend`.
+    /// Skipped while either symbol has no backing node or its node has a
+    /// pending move (presumably re-applied once layout settles — confirm).
+    pub fn apply(reloc: *const Reloc, coff: *Coff) void {
+        const loc_sym = reloc.loc.get(coff);
+        switch (loc_sym.ni) {
+            .none => return,
+            else => |ni| if (ni.hasMoved(&coff.mf)) return,
+        }
+        const target_sym = reloc.target.get(coff);
+        switch (target_sym.ni) {
+            .none => return,
+            else => |ni| if (ni.hasMoved(&coff.mf)) return,
+        }
+        const loc_slice = loc_sym.ni.slice(&coff.mf)[@intCast(reloc.offset)..];
+        // Wrapping add lets a negative addend subtract from the RVA.
+        const target_rva = target_sym.rva +% @as(u64, @bitCast(reloc.addend));
+        const target_endian = coff.targetEndian();
+        switch (coff.targetLoad(&coff.headerPtr().machine)) {
+            else => |machine| @panic(@tagName(machine)),
+            .AMD64 => switch (reloc.type.AMD64) {
+                else => |kind| @panic(@tagName(kind)),
+                .ABSOLUTE => {},
+                // Absolute 64-bit virtual address (image base + RVA).
+                .ADDR64 => std.mem.writeInt(
+                    u64,
+                    loc_slice[0..8],
+                    coff.optionalHeaderField(.image_base) + target_rva,
+                    target_endian,
+                ),
+                .ADDR32 => std.mem.writeInt(
+                    u32,
+                    loc_slice[0..4],
+                    @intCast(coff.optionalHeaderField(.image_base) + target_rva),
+                    target_endian,
+                ),
+                // "NB": no base — a plain RVA without the image base.
+                .ADDR32NB => std.mem.writeInt(
+                    u32,
+                    loc_slice[0..4],
+                    @intCast(target_rva),
+                    target_endian,
+                ),
+                // REL32_n: PC-relative displacement measured from the end of
+                // the 4-byte field plus n trailing instruction bytes.
+                .REL32 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
+                    target_endian,
+                ),
+                .REL32_1 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 5)))),
+                    target_endian,
+                ),
+                .REL32_2 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 6)))),
+                    target_endian,
+                ),
+                .REL32_3 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 7)))),
+                    target_endian,
+                ),
+                .REL32_4 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 8)))),
+                    target_endian,
+                ),
+                .REL32_5 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 9)))),
+                    target_endian,
+                ),
+            },
+            .I386 => switch (reloc.type.I386) {
+                else => |kind| @panic(@tagName(kind)),
+                .ABSOLUTE => {},
+                .DIR16 => std.mem.writeInt(
+                    u16,
+                    loc_slice[0..2],
+                    @intCast(coff.optionalHeaderField(.image_base) + target_rva),
+                    target_endian,
+                ),
+                .REL16 => std.mem.writeInt(
+                    i16,
+                    loc_slice[0..2],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 2)))),
+                    target_endian,
+                ),
+                // Absolute 32-bit virtual address (image base + RVA).
+                .DIR32 => std.mem.writeInt(
+                    u32,
+                    loc_slice[0..4],
+                    @intCast(coff.optionalHeaderField(.image_base) + target_rva),
+                    target_endian,
+                ),
+                .DIR32NB => std.mem.writeInt(
+                    u32,
+                    loc_slice[0..4],
+                    @intCast(target_rva),
+                    target_endian,
+                ),
+                .REL32 => std.mem.writeInt(
+                    i32,
+                    loc_slice[0..4],
+                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
+                    target_endian,
+                ),
+            },
+        }
+    }
+
+    /// Unlink this relocation from its target's doubly-linked list and
+    /// poison it. Does not shrink `coff.relocs`.
+    pub fn delete(reloc: *Reloc, coff: *Coff) void {
+        switch (reloc.prev) {
+            .none => {
+                // No predecessor: this reloc must be the list head.
+                const target = reloc.target.get(coff);
+                assert(target.target_relocs.get(coff) == reloc);
+                target.target_relocs = reloc.next;
+            },
+            else => |prev| prev.get(coff).next = reloc.next,
+        }
+        switch (reloc.next) {
+            .none => {},
+            else => |next| next.get(coff).prev = reloc.prev,
+        }
+        reloc.* = undefined;
+    }
+
+    comptime {
+        // NOTE(review): layout size is only asserted in non-safety builds;
+        // confirm whether skipping the check under safety is intentional.
+        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Reloc) == 40);
+    }
+};
+
+/// `link.File` open entry point. This backend rebuilds its output from
+/// scratch, so opening an existing path behaves the same as `createEmpty`;
+/// both delegate to `create`.
+pub fn open(
+    arena: std.mem.Allocator,
+    comp: *Compilation,
+    path: std.Build.Cache.Path,
+    options: link.File.OpenOptions,
+) !*Coff {
+    return create(arena, comp, path, options);
+}
+/// `link.File` entry point for creating a fresh output file; identical to
+/// `open` for this backend — both delegate to `create`.
+pub fn createEmpty(
+    arena: std.mem.Allocator,
+    comp: *Compilation,
+    path: std.Build.Cache.Path,
+    options: link.File.OpenOptions,
+) !*Coff {
+    return create(arena, comp, path, options);
+}
+/// Shared implementation behind `open` and `createEmpty`: resolves
+/// target-dependent parameters, allocates the `Coff` in `arena`, creates
+/// the output file, and writes the initial headers via `initHeaders`.
+fn create(
+    arena: std.mem.Allocator,
+    comp: *Compilation,
+    path: std.Build.Cache.Path,
+    options: link.File.OpenOptions,
+) !*Coff {
+    const target = &comp.root_mod.resolved_target.result;
+    assert(target.ofmt == .coff);
+    // An "image" (PE) gets the MS-DOS stub, optional header and data
+    // directories; relocatable objects and static libraries do not.
+    const is_image = switch (comp.config.output_mode) {
+        .Exe => true,
+        .Lib => switch (comp.config.link_mode) {
+            .static => false,
+            .dynamic => true,
+        },
+        .Obj => false,
+    };
+    const machine = target.toCoffMachine();
+    // Zero timestamp for reproducible builds; otherwise truncate the Unix
+    // time into the header's 32-bit field.
+    const timestamp: u32 = if (options.repro) 0 else @truncate(@as(u64, @bitCast(std.time.timestamp())));
+    const major_subsystem_version = options.major_subsystem_version orelse 6;
+    const minor_subsystem_version = options.minor_subsystem_version orelse 0;
+    const magic: std.coff.OptionalHeader.Magic = switch (target.ptrBitWidth()) {
+        0...32 => .PE32,
+        33...64 => .@"PE32+",
+        else => return error.UnsupportedCOFFArchitecture,
+    };
+    // Section alignment expressed as a power of two: 1 << 12 = 4 KiB for
+    // most machines, 1 << 13 = 8 KiB for Alpha/IA64.
+    const section_align: std.mem.Alignment = switch (machine) {
+        .AMD64, .I386 => @enumFromInt(12),
+        .SH3, .SH3DSP, .SH4, .SH5 => @enumFromInt(12),
+        .MIPS16, .MIPSFPU, .MIPSFPU16, .WCEMIPSV2 => @enumFromInt(12),
+        .POWERPC, .POWERPCFP => @enumFromInt(12),
+        .ALPHA, .ALPHA64 => @enumFromInt(13),
+        .IA64 => @enumFromInt(13),
+        .ARM => @enumFromInt(12),
+        else => return error.UnsupportedCOFFArchitecture,
+    };
+
+    const coff = try arena.create(Coff);
+    const file = try path.root_dir.handle.createFile(path.sub_path, .{
+        .read = true,
+        .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
+    });
+    errdefer file.close();
+    coff.* = .{
+        .base = .{
+            .tag = .coff2,
+
+            .comp = comp,
+            .emit = path,
+
+            .file = file,
+            .gc_sections = false,
+            .print_gc_sections = false,
+            .build_id = .none,
+            .allow_shlib_undefined = false,
+            .stack_size = 0,
+        },
+        .endian = target.cpu.arch.endian(),
+        .mf = try .init(file, comp.gpa),
+        .nodes = .empty,
+        .import_table = .{
+            .directory_table_ni = .none,
+            .dlls = .empty,
+        },
+        .strings = .empty,
+        .string_bytes = .empty,
+        .section_table = .empty,
+        .symbol_table = .empty,
+        .globals = .empty,
+        .global_pending_index = 0,
+        .navs = .empty,
+        .uavs = .empty,
+        .lazy = .initFill(.{
+            .map = .empty,
+            .pending_index = 0,
+        }),
+        .pending_uavs = .empty,
+        .relocs = .empty,
+        // NOTE(review): temporary entry-point mechanism, judging by the name.
+        .entry_hack = .null,
+    };
+    errdefer coff.deinit();
+
+    try coff.initHeaders(
+        is_image,
+        machine,
+        timestamp,
+        major_subsystem_version,
+        minor_subsystem_version,
+        magic,
+        section_align,
+    );
+    return coff;
+}
+
+/// Release every allocation owned by the linker state. The `Coff` struct
+/// itself was allocated in an arena (see `create`) and is not freed here.
+pub fn deinit(coff: *Coff) void {
+    const gpa = coff.base.comp.gpa;
+    coff.mf.deinit(gpa);
+    coff.nodes.deinit(gpa);
+    coff.import_table.dlls.deinit(gpa);
+    coff.strings.deinit(gpa);
+    coff.string_bytes.deinit(gpa);
+    coff.section_table.deinit(gpa);
+    coff.symbol_table.deinit(gpa);
+    coff.globals.deinit(gpa);
+    coff.navs.deinit(gpa);
+    coff.uavs.deinit(gpa);
+    for (&coff.lazy.values) |*lazy| lazy.map.deinit(gpa);
+    coff.pending_uavs.deinit(gpa);
+    coff.relocs.deinit(gpa);
+    // Poison so use-after-deinit is caught in safe builds.
+    coff.* = undefined;
+}
+
+/// Lay out and populate the output's header region: MS-DOS stub + "PE\0\0"
+/// signature (image only), COFF header, optional header, data directories,
+/// and an initially empty section table; then create the known sections and
+/// the import directory table node.
+fn initHeaders(
+    coff: *Coff,
+    is_image: bool,
+    machine: std.coff.IMAGE.FILE.MACHINE,
+    timestamp: u32,
+    major_subsystem_version: u16,
+    minor_subsystem_version: u16,
+    magic: std.coff.OptionalHeader.Magic,
+    section_align: std.mem.Alignment,
+) !void {
+    const comp = coff.base.comp;
+    const gpa = comp.gpa;
+    const file_align: std.mem.Alignment =
+        comptime .fromByteUnits(link.File.Coff.default_file_alignment);
+    const target_endian = coff.targetEndian();
+
+    // Optional header and data directories exist only in images; their
+    // combined size is advertised in the COFF header below.
+    const optional_header_size: u16 = if (is_image) switch (magic) {
+        _ => unreachable,
+        inline else => |ct_magic| @sizeOf(@field(std.coff.OptionalHeader, @tagName(ct_magic))),
+    } else 0;
+    const data_directories_len = @typeInfo(DataDirectory).@"enum".fields.len;
+    const data_directories_size: u16 = if (is_image)
+        @sizeOf(std.coff.ImageDataDirectory) * data_directories_len
+    else
+        0;
+
+    // MappedFile node indices must line up with `Node.known`; every node
+    // created below is paired with an append keeping `coff.nodes` parallel,
+    // and the asserts check the agreed-upon indices.
+    try coff.nodes.ensureTotalCapacity(gpa, Node.known_count);
+    coff.nodes.appendAssumeCapacity(.file);
+
+    const header_ni = Node.known.header;
+    assert(header_ni == try coff.mf.addOnlyChildNode(gpa, .root, .{
+        .alignment = coff.mf.flags.block_size,
+        .fixed = true,
+    }));
+    coff.nodes.appendAssumeCapacity(.header);
+
+    // Signature: optional MS-DOS stub followed by the 4-byte "PE\0\0" magic.
+    const signature_ni = Node.known.signature;
+    assert(signature_ni == try coff.mf.addOnlyChildNode(gpa, header_ni, .{
+        .size = (if (is_image) link.File.Coff.msdos_stub.len else 0) + "PE\x00\x00".len,
+        .alignment = .@"4",
+        .fixed = true,
+    }));
+    coff.nodes.appendAssumeCapacity(.signature);
+    {
+        const signature_slice = signature_ni.slice(&coff.mf);
+        if (is_image)
+            @memcpy(signature_slice[0..link.File.Coff.msdos_stub.len], &link.File.Coff.msdos_stub);
+        @memcpy(signature_slice[signature_slice.len - 4 ..], "PE\x00\x00");
+    }
+
+    // COFF file header.
+    const coff_header_ni = Node.known.coff_header;
+    assert(coff_header_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+        .size = @sizeOf(std.coff.Header),
+        .alignment = .@"4",
+        .fixed = true,
+    }));
+    coff.nodes.appendAssumeCapacity(.coff_header);
+    {
+        const coff_header: *std.coff.Header = @ptrCast(@alignCast(coff_header_ni.slice(&coff.mf)));
+        coff_header.* = .{
+            .machine = machine,
+            .number_of_sections = 0,
+            .time_date_stamp = timestamp,
+            .pointer_to_symbol_table = 0,
+            .number_of_symbols = 0,
+            .size_of_optional_header = optional_header_size + data_directories_size,
+            .flags = .{
+                .RELOCS_STRIPPED = is_image,
+                .EXECUTABLE_IMAGE = is_image,
+                .DEBUG_STRIPPED = true,
+                .@"32BIT_MACHINE" = magic == .PE32,
+                .LARGE_ADDRESS_AWARE = magic == .@"PE32+",
+                .DLL = comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic,
+            },
+        };
+        // All header fields are stored in target byte order.
+        if (target_endian != native_endian) std.mem.byteSwapAllFields(std.coff.Header, coff_header);
+    }
+
+    // Optional header (PE32 or PE32+); zero-sized and left empty otherwise.
+    const optional_header_ni = Node.known.optional_header;
+    assert(optional_header_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+        .size = optional_header_size,
+        .alignment = .@"4",
+        .fixed = true,
+    }));
+    coff.nodes.appendAssumeCapacity(.optional_header);
+    if (is_image) switch (magic) {
+        _ => unreachable,
+        .PE32 => {
+            const optional_header: *std.coff.OptionalHeader.PE32 =
+                @ptrCast(@alignCast(optional_header_ni.slice(&coff.mf)));
+            optional_header.* = .{
+                .standard = .{
+                    .magic = .PE32,
+                    .major_linker_version = 0,
+                    .minor_linker_version = 0,
+                    .size_of_code = 0,
+                    .size_of_initialized_data = 0,
+                    .size_of_uninitialized_data = 0,
+                    .address_of_entry_point = 0,
+                    .base_of_code = 0,
+                },
+                .base_of_data = 0,
+                // Conventional default image bases for 32-bit EXEs and DLLs.
+                .image_base = switch (coff.base.comp.config.output_mode) {
+                    .Exe => 0x400000,
+                    .Lib => switch (coff.base.comp.config.link_mode) {
+                        .static => 0,
+                        .dynamic => 0x10000000,
+                    },
+                    .Obj => 0,
+                },
+                .section_alignment = @intCast(section_align.toByteUnits()),
+                .file_alignment = @intCast(file_align.toByteUnits()),
+                .major_operating_system_version = 6,
+                .minor_operating_system_version = 0,
+                .major_image_version = 0,
+                .minor_image_version = 0,
+                .major_subsystem_version = major_subsystem_version,
+                .minor_subsystem_version = minor_subsystem_version,
+                .win32_version_value = 0,
+                .size_of_image = 0,
+                .size_of_headers = 0,
+                .checksum = 0,
+                .subsystem = .WINDOWS_CUI,
+                .dll_flags = .{
+                    .HIGH_ENTROPY_VA = true,
+                    .DYNAMIC_BASE = true,
+                    .TERMINAL_SERVER_AWARE = true,
+                    .NX_COMPAT = true,
+                },
+                .size_of_stack_reserve = link.File.Coff.default_size_of_stack_reserve,
+                .size_of_stack_commit = link.File.Coff.default_size_of_stack_commit,
+                .size_of_heap_reserve = link.File.Coff.default_size_of_heap_reserve,
+                .size_of_heap_commit = link.File.Coff.default_size_of_heap_commit,
+                .loader_flags = 0,
+                .number_of_rva_and_sizes = data_directories_len,
+            };
+            if (target_endian != native_endian)
+                std.mem.byteSwapAllFields(std.coff.OptionalHeader.PE32, optional_header);
+        },
+        .@"PE32+" => {
+            const header: *std.coff.OptionalHeader.@"PE32+" =
+                @ptrCast(@alignCast(optional_header_ni.slice(&coff.mf)));
+            header.* = .{
+                .standard = .{
+                    .magic = .@"PE32+",
+                    .major_linker_version = 0,
+                    .minor_linker_version = 0,
+                    .size_of_code = 0,
+                    .size_of_initialized_data = 0,
+                    .size_of_uninitialized_data = 0,
+                    .address_of_entry_point = 0,
+                    .base_of_code = 0,
+                },
+                // Conventional default image bases for 64-bit EXEs and DLLs.
+                .image_base = switch (coff.base.comp.config.output_mode) {
+                    .Exe => 0x140000000,
+                    .Lib => switch (coff.base.comp.config.link_mode) {
+                        .static => 0,
+                        .dynamic => 0x180000000,
+                    },
+                    .Obj => 0,
+                },
+                .section_alignment = @intCast(section_align.toByteUnits()),
+                .file_alignment = @intCast(file_align.toByteUnits()),
+                .major_operating_system_version = 6,
+                .minor_operating_system_version = 0,
+                .major_image_version = 0,
+                .minor_image_version = 0,
+                .major_subsystem_version = major_subsystem_version,
+                .minor_subsystem_version = minor_subsystem_version,
+                .win32_version_value = 0,
+                .size_of_image = 0,
+                .size_of_headers = 0,
+                .checksum = 0,
+                .subsystem = .WINDOWS_CUI,
+                .dll_flags = .{
+                    .HIGH_ENTROPY_VA = true,
+                    .DYNAMIC_BASE = true,
+                    .TERMINAL_SERVER_AWARE = true,
+                    .NX_COMPAT = true,
+                },
+                .size_of_stack_reserve = link.File.Coff.default_size_of_stack_reserve,
+                .size_of_stack_commit = link.File.Coff.default_size_of_stack_commit,
+                .size_of_heap_reserve = link.File.Coff.default_size_of_heap_reserve,
+                .size_of_heap_commit = link.File.Coff.default_size_of_heap_commit,
+                .loader_flags = 0,
+                .number_of_rva_and_sizes = data_directories_len,
+            };
+            if (target_endian != native_endian)
+                std.mem.byteSwapAllFields(std.coff.OptionalHeader.@"PE32+", header);
+        },
+    };
+
+    // Data directories, zero-initialized; `addSection` fills entries that
+    // have an associated section.
+    const data_directories_ni = Node.known.data_directories;
+    assert(data_directories_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+        .size = data_directories_size,
+        .alignment = .@"4",
+        .fixed = true,
+    }));
+    coff.nodes.appendAssumeCapacity(.data_directories);
+    {
+        const data_directories: *[data_directories_len]std.coff.ImageDataDirectory =
+            @ptrCast(@alignCast(data_directories_ni.slice(&coff.mf)));
+        @memset(data_directories, .{ .virtual_address = 0, .size = 0 });
+        if (target_endian != native_endian) for (data_directories) |*data_directory|
+            std.mem.byteSwapAllFields(std.coff.ImageDataDirectory, data_directory);
+    }
+
+    // Section table starts empty; `addSection` resizes it per section.
+    const section_table_ni = Node.known.section_table;
+    assert(section_table_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
+        .alignment = .@"4",
+        .fixed = true,
+    }));
+    coff.nodes.appendAssumeCapacity(.section_table);
+
+    assert(coff.nodes.len == Node.known_count);
+
+    // Symbol 0 is the reserved null symbol.
+    try coff.symbol_table.ensureTotalCapacity(gpa, Symbol.Index.known_count);
+    coff.symbol_table.addOneAssumeCapacity().* = .{
+        .ni = .none,
+        .rva = 0,
+        .size = 0,
+        .loc_relocs = .none,
+        .target_relocs = .none,
+        .section_number = .UNDEFINED,
+        .data_directory = null,
+    };
+    // Known sections must be created in `Symbol.Index` order.
+    assert(try coff.addSection(".data", null, .{
+        .CNT_INITIALIZED_DATA = true,
+        .MEM_READ = true,
+        .MEM_WRITE = true,
+    }) == .data);
+    assert(try coff.addSection(".idata", .import_table, .{
+        .CNT_INITIALIZED_DATA = true,
+        .MEM_READ = true,
+    }) == .idata);
+    assert(try coff.addSection(".rdata", null, .{
+        .CNT_INITIALIZED_DATA = true,
+        .MEM_READ = true,
+    }) == .rdata);
+    assert(try coff.addSection(".text", null, .{
+        .CNT_CODE = true,
+        .MEM_EXECUTE = true,
+        .MEM_READ = true,
+    }) == .text);
+    // The import directory table lives at the start of .idata.
+    coff.import_table.directory_table_ni = try coff.mf.addLastChildNode(
+        gpa,
+        Symbol.Index.idata.node(coff),
+        .{
+            .alignment = .@"4",
+            .fixed = true,
+        },
+    );
+    // NOTE(review): this append relies on spare capacity left over from the
+    // addSection calls above; no explicit ensure for this node — confirm.
+    coff.nodes.appendAssumeCapacity(.import_directory_table);
+    assert(coff.symbol_table.items.len == Symbol.Index.known_count);
+}
+
+/// Linker-side bookkeeping entry for a MappedFile node.
+fn getNode(coff: *const Coff, ni: MappedFile.Node.Index) Node {
+    const index = @intFromEnum(ni);
+    return coff.nodes.get(index);
+}
+/// Walk up the node tree accumulating parent-relative offsets until a
+/// section node is reached, then return the section's RVA plus the
+/// accumulated offset.
+fn computeNodeRva(coff: *Coff, ni: MappedFile.Node.Index) u32 {
+    var section_offset: u32 = 0;
+    var parent_ni = ni;
+    while (true) {
+        // Every node passed here must live underneath some section node.
+        assert(parent_ni != .none);
+        switch (coff.getNode(parent_ni)) {
+            else => {},
+            .section => |si| return si.get(coff).rva + section_offset,
+        }
+        const parent_offset, _ = parent_ni.location(&coff.mf).resolve(&coff.mf);
+        section_offset += @intCast(parent_offset);
+        parent_ni = parent_ni.parent(&coff.mf);
+    }
+}
+
+/// Byte order of the output object, as resolved from the target CPU.
+pub inline fn targetEndian(coff: *const Coff) std.builtin.Endian {
+    return coff.endian;
+}
+/// Load a value stored in the mapped file in target byte order. Integers
+/// are byte-swapped as needed; enums go through their tag integer and
+/// packed structs through their backing integer.
+fn targetLoad(coff: *const Coff, ptr: anytype) @typeInfo(@TypeOf(ptr)).pointer.child {
+    const Child = @typeInfo(@TypeOf(ptr)).pointer.child;
+    return switch (@typeInfo(Child)) {
+        else => @compileError(@typeName(Child)),
+        .int => std.mem.toNative(Child, ptr.*, coff.targetEndian()),
+        .@"enum" => |@"enum"| @enumFromInt(coff.targetLoad(@as(*@"enum".tag_type, @ptrCast(ptr)))),
+        .@"struct" => |@"struct"| @bitCast(
+            coff.targetLoad(@as(*@"struct".backing_integer.?, @ptrCast(ptr))),
+        ),
+    };
+}
+/// Store a value into the mapped file in target byte order; mirror of
+/// `targetLoad` (ints directly, enums via tag, packed structs via backing
+/// integer).
+fn targetStore(coff: *const Coff, ptr: anytype, val: @typeInfo(@TypeOf(ptr)).pointer.child) void {
+    const Child = @typeInfo(@TypeOf(ptr)).pointer.child;
+    return switch (@typeInfo(Child)) {
+        else => @compileError(@typeName(Child)),
+        .int => ptr.* = std.mem.nativeTo(Child, val, coff.targetEndian()),
+        .@"enum" => |@"enum"| coff.targetStore(
+            @as(*@"enum".tag_type, @ptrCast(ptr)),
+            @intFromEnum(val),
+        ),
+        .@"struct" => |@"struct"| coff.targetStore(
+            @as(*@"struct".backing_integer.?, @ptrCast(ptr)),
+            @bitCast(val),
+        ),
+    };
+}
+
+/// Pointer into the mapped file at the COFF header (fields are in target
+/// byte order).
+pub fn headerPtr(coff: *Coff) *std.coff.Header {
+    const bytes = Node.known.coff_header.slice(&coff.mf);
+    return @ptrCast(@alignCast(bytes));
+}
+
+/// Pointer to the standard (magic-independent) prefix of the optional
+/// header (fields are in target byte order).
+pub fn optionalHeaderStandardPtr(coff: *Coff) *std.coff.OptionalHeader {
+    const bytes = Node.known.optional_header.slice(&coff.mf);
+    return @ptrCast(@alignCast(bytes[0..@sizeOf(std.coff.OptionalHeader)]));
+}
+
+/// Pointer to the optional header, discriminated by its PE32/PE32+ magic.
+pub const OptionalHeaderPtr = union(std.coff.OptionalHeader.Magic) {
+    PE32: *std.coff.OptionalHeader.PE32,
+    @"PE32+": *std.coff.OptionalHeader.@"PE32+",
+};
+/// View the optional header as its concrete variant, discriminated by the
+/// magic read (in target byte order) from the mapped file.
+pub fn optionalHeaderPtr(coff: *Coff) OptionalHeaderPtr {
+    const slice = Node.known.optional_header.slice(&coff.mf);
+    return switch (coff.targetLoad(&coff.optionalHeaderStandardPtr().magic)) {
+        // Any other magic would have been rejected before headers existed.
+        _ => unreachable,
+        inline else => |magic| @unionInit(
+            OptionalHeaderPtr,
+            @tagName(magic),
+            @ptrCast(@alignCast(slice)),
+        ),
+    };
+}
+/// Load an optional-header field in target byte order, regardless of
+/// variant. `field` is named via the PE32+ layout; it must also exist in
+/// PE32 for the inline switch to compile.
+pub fn optionalHeaderField(
+    coff: *Coff,
+    comptime field: std.meta.FieldEnum(std.coff.OptionalHeader.@"PE32+"),
+) @FieldType(std.coff.OptionalHeader.@"PE32+", @tagName(field)) {
+    return switch (coff.optionalHeaderPtr()) {
+        inline else => |optional_header| coff.targetLoad(&@field(optional_header, @tagName(field))),
+    };
+}
+
+/// The data directory array in the mapped file (entries are in target byte
+/// order).
+pub fn dataDirectoriesSlice(coff: *Coff) []std.coff.ImageDataDirectory {
+    const bytes = Node.known.data_directories.slice(&coff.mf);
+    return @ptrCast(@alignCast(bytes));
+}
+
+/// The section header table in the mapped file (entries are in target byte
+/// order).
+pub fn sectionTableSlice(coff: *Coff) []std.coff.SectionHeader {
+    const bytes = Node.known.section_table.slice(&coff.mf);
+    return @ptrCast(@alignCast(bytes));
+}
+
+/// Append a fresh, empty symbol and return its index. The append happens in
+/// the `defer`, after the return value is evaluated, so the returned index
+/// is the table length *before* the append — i.e. the new symbol's slot.
+fn addSymbolAssumeCapacity(coff: *Coff) Symbol.Index {
+    defer coff.symbol_table.addOneAssumeCapacity().* = .{
+        .ni = .none,
+        .rva = 0,
+        .size = 0,
+        .loc_relocs = .none,
+        .target_relocs = .none,
+        .section_number = .UNDEFINED,
+        .data_directory = null,
+    };
+    return @enumFromInt(coff.symbol_table.items.len);
+}
+
+/// Like `addSymbolAssumeCapacity`, but with an error-union return type for
+/// callers that expect a fallible initializer.
+fn initSymbolAssumeCapacity(coff: *Coff) !Symbol.Index {
+    return coff.addSymbolAssumeCapacity();
+}
+
+/// Intern `string` into `string_bytes` (NUL-terminated) and return its
+/// offset-based identity. Byte capacity is reserved up front so the appends
+/// after the map insertion cannot fail.
+fn getOrPutString(coff: *Coff, string: []const u8) !String {
+    const gpa = coff.base.comp.gpa;
+    try coff.string_bytes.ensureUnusedCapacity(gpa, string.len + 1);
+    // Keys are offsets into string_bytes; the adapter hashes/compares the
+    // underlying bytes so lookup works with a plain slice.
+    const gop = try coff.strings.getOrPutContextAdapted(
+        gpa,
+        string,
+        std.hash_map.StringIndexAdapter{ .bytes = &coff.string_bytes },
+        .{ .bytes = &coff.string_bytes },
+    );
+    if (!gop.found_existing) {
+        gop.key_ptr.* = @intCast(coff.string_bytes.items.len);
+        gop.value_ptr.* = {};
+        coff.string_bytes.appendSliceAssumeCapacity(string);
+        coff.string_bytes.appendAssumeCapacity(0);
+    }
+    return @enumFromInt(gop.key_ptr.*);
+}
+
+/// Intern an optional string; `null` maps to `.none`.
+fn getOrPutOptionalString(coff: *Coff, string: ?[]const u8) !String.Optional {
+    const unwrapped = string orelse return .none;
+    const interned = try coff.getOrPutString(unwrapped);
+    return interned.toOptional();
+}
+
+/// Get or create the symbol for a named global, optionally qualified by the
+/// library it comes from. Symbol capacity is reserved first so
+/// `addSymbolAssumeCapacity` below cannot fail.
+pub fn globalSymbol(coff: *Coff, name: []const u8, lib_name: ?[]const u8) !Symbol.Index {
+    const gpa = coff.base.comp.gpa;
+    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+    const sym_gop = try coff.globals.getOrPut(gpa, .{
+        .name = try coff.getOrPutString(name),
+        .lib_name = try coff.getOrPutOptionalString(lib_name),
+    });
+    if (!sym_gop.found_existing) {
+        sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
+        // A new global means one more pending synthetic-link work item.
+        coff.base.comp.link_synth_prog_node.increaseEstimatedTotalItems(1);
+    }
+    return sym_gop.value_ptr.*;
+}
+
+/// Dense map index for a declared value (nav), creating its backing symbol
+/// on first sight.
+fn navMapIndex(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
+    const gpa = zcu.gpa;
+    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+    const sym_gop = try coff.navs.getOrPut(gpa, nav_index);
+    if (!sym_gop.found_existing) sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
+    return @enumFromInt(sym_gop.index);
+}
+/// Symbol for a declared value. Externs resolve via the global table
+/// (keyed by name and optional library) instead of the nav map.
+pub fn navSymbol(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
+    const ip = &zcu.intern_pool;
+    const nav = ip.getNav(nav_index);
+    if (nav.getExtern(ip)) |@"extern"| return coff.globalSymbol(
+        @"extern".name.toSlice(ip),
+        @"extern".lib_name.toSlice(ip),
+    );
+    const nmi = try coff.navMapIndex(zcu, nav_index);
+    return nmi.symbol(coff);
+}
+
+/// Dense map index for an unnamed (anonymous) value, creating its backing
+/// symbol on first sight.
+fn uavMapIndex(coff: *Coff, uav_val: InternPool.Index) !Node.UavMapIndex {
+    const gpa = coff.base.comp.gpa;
+    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+    const sym_gop = try coff.uavs.getOrPut(gpa, uav_val);
+    if (!sym_gop.found_existing) sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
+    return @enumFromInt(sym_gop.index);
+}
+/// Symbol for an unnamed (anonymous) constant, keyed by its interned value.
+pub fn uavSymbol(coff: *Coff, uav_val: InternPool.Index) !Symbol.Index {
+    return (try coff.uavMapIndex(uav_val)).symbol(coff);
+}
+
+/// Get or create the symbol for a lazily-emitted synthetic, keyed by kind
+/// and type. New entries add a synthetic-link work item.
+pub fn lazySymbol(coff: *Coff, lazy: link.File.LazySymbol) !Symbol.Index {
+    const gpa = coff.base.comp.gpa;
+    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+    const sym_gop = try coff.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
+    if (!sym_gop.found_existing) {
+        sym_gop.value_ptr.* = try coff.initSymbolAssumeCapacity();
+        coff.base.comp.link_synth_prog_node.increaseEstimatedTotalItems(1);
+    }
+    return sym_gop.value_ptr.*;
+}
+
+/// Resolve the virtual address of a declared value, recording a relocation
+/// via `getVAddr` so the written bytes can be re-patched if it moves.
+pub fn getNavVAddr(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    nav: InternPool.Nav.Index,
+    reloc_info: link.File.RelocInfo,
+) !u64 {
+    const target_si = try coff.navSymbol(pt.zcu, nav);
+    return coff.getVAddr(reloc_info, target_si);
+}
+
+/// Resolve the virtual address of an unnamed (anonymous) value, recording a
+/// relocation via `getVAddr` so the written bytes can be re-patched if it
+/// moves.
+pub fn getUavVAddr(
+    coff: *Coff,
+    uav: InternPool.Index,
+    reloc_info: link.File.RelocInfo,
+) !u64 {
+    const target_si = try coff.uavSymbol(uav);
+    return coff.getVAddr(reloc_info, target_si);
+}
+
+/// Record an absolute-address relocation at the caller's location and
+/// return the target's current virtual address (image base + RVA), so the
+/// bytes can be re-patched via `Reloc.apply` if the target later moves.
+pub fn getVAddr(coff: *Coff, reloc_info: link.File.RelocInfo, target_si: Symbol.Index) !u64 {
+    try coff.addReloc(
+        @enumFromInt(reloc_info.parent.atom_index),
+        reloc_info.offset,
+        target_si,
+        reloc_info.addend,
+        // Pointer-width absolute relocation for the target machine.
+        switch (coff.targetLoad(&coff.headerPtr().machine)) {
+            else => unreachable,
+            .AMD64 => .{ .AMD64 = .ADDR64 },
+            .I386 => .{ .I386 = .DIR32 },
+        },
+    );
+    return coff.optionalHeaderField(.image_base) + target_si.get(coff).rva;
+}
+
+/// Create a new section: grow the section table, add the section's data
+/// node at the end of the file, allocate its symbol, and write its header.
+/// Returns the new section's symbol index.
+fn addSection(
+    coff: *Coff,
+    name: []const u8,
+    maybe_data_directory: ?DataDirectory,
+    flags: std.coff.SectionHeader.Flags,
+) !Symbol.Index {
+    const gpa = coff.base.comp.gpa;
+    try coff.nodes.ensureUnusedCapacity(gpa, 1);
+    try coff.section_table.ensureUnusedCapacity(gpa, 1);
+    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+
+    // Bump number_of_sections and grow the header's section table node.
+    const coff_header = coff.headerPtr();
+    const section_index = coff.targetLoad(&coff_header.number_of_sections);
+    const section_table_len = section_index + 1;
+    coff.targetStore(&coff_header.number_of_sections, section_table_len);
+    try Node.known.section_table.resize(
+        &coff.mf,
+        gpa,
+        @sizeOf(std.coff.SectionHeader) * section_table_len,
+    );
+    const ni = try coff.mf.addLastChildNode(gpa, .root, .{
+        .alignment = coff.mf.flags.block_size,
+        .moved = true,
+        .bubbles_moved = false,
+    });
+    const si = coff.addSymbolAssumeCapacity();
+    coff.section_table.appendAssumeCapacity(si);
+    coff.nodes.appendAssumeCapacity(.{ .section = si });
+    const section_table = coff.sectionTableSlice();
+    // Each new section initially spans one section-alignment unit of
+    // address space, placed directly after the previous section (or after
+    // the headers for the very first section).
+    const virtual_size = coff.optionalHeaderField(.section_alignment);
+    const rva: u32 = switch (section_index) {
+        0 => @intCast(Node.known.header.location(&coff.mf).resolve(&coff.mf)[1]),
+        else => coff.section_table.items[section_index - 1].get(coff).rva +
+            coff.targetLoad(&section_table[section_index - 1].virtual_size),
+    };
+    {
+        const sym = si.get(coff);
+        sym.ni = ni;
+        sym.rva = rva;
+        // COFF section numbers are 1-based.
+        sym.section_number = @enumFromInt(section_table_len);
+        sym.data_directory = maybe_data_directory;
+    }
+    const section = &section_table[section_index];
+    section.* = .{
+        .name = undefined,
+        .virtual_size = virtual_size,
+        .virtual_address = rva,
+        .size_of_raw_data = 0,
+        .pointer_to_raw_data = 0,
+        .pointer_to_relocations = 0,
+        .pointer_to_linenumbers = 0,
+        .number_of_relocations = 0,
+        .number_of_linenumbers = 0,
+        .flags = flags,
+    };
+    @memcpy(section.name[0..name.len], name);
+    @memset(section.name[name.len..], 0);
+    if (coff.targetEndian() != native_endian)
+        std.mem.byteSwapAllFields(std.coff.SectionHeader, section);
+    // Mirror the section's placement into its data directory entry, if any;
+    // both sides are already in target byte order at this point.
+    if (maybe_data_directory) |data_directory|
+        coff.dataDirectoriesSlice()[@intFromEnum(data_directory)] = .{
+            .virtual_address = section.virtual_address,
+            .size = section.virtual_size,
+        };
+    // Keep size_of_image tracking the end of the newest section.
+    switch (coff.optionalHeaderPtr()) {
+        inline else => |optional_header| coff.targetStore(
+            &optional_header.size_of_image,
+            @intCast(rva + virtual_size),
+        ),
+    }
+    return si;
+}
+
+/// Append a relocation and push it onto the front of the target symbol's
+/// doubly-linked relocation list.
+pub fn addReloc(
+    coff: *Coff,
+    loc_si: Symbol.Index,
+    offset: u64,
+    target_si: Symbol.Index,
+    addend: i64,
+    @"type": Reloc.Type,
+) !void {
+    const gpa = coff.base.comp.gpa;
+    const target = target_si.get(coff);
+    const ri: Reloc.Index = @enumFromInt(coff.relocs.items.len);
+    (try coff.relocs.addOne(gpa)).* = .{
+        .type = @"type",
+        .prev = .none,
+        .next = target.target_relocs,
+        .loc = loc_si,
+        .target = target_si,
+        .unused = 0,
+        .offset = offset,
+        .addend = addend,
+    };
+    // Fix up the old head's back-link, then install the new head.
+    switch (target.target_relocs) {
+        .none => {},
+        else => |target_ri| target_ri.get(coff).prev = ri,
+    }
+    target.target_relocs = ri;
+}
+
+/// `link.File` prelink hook; nothing to do for this backend yet.
+pub fn prelink(coff: *Coff, prog_node: std.Progress.Node) void {
+    _ = coff;
+    _ = prog_node;
+}
+
+/// `link.File` hook: update the output for one declared value. Memory and
+/// relocation-layout errors propagate; anything else is reported as a
+/// codegen failure attached to the nav.
+pub fn updateNav(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
+    coff.updateNavInner(pt, nav_index) catch |err| switch (err) {
+        error.OutOfMemory,
+        error.Overflow,
+        error.RelocationNotByteAligned,
+        => |e| return e,
+        else => |e| return coff.base.cgFail(nav_index, "linker failed to update variable: {t}", .{e}),
+    };
+}
+/// Emit (or re-emit) the data for one declared value into its node in
+/// .data, recording and then applying the relocations the codegen produces.
+fn updateNavInner(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+
+    const nav = ip.getNav(nav_index);
+    const nav_val = nav.status.fully_resolved.val;
+    // Variables emit their initializer; externs have nothing to emit here;
+    // functions take the `.none` init path and are skipped below.
+    const nav_init, const is_threadlocal = switch (ip.indexToKey(nav_val)) {
+        else => .{ nav_val, false },
+        .variable => |variable| .{ variable.init, variable.is_threadlocal },
+        .@"extern" => return,
+        .func => .{ .none, false },
+    };
+    if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
+
+    const nmi = try coff.navMapIndex(zcu, nav_index);
+    const si = nmi.symbol(coff);
+    const ni = ni: {
+        const sym = si.get(coff);
+        switch (sym.ni) {
+            .none => {
+                // First emission: allocate a node in .data for this symbol.
+                try coff.nodes.ensureUnusedCapacity(gpa, 1);
+                // TODO(review): thread-locals not yet treated specially.
+                _ = is_threadlocal;
+                const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.data.node(coff), .{
+                    .alignment = pt.navAlignment(nav_index).toStdMem(),
+                    .moved = true,
+                });
+                coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
+                sym.ni = ni;
+                sym.section_number = Symbol.Index.data.get(coff).section_number;
+            },
+            // Re-emission: drop relocations recorded by the previous body.
+            else => si.deleteLocationRelocs(coff),
+        }
+        assert(sym.loc_relocs == .none);
+        // Relocations added during codegen below belong to this symbol.
+        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+        break :ni sym.ni;
+    };
+
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&coff.mf, gpa, &nw);
+    defer nw.deinit();
+    codegen.generateSymbol(
+        &coff.base,
+        pt,
+        zcu.navSrcLoc(nav_index),
+        .fromInterned(nav_init),
+        &nw.interface,
+        .{ .atom_index = @intFromEnum(si) },
+    ) catch |err| switch (err) {
+        // Writer failure here presumably means allocation failed — confirm.
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    si.get(coff).size = @intCast(nw.interface.end);
+    // Now that the bytes exist, patch in addresses of referenced symbols.
+    si.applyLocationRelocs(coff);
+}
+
+/// Returns the symbol for an unnamed (anonymous) constant, queuing it for
+/// lowering if it has not been emitted yet or if this use requires stricter
+/// alignment than its existing node provides. Actual emission happens later
+/// in `flushUav`, driven by `idle`.
+pub fn lowerUav(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    uav_val: InternPool.Index,
+    uav_align: InternPool.Alignment,
+    src_loc: Zcu.LazySrcLoc,
+) !codegen.SymbolResult {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+
+    try coff.pending_uavs.ensureUnusedCapacity(gpa, 1);
+    const umi = try coff.uavMapIndex(uav_val);
+    const si = umi.symbol(coff);
+    // Queue when there is no node yet, or when the requested alignment
+    // exceeds what the existing node guarantees.
+    if (switch (si.get(coff).ni) {
+        .none => true,
+        else => |ni| uav_align.toStdMem().order(ni.alignment(&coff.mf)).compare(.gt),
+    }) {
+        const gop = coff.pending_uavs.getOrPutAssumeCapacity(umi);
+        if (gop.found_existing) {
+            // Already pending: just raise the requested alignment.
+            gop.value_ptr.alignment = gop.value_ptr.alignment.max(uav_align);
+        } else {
+            gop.value_ptr.* = .{
+                .alignment = uav_align,
+                .src_loc = src_loc,
+            };
+            coff.base.comp.link_const_prog_node.increaseEstimatedTotalItems(1);
+        }
+    }
+    return .{ .sym_index = @intFromEnum(si) };
+}
+
+/// Lowers the machine code for `func_index`. Known linker errors propagate
+/// as-is; anything unexpected is reported as a codegen failure on the
+/// function's owner declaration.
+pub fn updateFunc(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    func_index: InternPool.Index,
+    mir: *const codegen.AnyMir,
+) !void {
+    coff.updateFuncInner(pt, func_index, mir) catch |err| switch (err) {
+        error.OutOfMemory,
+        error.Overflow,
+        error.RelocationNotByteAligned,
+        error.CodegenFail,
+        => |e| return e,
+        // Use the `{t}` error-tag format specifier, consistent with the
+        // equivalent diagnostic in `updateNav`.
+        else => |e| return coff.base.cgFail(
+            pt.zcu.funcInfo(func_index).owner_nav,
+            "linker failed to update function: {t}",
+            .{e},
+        ),
+    };
+}
+/// Implementation of `updateFunc`: emits the function's machine code into
+/// the `.text` section and records the relocations it produces.
+fn updateFuncInner(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    func_index: InternPool.Index,
+    mir: *const codegen.AnyMir,
+) !void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const func = zcu.funcInfo(func_index);
+    const nav = ip.getNav(func.owner_nav);
+
+    const nmi = try coff.navMapIndex(zcu, func.owner_nav);
+    const si = nmi.symbol(coff);
+    log.debug("updateFunc({f}) = {d}", .{ nav.fqn.fmt(ip), si });
+    const ni = ni: {
+        const sym = si.get(coff);
+        switch (sym.ni) {
+            .none => {
+                // First lowering of this function: allocate a node in `.text`.
+                try coff.nodes.ensureUnusedCapacity(gpa, 1);
+                const mod = zcu.navFileScope(func.owner_nav).mod.?;
+                const target = &mod.resolved_target.result;
+                const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.text.node(coff), .{
+                    // Explicit alignment wins (clamped up to the target
+                    // minimum); otherwise pick by optimization mode.
+                    .alignment = switch (nav.status.fully_resolved.alignment) {
+                        .none => switch (mod.optimize_mode) {
+                            .Debug,
+                            .ReleaseSafe,
+                            .ReleaseFast,
+                            => target_util.defaultFunctionAlignment(target),
+                            .ReleaseSmall => target_util.minFunctionAlignment(target),
+                        },
+                        else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
+                    }.toStdMem(),
+                    .moved = true,
+                });
+                coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
+                sym.ni = ni;
+                sym.section_number = Symbol.Index.text.get(coff).section_number;
+            },
+            // Re-lowering: discard the stale relocations from the previous body.
+            else => si.deleteLocationRelocs(coff),
+        }
+        assert(sym.loc_relocs == .none);
+        // Relocations emitted during codegen below start at the current end
+        // of the reloc list.
+        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+        break :ni sym.ni;
+    };
+
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&coff.mf, gpa, &nw);
+    defer nw.deinit();
+    codegen.emitFunction(
+        &coff.base,
+        pt,
+        zcu.navSrcLoc(func.owner_nav),
+        func_index,
+        @intFromEnum(si),
+        mir,
+        &nw.interface,
+        .none,
+    ) catch |err| switch (err) {
+        // Surface the writer's underlying error rather than WriteFailed.
+        error.WriteFailed => return nw.err.?,
+        else => |e| return e,
+    };
+    si.get(coff).size = @intCast(nw.interface.end);
+    si.applyLocationRelocs(coff);
+}
+
+/// Re-lowers the lazy const data associated with `anyerror` (the global
+/// error table), if it was ever requested; no-op otherwise.
+pub fn updateErrorData(coff: *Coff, pt: Zcu.PerThread) !void {
+    coff.flushLazy(pt, .{
+        .kind = .const_data,
+        // If `anyerror_type` was never registered as lazy const data there
+        // is nothing to refresh.
+        .index = @intCast(coff.lazy.getPtr(.const_data).map.getIndex(.anyerror_type) orelse return),
+    }) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.CodegenFail => return error.LinkFailure,
+        else => |e| return coff.base.comp.link_diags.fail("updateErrorData failed {t}", .{e}),
+    };
+}
+
+/// Finishes the link: drains all queued idle work, then (as a temporary
+/// stage2_x86_64 hack) copies the dynamic compiler-rt library next to the
+/// emitted binary.
+pub fn flush(
+    coff: *Coff,
+    arena: std.mem.Allocator,
+    tid: Zcu.PerThread.Id,
+    prog_node: std.Progress.Node,
+) !void {
+    _ = arena;
+    _ = prog_node;
+    // Keep performing one unit of work at a time until nothing remains.
+    while (try coff.idle(tid)) {}
+
+    // hack for stage2_x86_64 + coff
+    const comp = coff.base.comp;
+    const crt_file = comp.compiler_rt_dyn_lib orelse return;
+    const gpa = comp.gpa;
+    const dest_sub_path = try std.fs.path.join(gpa, &.{
+        std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
+        std.fs.path.basename(crt_file.full_object_path.sub_path),
+    });
+    defer gpa.free(dest_sub_path);
+    crt_file.full_object_path.root_dir.handle.copyFile(
+        crt_file.full_object_path.sub_path,
+        coff.base.emit.root_dir.handle,
+        dest_sub_path,
+        .{},
+    ) catch |err| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{
+        dest_sub_path,
+        @errorName(err),
+    });
+}
+
+/// Performs at most one unit of queued link work (pending uav, pending
+/// global, lazy symbol, or mapped-file update), then reports whether more
+/// work remains so callers can loop (`while (try coff.idle(tid)) {}`).
+pub fn idle(coff: *Coff, tid: Zcu.PerThread.Id) !bool {
+    const comp = coff.base.comp;
+    task: {
+        // 1. Anonymous constants queued by `lowerUav`.
+        while (coff.pending_uavs.pop()) |pending_uav| {
+            const sub_prog_node = coff.idleProgNode(
+                tid,
+                comp.link_const_prog_node,
+                .{ .uav = pending_uav.key },
+            );
+            defer sub_prog_node.end();
+            coff.flushUav(
+                .{ .zcu = coff.base.comp.zcu.?, .tid = tid },
+                pending_uav.key,
+                pending_uav.value.alignment,
+                pending_uav.value.src_loc,
+            ) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                else => |e| return coff.base.comp.link_diags.fail(
+                    "linker failed to lower constant: {t}",
+                    .{e},
+                ),
+            };
+            break :task;
+        }
+        // 2. Global symbols not yet materialized (e.g. DLL import thunks).
+        if (coff.global_pending_index < coff.globals.count()) {
+            const pt: Zcu.PerThread = .{ .zcu = coff.base.comp.zcu.?, .tid = tid };
+            const gmi: Node.GlobalMapIndex = @enumFromInt(coff.global_pending_index);
+            coff.global_pending_index += 1;
+            const sub_prog_node = comp.link_synth_prog_node.start(
+                gmi.globalName(coff).name.toSlice(coff),
+                0,
+            );
+            defer sub_prog_node.end();
+            coff.flushGlobal(pt, gmi) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                else => |e| return coff.base.comp.link_diags.fail(
+                    "linker failed to lower constant: {t}",
+                    .{e},
+                ),
+            };
+            break :task;
+        }
+        // 3. Lazy compiler-synthesized code/data.
+        var lazy_it = coff.lazy.iterator();
+        while (lazy_it.next()) |lazy| if (lazy.value.pending_index < lazy.value.map.count()) {
+            const pt: Zcu.PerThread = .{ .zcu = coff.base.comp.zcu.?, .tid = tid };
+            const lmr: Node.LazyMapRef = .{ .kind = lazy.key, .index = lazy.value.pending_index };
+            lazy.value.pending_index += 1;
+            const kind = switch (lmr.kind) {
+                .code => "code",
+                .const_data => "data",
+            };
+            var name: [std.Progress.Node.max_name_len]u8 = undefined;
+            const sub_prog_node = comp.link_synth_prog_node.start(
+                std.fmt.bufPrint(&name, "lazy {s} for {f}", .{
+                    kind,
+                    Type.fromInterned(lmr.lazySymbol(coff).ty).fmt(pt),
+                }) catch &name,
+                0,
+            );
+            defer sub_prog_node.end();
+            coff.flushLazy(pt, lmr) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                else => |e| return coff.base.comp.link_diags.fail(
+                    "linker failed to lower lazy {s}: {t}",
+                    .{ kind, e },
+                ),
+            };
+            break :task;
+        };
+        // 4. Nodes that moved or resized in the mapped output file.
+        while (coff.mf.updates.pop()) |ni| {
+            const clean_moved = ni.cleanMoved(&coff.mf);
+            const clean_resized = ni.cleanResized(&coff.mf);
+            if (clean_moved or clean_resized) {
+                const sub_prog_node = coff.idleProgNode(tid, coff.mf.update_prog_node, coff.getNode(ni));
+                defer sub_prog_node.end();
+                if (clean_moved) try coff.flushMoved(ni);
+                if (clean_resized) try coff.flushResized(ni);
+                break :task;
+            } else coff.mf.update_prog_node.completeOne();
+        }
+    }
+    // Report whether any of the four task sources above still has work.
+    if (coff.pending_uavs.count() > 0) return true;
+    // Fix: previously pending globals were not counted here, so the idle
+    // loop could stop with globals still unflushed.
+    if (coff.global_pending_index < coff.globals.count()) return true;
+    for (&coff.lazy.values) |lazy| if (lazy.map.count() > lazy.pending_index) return true;
+    if (coff.mf.updates.items.len > 0) return true;
+    return false;
+}
+
+/// Starts a progress sub-node named after the given linker `node`, used by
+/// `idle` to label the unit of work it is performing.
+fn idleProgNode(
+    coff: *Coff,
+    tid: Zcu.PerThread.Id,
+    prog_node: std.Progress.Node,
+    node: Node,
+) std.Progress.Node {
+    // Scratch buffer for names that must be formatted; `Progress` copies the
+    // name, so a stack buffer is fine.
+    var name: [std.Progress.Node.max_name_len]u8 = undefined;
+    // Labeled switch used as a value: each arm yields the display name.
+    return prog_node.start(name: switch (node) {
+        else => |tag| @tagName(tag),
+        .section => |si| std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
+        .nav => |nmi| {
+            const ip = &coff.base.comp.zcu.?.intern_pool;
+            break :name ip.getNav(nmi.navIndex(coff)).fqn.toSlice(ip);
+        },
+        // Anonymous constants have no name; render the value itself.
+        .uav => |umi| std.fmt.bufPrint(&name, "{f}", .{
+            Value.fromInterned(umi.uavValue(coff)).fmtValue(.{
+                .zcu = coff.base.comp.zcu.?,
+                .tid = tid,
+            }),
+        }) catch &name,
+    }, 0);
+}
+
+/// Lowers a queued anonymous constant into the `.data` section. If the uav
+/// already has a node whose alignment satisfies the request, the existing
+/// bytes are kept and this is a no-op.
+fn flushUav(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    umi: Node.UavMapIndex,
+    uav_align: InternPool.Alignment,
+    src_loc: Zcu.LazySrcLoc,
+) !void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+
+    const uav_val = umi.uavValue(coff);
+    const si = umi.symbol(coff);
+    const ni = ni: {
+        const sym = si.get(coff);
+        switch (sym.ni) {
+            .none => {
+                // First lowering: allocate a node in `.data`.
+                try coff.nodes.ensureUnusedCapacity(gpa, 1);
+                const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.data.node(coff), .{
+                    .alignment = uav_align.toStdMem(),
+                    .moved = true,
+                });
+                coff.nodes.appendAssumeCapacity(.{ .uav = umi });
+                sym.ni = ni;
+                sym.section_number = Symbol.Index.data.get(coff).section_number;
+            },
+            else => {
+                // Existing node already aligned enough: keep it as-is.
+                if (sym.ni.alignment(&coff.mf).order(uav_align.toStdMem()).compare(.gte)) return;
+                // Otherwise re-lower: drop stale relocations first.
+                si.deleteLocationRelocs(coff);
+            },
+        }
+        assert(sym.loc_relocs == .none);
+        // Relocations emitted during codegen below start at the current end
+        // of the reloc list.
+        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+        break :ni sym.ni;
+    };
+
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&coff.mf, gpa, &nw);
+    defer nw.deinit();
+    codegen.generateSymbol(
+        &coff.base,
+        pt,
+        src_loc,
+        .fromInterned(uav_val),
+        &nw.interface,
+        .{ .atom_index = @intFromEnum(si) },
+    ) catch |err| switch (err) {
+        // NOTE(review): WriteFailed mapped to OOM, mirroring `updateNavInner`.
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    si.get(coff).size = @intCast(nw.interface.end);
+    si.applyLocationRelocs(coff);
+}
+
+/// Materializes a global symbol. Currently only DLL imports (globals
+/// carrying a `lib_name`) are emitted: the DLL's import directory entry and
+/// its lookup/address/hint-name tables are created or extended, and a small
+/// indirect-jump thunk is emitted into `.text` for the symbol.
+fn flushGlobal(coff: *Coff, pt: Zcu.PerThread, gmi: Node.GlobalMapIndex) !void {
+    const zcu = pt.zcu;
+    const comp = zcu.comp;
+    const gpa = zcu.gpa;
+    const gn = gmi.globalName(coff);
+    if (gn.lib_name.toSlice(coff)) |lib_name| {
+        const name = gn.name.toSlice(coff);
+        try coff.nodes.ensureUnusedCapacity(gpa, 4);
+        try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+
+        // Import table entry width depends on the image flavor (PE32 vs PE32+).
+        const target_endian = coff.targetEndian();
+        const magic = coff.targetLoad(&coff.optionalHeaderStandardPtr().magic);
+        const addr_size: u64, const addr_align: std.mem.Alignment = switch (magic) {
+            _ => unreachable,
+            .PE32 => .{ 4, .@"4" },
+            .@"PE32+" => .{ 8, .@"8" },
+        };
+
+        const gop = try coff.import_table.dlls.getOrPutAdapted(
+            gpa,
+            lib_name,
+            ImportTable.Adapter{ .coff = coff },
+        );
+        // Hint/name entries are 2-byte aligned per the PE/COFF spec.
+        const import_hint_name_align: std.mem.Alignment = .@"2";
+        if (!gop.found_existing) {
+            // First import from this DLL: grow the directory table (+2 keeps
+            // a zeroed terminator entry) and create the three per-DLL tables.
+            errdefer _ = coff.import_table.dlls.pop();
+            try coff.import_table.directory_table_ni.resize(
+                &coff.mf,
+                gpa,
+                @sizeOf(std.coff.ImportDirectoryEntry) * (gop.index + 2),
+            );
+            const import_hint_name_table_len =
+                import_hint_name_align.forward(lib_name.len + ".dll".len + 1);
+            const idata_section_ni = Symbol.Index.idata.node(coff);
+            // Each table starts with one slot plus its zero terminator.
+            const import_lookup_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
+                .size = addr_size * 2,
+                .alignment = addr_align,
+                .moved = true,
+            });
+            const import_address_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
+                .size = addr_size * 2,
+                .alignment = addr_align,
+                .moved = true,
+            });
+            // The address table gets its own symbol so thunks can relocate
+            // against individual slots.
+            const import_address_table_si = coff.addSymbolAssumeCapacity();
+            {
+                const import_address_table_sym = import_address_table_si.get(coff);
+                import_address_table_sym.ni = import_address_table_ni;
+                assert(import_address_table_sym.loc_relocs == .none);
+                import_address_table_sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+                import_address_table_sym.section_number = Symbol.Index.idata.get(coff).section_number;
+            }
+            const import_hint_name_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
+                .size = import_hint_name_table_len,
+                .alignment = import_hint_name_align,
+                .moved = true,
+            });
+            gop.value_ptr.* = .{
+                .import_lookup_table_ni = import_lookup_table_ni,
+                .import_address_table_si = import_address_table_si,
+                .import_hint_name_table_ni = import_hint_name_table_ni,
+                .len = 0,
+                .hint_name_len = @intCast(import_hint_name_table_len),
+            };
+            // The hint/name table begins with the DLL's own name: "<lib>.dll\0".
+            const import_hint_name_slice = import_hint_name_table_ni.slice(&coff.mf);
+            @memcpy(import_hint_name_slice[0..lib_name.len], lib_name);
+            @memcpy(import_hint_name_slice[lib_name.len..][0..".dll".len], ".dll");
+            @memset(import_hint_name_slice[lib_name.len + ".dll".len ..], 0);
+            coff.nodes.appendAssumeCapacity(.{ .import_lookup_table = @intCast(gop.index) });
+            coff.nodes.appendAssumeCapacity(.{ .import_address_table = @intCast(gop.index) });
+            coff.nodes.appendAssumeCapacity(.{ .import_hint_name_table = @intCast(gop.index) });
+
+            // Write this DLL's directory entry followed by the null terminator
+            // entry that marks the end of the directory table.
+            // NOTE(review): these fields are stored in native byte order,
+            // unlike the `targetStore` calls used elsewhere — confirm
+            // cross-endian correctness.
+            const import_directory_table: []std.coff.ImportDirectoryEntry =
+                @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+            import_directory_table[gop.index..][0..2].* = .{ .{
+                .import_lookup_table_rva = coff.computeNodeRva(import_lookup_table_ni),
+                .time_date_stamp = 0,
+                .forwarder_chain = 0,
+                .name_rva = coff.computeNodeRva(import_hint_name_table_ni),
+                .import_address_table_rva = coff.computeNodeRva(import_address_table_ni),
+            }, .{
+                .import_lookup_table_rva = 0,
+                .time_date_stamp = 0,
+                .forwarder_chain = 0,
+                .name_rva = 0,
+                .import_address_table_rva = 0,
+            } };
+        }
+        // Append this symbol: one more slot in lookup/address tables (+2
+        // keeps the zero terminator) and a "hint;name\0" record (2-byte hint
+        // of zero, NUL-terminated name, padded to alignment).
+        const import_symbol_index = gop.value_ptr.len;
+        gop.value_ptr.len = import_symbol_index + 1;
+        const new_symbol_table_size = addr_size * (import_symbol_index + 2);
+        const import_hint_name_index = gop.value_ptr.hint_name_len;
+        gop.value_ptr.hint_name_len = @intCast(
+            import_hint_name_align.forward(import_hint_name_index + 2 + name.len + 1),
+        );
+        try gop.value_ptr.import_lookup_table_ni.resize(&coff.mf, gpa, new_symbol_table_size);
+        const import_address_table_ni = gop.value_ptr.import_address_table_si.node(coff);
+        try import_address_table_ni.resize(&coff.mf, gpa, new_symbol_table_size);
+        try gop.value_ptr.import_hint_name_table_ni.resize(&coff.mf, gpa, gop.value_ptr.hint_name_len);
+        const import_lookup_slice = gop.value_ptr.import_lookup_table_ni.slice(&coff.mf);
+        const import_address_slice = import_address_table_ni.slice(&coff.mf);
+        const import_hint_name_slice = gop.value_ptr.import_hint_name_table_ni.slice(&coff.mf);
+        @memset(import_hint_name_slice[import_hint_name_index..][0..2], 0);
+        @memcpy(import_hint_name_slice[import_hint_name_index + 2 ..][0..name.len], name);
+        @memset(import_hint_name_slice[import_hint_name_index + 2 + name.len ..], 0);
+        const import_hint_name_rva =
+            coff.computeNodeRva(gop.value_ptr.import_hint_name_table_ni) + import_hint_name_index;
+        // Point the new lookup and address slots at the hint/name record and
+        // rewrite each table's zero terminator, in target byte order.
+        switch (magic) {
+            _ => unreachable,
+            inline .PE32, .@"PE32+" => |ct_magic| {
+                const Addr = switch (ct_magic) {
+                    _ => comptime unreachable,
+                    .PE32 => u32,
+                    .@"PE32+" => u64,
+                };
+                const import_lookup_table: []Addr = @ptrCast(@alignCast(import_lookup_slice));
+                const import_address_table: []Addr = @ptrCast(@alignCast(import_address_slice));
+                const import_hint_name_rvas: [2]Addr = .{
+                    std.mem.nativeTo(Addr, @intCast(import_hint_name_rva), target_endian),
+                    std.mem.nativeTo(Addr, 0, target_endian),
+                };
+                import_lookup_table[import_symbol_index..][0..2].* = import_hint_name_rvas;
+                import_address_table[import_symbol_index..][0..2].* = import_hint_name_rvas;
+            },
+        }
+        const si = gmi.symbol(coff);
+        const sym = si.get(coff);
+        sym.section_number = Symbol.Index.text.get(coff).section_number;
+        assert(sym.loc_relocs == .none);
+        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+        switch (coff.targetLoad(&coff.headerPtr().machine)) {
+            else => |tag| @panic(@tagName(tag)),
+            .AMD64 => {
+                // Thunk: `jmp qword ptr [rip+disp32]` (ff 25), jumping through
+                // this symbol's import address table slot.
+                const init = [_]u8{ 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 };
+                const target = &comp.root_mod.resolved_target.result;
+                const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.text.node(coff), .{
+                    .alignment = switch (comp.root_mod.optimize_mode) {
+                        .Debug,
+                        .ReleaseSafe,
+                        .ReleaseFast,
+                        => target_util.defaultFunctionAlignment(target),
+                        .ReleaseSmall => target_util.minFunctionAlignment(target),
+                    }.toStdMem(),
+                    .size = init.len,
+                });
+                @memcpy(ni.slice(&coff.mf)[0..init.len], &init);
+                sym.ni = ni;
+                sym.size = init.len;
+                // Relocate the thunk's disp32 against the address table slot.
+                try coff.addReloc(
+                    si,
+                    init.len - 4,
+                    gop.value_ptr.import_address_table_si,
+                    @intCast(addr_size * import_symbol_index),
+                    .{ .AMD64 = .REL32 },
+                );
+            },
+        }
+        coff.nodes.appendAssumeCapacity(.{ .global = gmi });
+        sym.rva = coff.computeNodeRva(sym.ni);
+        si.applyLocationRelocs(coff);
+    }
+}
+
+/// Lowers a lazy (compiler-synthesized) symbol into `.text` (code) or
+/// `.rdata` (const data) and records its relocations.
+fn flushLazy(coff: *Coff, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+
+    const lazy = lmr.lazySymbol(coff);
+    const si = lmr.symbol(coff);
+    const ni = ni: {
+        const sym = si.get(coff);
+        switch (sym.ni) {
+            .none => {
+                // First lowering: create a node in the section matching the
+                // lazy symbol's kind.
+                try coff.nodes.ensureUnusedCapacity(gpa, 1);
+                const sec_si: Symbol.Index = switch (lazy.kind) {
+                    .code => .text,
+                    .const_data => .rdata,
+                };
+                const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{ .moved = true });
+                coff.nodes.appendAssumeCapacity(switch (lazy.kind) {
+                    .code => .{ .lazy_code = @enumFromInt(lmr.index) },
+                    .const_data => .{ .lazy_const_data = @enumFromInt(lmr.index) },
+                });
+                sym.ni = ni;
+                sym.section_number = sec_si.get(coff).section_number;
+            },
+            // Re-lowering: discard the stale relocations first.
+            else => si.deleteLocationRelocs(coff),
+        }
+        assert(sym.loc_relocs == .none);
+        // Relocations emitted during codegen below start at the current end
+        // of the reloc list.
+        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+        break :ni sym.ni;
+    };
+
+    // Alignment reported by codegen; currently not applied to the node.
+    var required_alignment: InternPool.Alignment = .none;
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&coff.mf, gpa, &nw);
+    defer nw.deinit();
+    try codegen.generateLazySymbol(
+        &coff.base,
+        pt,
+        Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
+        lazy,
+        &required_alignment,
+        &nw.interface,
+        .none,
+        .{ .atom_index = @intFromEnum(si) },
+    );
+    si.get(coff).size = @intCast(nw.interface.end);
+    si.applyLocationRelocs(coff);
+}
+
+/// Reacts to a node having moved within the mapped output file: rewrites the
+/// file offsets/RVAs that reference the node, then propagates the move to
+/// its children.
+fn flushMoved(coff: *Coff, ni: MappedFile.Node.Index) !void {
+    const node = coff.getNode(ni);
+    switch (node) {
+        else => |tag| @panic(@tagName(tag)),
+        // A section moved in the file: update its raw-data file pointer.
+        // Note the early return — this case does not fall through to the
+        // `childrenMoved` call below.
+        .section => |si| return coff.targetStore(
+            &si.get(coff).section_number.header(coff).pointer_to_raw_data,
+            @intCast(ni.fileLocation(&coff.mf, false).offset),
+        ),
+        .import_directory_table => {},
+        .import_lookup_table => |import_directory_table_index| {
+            const import_directory_table: []std.coff.ImportDirectoryEntry =
+                @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+            const import_directory_entry = &import_directory_table[import_directory_table_index];
+            coff.targetStore(&import_directory_entry.import_lookup_table_rva, coff.computeNodeRva(ni));
+        },
+        .import_address_table => |import_directory_table_index| {
+            const import_directory_table: []std.coff.ImportDirectoryEntry =
+                @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+            const import_directory_entry = &import_directory_table[import_directory_table_index];
+            // Fix: a previous stray store wrote this node's RVA into
+            // `import_lookup_table_rva` as well, clobbering the value
+            // maintained by the `.import_lookup_table` case above. Only the
+            // address-table field belongs to this node.
+            const import_address_table_si =
+                coff.import_table.dlls.values()[import_directory_table_index].import_address_table_si;
+            import_address_table_si.flushMoved(coff);
+            coff.targetStore(
+                &import_directory_entry.import_address_table_rva,
+                import_address_table_si.get(coff).rva,
+            );
+        },
+        .import_hint_name_table => |import_directory_table_index| {
+            // The hint/name table moved: update the directory's name RVA and
+            // re-point every lookup/address slot at the new hint/name RVAs.
+            const target_endian = coff.targetEndian();
+            const magic = coff.targetLoad(&coff.optionalHeaderStandardPtr().magic);
+            const import_directory_table: []std.coff.ImportDirectoryEntry =
+                @ptrCast(@alignCast(coff.import_table.directory_table_ni.slice(&coff.mf)));
+            const import_directory_entry = &import_directory_table[import_directory_table_index];
+            const import_hint_name_rva = coff.computeNodeRva(ni);
+            coff.targetStore(&import_directory_entry.name_rva, import_hint_name_rva);
+            const import_entry = &coff.import_table.dlls.values()[import_directory_table_index];
+            const import_lookup_slice = import_entry.import_lookup_table_ni.slice(&coff.mf);
+            const import_address_slice =
+                import_entry.import_address_table_si.node(coff).slice(&coff.mf);
+            const import_hint_name_slice = ni.slice(&coff.mf);
+            const import_hint_name_align = ni.alignment(&coff.mf);
+            var import_hint_name_index: u32 = 0;
+            for (0..import_entry.len) |import_symbol_index| {
+                // Records are NUL-terminated and padded to the table's
+                // alignment; skip past the previous record (the first skip
+                // steps over the DLL name at the start of the table).
+                import_hint_name_index = @intCast(import_hint_name_align.forward(
+                    std.mem.indexOfScalarPos(
+                        u8,
+                        import_hint_name_slice,
+                        import_hint_name_index,
+                        0,
+                    ).? + 1,
+                ));
+                switch (magic) {
+                    _ => unreachable,
+                    inline .PE32, .@"PE32+" => |ct_magic| {
+                        const Addr = switch (ct_magic) {
+                            _ => comptime unreachable,
+                            .PE32 => u32,
+                            .@"PE32+" => u64,
+                        };
+                        const import_lookup_table: []Addr = @ptrCast(@alignCast(import_lookup_slice));
+                        const import_address_table: []Addr = @ptrCast(@alignCast(import_address_slice));
+                        const rva = std.mem.nativeTo(
+                            Addr,
+                            import_hint_name_rva + import_hint_name_index,
+                            target_endian,
+                        );
+                        import_lookup_table[import_symbol_index] = rva;
+                        import_address_table[import_symbol_index] = rva;
+                    },
+                }
+                // Step over the 2-byte hint preceding each name so the next
+                // NUL search starts inside the name.
+                import_hint_name_index += 2;
+            }
+        },
+        inline .global,
+        .nav,
+        .uav,
+        .lazy_code,
+        .lazy_const_data,
+        => |mi| mi.symbol(coff).flushMoved(coff),
+    }
+    try ni.childrenMoved(coff.base.comp.gpa, &coff.mf);
+}
+
+/// Reacts to a node having been resized within the mapped output file,
+/// updating whatever header fields record its size and sliding subsequent
+/// sections' RVAs when virtual space is exhausted.
+fn flushResized(coff: *Coff, ni: MappedFile.Node.Index) !void {
+    _, const size = ni.location(&coff.mf).resolve(&coff.mf);
+    const node = coff.getNode(ni);
+    switch (node) {
+        else => |tag| @panic(@tagName(tag)),
+        .file => {},
+        .header => {
+            switch (coff.optionalHeaderPtr()) {
+                inline else => |optional_header| coff.targetStore(
+                    &optional_header.size_of_headers,
+                    @intCast(size),
+                ),
+            }
+            // Headers grew past the first section's RVA: slide all sections.
+            // The 4x factor over-allocates, presumably to amortize future
+            // growth.
+            if (size > coff.section_table.items[0].get(coff).rva) try coff.virtualSlide(
+                0,
+                std.mem.alignForward(
+                    u32,
+                    @intCast(size * 4),
+                    coff.optionalHeaderField(.section_alignment),
+                ),
+            );
+        },
+        .section_table => {},
+        .section => |si| {
+            const sym = si.get(coff);
+            const section_table = coff.sectionTableSlice();
+            const section_index = sym.section_number.toIndex();
+            const section = &section_table[section_index];
+            coff.targetStore(&section.size_of_raw_data, @intCast(size));
+            // Raw data outgrew the reserved virtual range: grow virtual_size
+            // (again with 4x headroom) and slide the following sections.
+            if (size > coff.targetLoad(&section.virtual_size)) {
+                const virtual_size = std.mem.alignForward(
+                    u32,
+                    @intCast(size * 4),
+                    coff.optionalHeaderField(.section_alignment),
+                );
+                coff.targetStore(&section.virtual_size, virtual_size);
+                // Keep the corresponding data directory's size in sync.
+                if (sym.data_directory) |data_directory|
+                    coff.dataDirectoriesSlice()[@intFromEnum(data_directory)].size =
+                        section.virtual_size;
+                try coff.virtualSlide(section_index + 1, sym.rva + virtual_size);
+            }
+        },
+        // Resizes of these nodes are fully absorbed by their parent section.
+        .import_directory_table,
+        .import_lookup_table,
+        .import_address_table,
+        .import_hint_name_table,
+        .global,
+        .nav,
+        .uav,
+        .lazy_code,
+        .lazy_const_data,
+        => {},
+    }
+}
+
+/// Reassigns virtual addresses to every section starting at
+/// `start_section_index`, beginning at `start_rva` and advancing by each
+/// section's virtual size; finishes by updating `size_of_image`.
+fn virtualSlide(coff: *Coff, start_section_index: usize, start_rva: u32) !void {
+    const section_table = coff.sectionTableSlice();
+    var rva = start_rva;
+    for (
+        coff.section_table.items[start_section_index..],
+        section_table[start_section_index..],
+    ) |section_si, *section| {
+        const section_sym = section_si.get(coff);
+        section_sym.rva = rva;
+        coff.targetStore(&section.virtual_address, rva);
+        // Keep the corresponding data directory's RVA in sync.
+        if (section_sym.data_directory) |data_directory|
+            coff.dataDirectoriesSlice()[@intFromEnum(data_directory)].virtual_address =
+                section.virtual_address;
+        // Everything inside the section effectively moved with it.
+        try section_sym.ni.childrenMoved(coff.base.comp.gpa, &coff.mf);
+        rva += coff.targetLoad(&section.virtual_size);
+    }
+    // `rva` now points one past the last section: the image's virtual extent.
+    switch (coff.optionalHeaderPtr()) {
+        inline else => |optional_header| coff.targetStore(
+            &optional_header.size_of_image,
+            @intCast(rva),
+        ),
+    }
+}
+
+/// Registers exports for a declaration or anonymous value, translating the
+/// inner error set into the interface expected by the frontend.
+pub fn updateExports(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    exported: Zcu.Exported,
+    export_indices: []const Zcu.Export.Index,
+) !void {
+    coff.updateExportsInner(pt, exported, export_indices) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.LinkFailure => return error.AnalysisFail,
+    };
+}
+/// Implementation of `updateExports`: ensures the exported value is lowered,
+/// then aliases each export-name symbol to the exported symbol and re-applies
+/// relocations that target those names.
+fn updateExportsInner(
+    coff: *Coff,
+    pt: Zcu.PerThread,
+    exported: Zcu.Exported,
+    export_indices: []const Zcu.Export.Index,
+) !void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+
+    switch (exported) {
+        .nav => |nav| log.debug("updateExports({f})", .{ip.getNav(nav).fqn.fmt(ip)}),
+        .uav => |uav| log.debug("updateExports(@as({f}, {f}))", .{
+            Type.fromInterned(ip.typeOf(uav)).fmt(pt),
+            Value.fromInterned(uav).fmtValue(pt),
+        }),
+    }
+    try coff.symbol_table.ensureUnusedCapacity(gpa, export_indices.len);
+    // Resolve (or queue lowering of) the symbol being exported.
+    const exported_si: Symbol.Index = switch (exported) {
+        .nav => |nav| try coff.navSymbol(zcu, nav),
+        .uav => |uav| @enumFromInt(switch (try coff.lowerUav(
+            pt,
+            uav,
+            Type.fromInterned(ip.typeOf(uav)).abiAlignment(zcu),
+            export_indices[0].ptr(zcu).src,
+        )) {
+            .sym_index => |si| si,
+            .fail => |em| {
+                defer em.destroy(gpa);
+                return coff.base.comp.link_diags.fail("{s}", .{em.msg});
+            },
+        }),
+    };
+    // Drain queued work so the exported symbol's node and rva are final.
+    while (try coff.idle(pt.tid)) {}
+    const exported_ni = exported_si.node(coff);
+    const exported_sym = exported_si.get(coff);
+    for (export_indices) |export_index| {
+        const @"export" = export_index.ptr(zcu);
+        const export_si = try coff.globalSymbol(@"export".opts.name.toSlice(ip), null);
+        const export_sym = export_si.get(coff);
+        // Alias: the export symbol shares the exported symbol's location.
+        export_sym.ni = exported_ni;
+        export_sym.rva = exported_sym.rva;
+        export_sym.size = exported_sym.size;
+        export_sym.section_number = exported_sym.section_number;
+        export_si.applyTargetRelocs(coff);
+        // Temporary entry-point hack: treat this well-known export as the
+        // image entry point.
+        // NOTE(review): this write bypasses `targetStore`, unlike other
+        // header stores — confirm cross-endian correctness.
+        if (@"export".opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
+            coff.entry_hack = exported_si;
+            coff.optionalHeaderStandardPtr().address_of_entry_point = exported_sym.rva;
+        }
+    }
+}
+
+pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTerminatedString) void {
+    _ = coff;
+    _ = exported;
+    _ = name;
+}
+
+pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void {
+    const w = std.debug.lockStderrWriter(&.{});
+    defer std.debug.unlockStderrWriter();
+    coff.printNode(tid, w, .root, 0) catch {};
+}
+
+pub fn printNode(
+    coff: *Coff,
+    tid: Zcu.PerThread.Id,
+    w: *std.Io.Writer,
+    ni: MappedFile.Node.Index,
+    indent: usize,
+) !void {
+    const node = coff.getNode(ni);
+    try w.splatByteAll(' ', indent);
+    try w.writeAll(@tagName(node));
+    switch (node) {
+        else => {},
+        .section => |si| try w.print("({s})", .{
+            std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
+        }),
+        .import_lookup_table,
+        .import_address_table,
+        .import_hint_name_table,
+        => |import_directory_table_index| try w.print("({s})", .{
+            std.mem.sliceTo(coff.import_table.dlls.values()[import_directory_table_index]
+                .import_hint_name_table_ni.sliceConst(&coff.mf), 0),
+        }),
+        .global => |gmi| {
+            const gn = gmi.globalName(coff);
+            try w.writeByte('(');
+            if (gn.lib_name.toSlice(coff)) |lib_name| try w.print("{s}.dll, ", .{lib_name});
+            try w.print("{s})", .{gn.name.toSlice(coff)});
+        },
+        .nav => |nmi| {
+            const zcu = coff.base.comp.zcu.?;
+            const ip = &zcu.intern_pool;
+            const nav = ip.getNav(nmi.navIndex(coff));
+            try w.print("({f}, {f})", .{
+                Type.fromInterned(nav.typeOf(ip)).fmt(.{ .zcu = zcu, .tid = tid }),
+                nav.fqn.fmt(ip),
+            });
+        },
+        .uav => |umi| {
+            const zcu = coff.base.comp.zcu.?;
+            const val: Value = .fromInterned(umi.uavValue(coff));
+            try w.print("({f}, {f})", .{
+                val.typeOf(zcu).fmt(.{ .zcu = zcu, .tid = tid }),
+                val.fmtValue(.{ .zcu = zcu, .tid = tid }),
+            });
+        },
+        inline .lazy_code, .lazy_const_data => |lmi| try w.print("({f})", .{
+            Type.fromInterned(lmi.lazySymbol(coff).ty).fmt(.{
+                .zcu = coff.base.comp.zcu.?,
+                .tid = tid,
+            }),
+        }),
+    }
+    {
+        const mf_node = &coff.mf.nodes.items[@intFromEnum(ni)];
+        const off, const size = mf_node.location().resolve(&coff.mf);
+        try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
+            @intFromEnum(ni),
+            off,
+            size,
+            mf_node.flags.alignment.toByteUnits(),
+            if (mf_node.flags.fixed) " fixed" else "",
+            if (mf_node.flags.moved) " moved" else "",
+            if (mf_node.flags.resized) " resized" else "",
+            if (mf_node.flags.has_content) " has_content" else "",
+        });
+    }
+    var leaf = true;
+    var child_it = ni.children(&coff.mf);
+    while (child_it.next()) |child_ni| {
+        leaf = false;
+        try coff.printNode(tid, w, child_ni, indent + 1);
+    }
+    if (leaf) {
+        const file_loc = ni.fileLocation(&coff.mf, false);
+        if (file_loc.size == 0) return;
+        var address = file_loc.offset;
+        const line_len = 0x10;
+        var line_it = std.mem.window(
+            u8,
+            coff.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
+            line_len,
+            line_len,
+        );
+        while (line_it.next()) |line_bytes| : (address += line_len) {
+            try w.splatByteAll(' ', indent + 1);
+            try w.print("{x:0>8}  ", .{address});
+            for (line_bytes) |byte| try w.print("{x:0>2} ", .{byte});
+            try w.splatByteAll(' ', 3 * (line_len - line_bytes.len) + 1);
+            for (line_bytes) |byte| try w.writeByte(if (std.ascii.isPrint(byte)) byte else '.');
+            try w.writeByte('\n');
+        }
+    }
+}
+
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+const codegen = @import("../codegen.zig");
+const Compilation = @import("../Compilation.zig");
+const Coff = @This();
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const log = std.log.scoped(.link);
+const MappedFile = @import("MappedFile.zig");
+const native_endian = builtin.cpu.arch.endian();
+const std = @import("std");
+const target_util = @import("../target.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
src/link/Elf2.zig
@@ -11,7 +11,7 @@ lazy: std.EnumArray(link.File.LazySymbol.Kind, struct {
     map: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
     pending_index: u32,
 }),
-pending_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, struct {
+pending_uavs: std.AutoArrayHashMapUnmanaged(Node.UavMapIndex, struct {
     alignment: InternPool.Alignment,
     src_loc: Zcu.LazySrcLoc,
 }),
@@ -25,10 +25,65 @@ pub const Node = union(enum) {
     shdr,
     segment: u32,
     section: Symbol.Index,
-    nav: InternPool.Nav.Index,
-    uav: InternPool.Index,
-    lazy_code: InternPool.Index,
-    lazy_const_data: InternPool.Index,
+    nav: NavMapIndex,
+    uav: UavMapIndex,
+    lazy_code: LazyMapRef.Index(.code),
+    lazy_const_data: LazyMapRef.Index(.const_data),
+
+    pub const NavMapIndex = enum(u32) {
+        _,
+
+        pub fn navIndex(nmi: NavMapIndex, elf: *const Elf) InternPool.Nav.Index {
+            return elf.navs.keys()[@intFromEnum(nmi)];
+        }
+
+        pub fn symbol(nmi: NavMapIndex, elf: *const Elf) Symbol.Index {
+            return elf.navs.values()[@intFromEnum(nmi)];
+        }
+    };
+
+    pub const UavMapIndex = enum(u32) {
+        _,
+
+        pub fn uavValue(umi: UavMapIndex, elf: *const Elf) InternPool.Index {
+            return elf.uavs.keys()[@intFromEnum(umi)];
+        }
+
+        pub fn symbol(umi: UavMapIndex, elf: *const Elf) Symbol.Index {
+            return elf.uavs.values()[@intFromEnum(umi)];
+        }
+    };
+
+    pub const LazyMapRef = struct {
+        kind: link.File.LazySymbol.Kind,
+        index: u32,
+
+        pub fn Index(comptime kind: link.File.LazySymbol.Kind) type {
+            return enum(u32) {
+                _,
+
+                pub fn ref(lmi: @This()) LazyMapRef {
+                    return .{ .kind = kind, .index = @intFromEnum(lmi) };
+                }
+
+                pub fn lazySymbol(lmi: @This(), elf: *const Elf) link.File.LazySymbol {
+                    return lmi.ref().lazySymbol(elf);
+                }
+
+                pub fn symbol(lmi: @This(), elf: *const Elf) Symbol.Index {
+                    return lmi.ref().symbol(elf);
+                }
+            };
+        }
+
+        pub fn lazySymbol(lmr: LazyMapRef, elf: *const Elf) link.File.LazySymbol {
+            return .{ .kind = lmr.kind, .ty = elf.lazy.getPtrConst(lmr.kind).map.keys()[lmr.index] };
+        }
+
+        pub fn symbol(lmr: LazyMapRef, elf: *const Elf) Symbol.Index {
+            return elf.lazy.getPtrConst(lmr.kind).map.values()[lmr.index];
+        }
+    };
 
     pub const Tag = @typeInfo(Node).@"union".tag_type.?;
 
@@ -43,11 +98,7 @@ pub const Node = union(enum) {
             seg_text,
             seg_data,
         };
-        var mut_known: std.enums.EnumFieldStruct(
-            Known,
-            MappedFile.Node.Index,
-            null,
-        ) = undefined;
+        var mut_known: std.enums.EnumFieldStruct(Known, MappedFile.Node.Index, null) = undefined;
         for (@typeInfo(Known).@"enum".fields) |field|
             @field(mut_known, field.name) = @enumFromInt(field.value);
         break :known mut_known;
@@ -223,10 +274,10 @@ pub const Reloc = extern struct {
     addend: i64,
 
     pub const Type = extern union {
-        x86_64: std.elf.R_X86_64,
-        aarch64: std.elf.R_AARCH64,
-        riscv: std.elf.R_RISCV,
-        ppc64: std.elf.R_PPC64,
+        X86_64: std.elf.R_X86_64,
+        AARCH64: std.elf.R_AARCH64,
+        RISCV: std.elf.R_RISCV,
+        PPC64: std.elf.R_PPC64,
     };
 
     pub const Index = enum(u32) {
@@ -239,7 +290,7 @@ pub const Reloc = extern struct {
     };
 
     pub fn apply(reloc: *const Reloc, elf: *Elf) void {
-        const target_endian = elf.endian();
+        const target_endian = elf.targetEndian();
         switch (reloc.loc.get(elf).ni) {
             .none => return,
             else => |ni| if (ni.hasMoved(&elf.mf)) return,
@@ -274,7 +325,7 @@ pub const Reloc = extern struct {
                 ) +% @as(u64, @bitCast(reloc.addend));
                 switch (elf.ehdrField(.machine)) {
                     else => |machine| @panic(@tagName(machine)),
-                    .X86_64 => switch (reloc.type.x86_64) {
+                    .X86_64 => switch (reloc.type.X86_64) {
                         else => |kind| @panic(@tagName(kind)),
                         .@"64" => std.mem.writeInt(
                             u64,
@@ -394,37 +445,7 @@ fn create(
         },
         .Obj => .REL,
     };
-    const machine: std.elf.EM = switch (target.cpu.arch) {
-        .spirv32, .spirv64, .wasm32, .wasm64 => .NONE,
-        .sparc => .SPARC,
-        .x86 => .@"386",
-        .m68k => .@"68K",
-        .mips, .mipsel, .mips64, .mips64el => .MIPS,
-        .powerpc, .powerpcle => .PPC,
-        .powerpc64, .powerpc64le => .PPC64,
-        .s390x => .S390,
-        .arm, .armeb, .thumb, .thumbeb => .ARM,
-        .hexagon => .SH,
-        .sparc64 => .SPARCV9,
-        .arc => .ARC,
-        .x86_64 => .X86_64,
-        .or1k => .OR1K,
-        .xtensa => .XTENSA,
-        .msp430 => .MSP430,
-        .avr => .AVR,
-        .nvptx, .nvptx64 => .CUDA,
-        .kalimba => .CSR_KALIMBA,
-        .aarch64, .aarch64_be => .AARCH64,
-        .xcore => .XCORE,
-        .amdgcn => .AMDGPU,
-        .riscv32, .riscv32be, .riscv64, .riscv64be => .RISCV,
-        .lanai => .LANAI,
-        .bpfel, .bpfeb => .BPF,
-        .ve => .VE,
-        .csky => .CSKY,
-        .loongarch32, .loongarch64 => .LOONGARCH,
-        .propeller => if (target.cpu.has(.propeller, .p2)) .PROPELLER2 else .PROPELLER,
-    };
+    const machine = target.toElfMachine();
     const maybe_interp = switch (comp.config.output_mode) {
         .Exe, .Lib => switch (comp.config.link_mode) {
             .static => null,
@@ -479,7 +500,7 @@ fn create(
 
     switch (class) {
         .NONE, _ => unreachable,
-        inline .@"32", .@"64" => |ct_class| try elf.initHeaders(
+        inline else => |ct_class| try elf.initHeaders(
             ct_class,
             data,
             osabi,
@@ -567,30 +588,31 @@ fn initHeaders(
         .fixed = true,
     }));
     elf.nodes.appendAssumeCapacity(.ehdr);
-
-    const ehdr: *ElfN.Ehdr = @ptrCast(@alignCast(ehdr_ni.slice(&elf.mf)));
-    const EI = std.elf.EI;
-    @memcpy(ehdr.ident[0..std.elf.MAGIC.len], std.elf.MAGIC);
-    ehdr.ident[EI.CLASS] = @intFromEnum(class);
-    ehdr.ident[EI.DATA] = @intFromEnum(data);
-    ehdr.ident[EI.VERSION] = 1;
-    ehdr.ident[EI.OSABI] = @intFromEnum(osabi);
-    ehdr.ident[EI.ABIVERSION] = 0;
-    @memset(ehdr.ident[EI.PAD..], 0);
-    ehdr.type = @"type";
-    ehdr.machine = machine;
-    ehdr.version = 1;
-    ehdr.entry = 0;
-    ehdr.phoff = 0;
-    ehdr.shoff = 0;
-    ehdr.flags = 0;
-    ehdr.ehsize = @sizeOf(ElfN.Ehdr);
-    ehdr.phentsize = @sizeOf(ElfN.Phdr);
-    ehdr.phnum = @min(phnum, std.elf.PN_XNUM);
-    ehdr.shentsize = @sizeOf(ElfN.Shdr);
-    ehdr.shnum = 1;
-    ehdr.shstrndx = 0;
-    if (target_endian != native_endian) std.mem.byteSwapAllFields(ElfN.Ehdr, ehdr);
+    {
+        const ehdr: *ElfN.Ehdr = @ptrCast(@alignCast(ehdr_ni.slice(&elf.mf)));
+        const EI = std.elf.EI;
+        @memcpy(ehdr.ident[0..std.elf.MAGIC.len], std.elf.MAGIC);
+        ehdr.ident[EI.CLASS] = @intFromEnum(class);
+        ehdr.ident[EI.DATA] = @intFromEnum(data);
+        ehdr.ident[EI.VERSION] = 1;
+        ehdr.ident[EI.OSABI] = @intFromEnum(osabi);
+        ehdr.ident[EI.ABIVERSION] = 0;
+        @memset(ehdr.ident[EI.PAD..], 0);
+        ehdr.type = @"type";
+        ehdr.machine = machine;
+        ehdr.version = 1;
+        ehdr.entry = 0;
+        ehdr.phoff = 0;
+        ehdr.shoff = 0;
+        ehdr.flags = 0;
+        ehdr.ehsize = @sizeOf(ElfN.Ehdr);
+        ehdr.phentsize = @sizeOf(ElfN.Phdr);
+        ehdr.phnum = @min(phnum, std.elf.PN_XNUM);
+        ehdr.shentsize = @sizeOf(ElfN.Shdr);
+        ehdr.shnum = 1;
+        ehdr.shstrndx = 0;
+        if (target_endian != native_endian) std.mem.byteSwapAllFields(ElfN.Ehdr, ehdr);
+    }
 
     const phdr_ni = Node.known.phdr;
     assert(phdr_ni == try elf.mf.addLastChildNode(gpa, seg_rodata_ni, .{
@@ -750,7 +772,10 @@ fn initHeaders(
         },
         .shndx = std.elf.SHN_UNDEF,
     };
-    ehdr.shstrndx = ehdr.shnum;
+    {
+        const ehdr = @field(elf.ehdrPtr(), @tagName(class));
+        ehdr.shstrndx = ehdr.shnum;
+    }
     assert(try elf.addSection(seg_rodata_ni, .{
         .type = std.elf.SHT_STRTAB,
         .addralign = elf.mf.flags.block_size,
@@ -821,6 +846,24 @@ fn getNode(elf: *Elf, ni: MappedFile.Node.Index) Node {
     return elf.nodes.get(@intFromEnum(ni));
 }
 
+pub fn identClass(elf: *Elf) std.elf.CLASS {
+    return @enumFromInt(elf.mf.contents[std.elf.EI.CLASS]);
+}
+
+pub fn identData(elf: *Elf) std.elf.DATA {
+    return @enumFromInt(elf.mf.contents[std.elf.EI.DATA]);
+}
+fn endianForData(data: std.elf.DATA) std.builtin.Endian {
+    return switch (data) {
+        .NONE, _ => unreachable,
+        .@"2LSB" => .little,
+        .@"2MSB" => .big,
+    };
+}
+pub fn targetEndian(elf: *Elf) std.builtin.Endian {
+    return endianForData(elf.identData());
+}
+
 pub const EhdrPtr = union(std.elf.CLASS) {
     NONE: noreturn,
     @"32": *std.elf.Elf32.Ehdr,
@@ -830,7 +873,7 @@ pub fn ehdrPtr(elf: *Elf) EhdrPtr {
     const slice = Node.known.ehdr.slice(&elf.mf);
     return switch (elf.identClass()) {
         .NONE, _ => unreachable,
-        inline .@"32", .@"64" => |class| @unionInit(
+        inline else => |class| @unionInit(
             EhdrPtr,
             @tagName(class),
             @ptrCast(@alignCast(slice)),
@@ -841,35 +884,15 @@ pub fn ehdrField(
     elf: *Elf,
     comptime field: enum { type, machine },
 ) @FieldType(std.elf.Elf32.Ehdr, @tagName(field)) {
-    const Field = @FieldType(std.elf.Elf32.Ehdr, @tagName(field));
-    comptime assert(@FieldType(std.elf.Elf64.Ehdr, @tagName(field)) == Field);
     return @enumFromInt(std.mem.toNative(
-        @typeInfo(Field).@"enum".tag_type,
+        @typeInfo(@FieldType(std.elf.Elf32.Ehdr, @tagName(field))).@"enum".tag_type,
         @intFromEnum(switch (elf.ehdrPtr()) {
             inline else => |ehdr| @field(ehdr, @tagName(field)),
         }),
-        elf.endian(),
+        elf.targetEndian(),
     ));
 }
 
-pub fn identClass(elf: *Elf) std.elf.CLASS {
-    return @enumFromInt(elf.mf.contents[std.elf.EI.CLASS]);
-}
-
-pub fn identData(elf: *Elf) std.elf.DATA {
-    return @enumFromInt(elf.mf.contents[std.elf.EI.DATA]);
-}
-fn endianForData(data: std.elf.DATA) std.builtin.Endian {
-    return switch (data) {
-        .NONE, _ => unreachable,
-        .@"2LSB" => .little,
-        .@"2MSB" => .big,
-    };
-}
-pub fn endian(elf: *Elf) std.builtin.Endian {
-    return endianForData(elf.identData());
-}
-
 fn baseAddrForType(@"type": std.elf.ET) u64 {
     return switch (@"type") {
         else => 0,
@@ -889,7 +912,7 @@ pub fn phdrSlice(elf: *Elf) PhdrSlice {
     const slice = Node.known.phdr.slice(&elf.mf);
     return switch (elf.identClass()) {
         .NONE, _ => unreachable,
-        inline .@"32", .@"64" => |class| @unionInit(
+        inline else => |class| @unionInit(
             PhdrSlice,
             @tagName(class),
             @ptrCast(@alignCast(slice)),
@@ -906,7 +929,7 @@ pub fn shdrSlice(elf: *Elf) ShdrSlice {
     const slice = Node.known.shdr.slice(&elf.mf);
     return switch (elf.identClass()) {
         .NONE, _ => unreachable,
-        inline .@"32", .@"64" => |class| @unionInit(
+        inline else => |class| @unionInit(
             ShdrSlice,
             @tagName(class),
             @ptrCast(@alignCast(slice)),
@@ -923,7 +946,7 @@ pub fn symSlice(elf: *Elf) SymSlice {
     const slice = Symbol.Index.symtab.node(elf).slice(&elf.mf);
     return switch (elf.identClass()) {
         .NONE, _ => unreachable,
-        inline .@"32", .@"64" => |class| @unionInit(
+        inline else => |class| @unionInit(
             SymSlice,
             @tagName(class),
             @ptrCast(@alignCast(slice)),
@@ -942,7 +965,7 @@ pub fn symPtr(elf: *Elf, si: Symbol.Index) SymPtr {
     };
 }
 
-fn addSymbolAssumeCapacity(elf: *Elf) !Symbol.Index {
+fn addSymbolAssumeCapacity(elf: *Elf) Symbol.Index {
     defer elf.symtab.addOneAssumeCapacity().* = .{
         .ni = .none,
         .loc_relocs = .none,
@@ -953,30 +976,27 @@ fn addSymbolAssumeCapacity(elf: *Elf) !Symbol.Index {
 }
 
 fn initSymbolAssumeCapacity(elf: *Elf, opts: Symbol.Index.InitOptions) !Symbol.Index {
-    const si = try elf.addSymbolAssumeCapacity();
+    const si = elf.addSymbolAssumeCapacity();
     try si.init(elf, opts);
     return si;
 }
 
-pub fn globalSymbol(
-    elf: *Elf,
-    opts: struct {
-        name: []const u8,
-        type: std.elf.STT,
-        bind: std.elf.STB = .GLOBAL,
-        visibility: std.elf.STV = .DEFAULT,
-    },
-) !Symbol.Index {
+pub fn globalSymbol(elf: *Elf, opts: struct {
+    name: []const u8,
+    type: std.elf.STT,
+    bind: std.elf.STB = .GLOBAL,
+    visibility: std.elf.STV = .DEFAULT,
+}) !Symbol.Index {
     const gpa = elf.base.comp.gpa;
     try elf.symtab.ensureUnusedCapacity(gpa, 1);
-    const sym_gop = try elf.globals.getOrPut(gpa, try elf.string(.strtab, opts.name));
-    if (!sym_gop.found_existing) sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
+    const global_gop = try elf.globals.getOrPut(gpa, try elf.string(.strtab, opts.name));
+    if (!global_gop.found_existing) global_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
         .name = opts.name,
         .type = opts.type,
         .bind = opts.bind,
         .visibility = opts.visibility,
     });
-    return sym_gop.value_ptr.*;
+    return global_gop.value_ptr.*;
 }
 
 fn navType(
@@ -1008,8 +1028,19 @@ fn navType(
         },
     };
 }
-pub fn navSymbol(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
+fn navMapIndex(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
     const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const nav = ip.getNav(nav_index);
+    try elf.symtab.ensureUnusedCapacity(gpa, 1);
+    const nav_gop = try elf.navs.getOrPut(gpa, nav_index);
+    if (!nav_gop.found_existing) nav_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
+        .name = nav.fqn.toSlice(ip),
+        .type = navType(ip, nav.status, elf.base.comp.config.any_non_single_threaded),
+    });
+    return @enumFromInt(nav_gop.index);
+}
+pub fn navSymbol(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
     const ip = &zcu.intern_pool;
     const nav = ip.getNav(nav_index);
     if (nav.getExtern(ip)) |@"extern"| return elf.globalSymbol(.{
@@ -1027,40 +1058,37 @@ pub fn navSymbol(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.
             .protected => .PROTECTED,
         },
     });
-    try elf.symtab.ensureUnusedCapacity(gpa, 1);
-    const sym_gop = try elf.navs.getOrPut(gpa, nav_index);
-    if (!sym_gop.found_existing) {
-        sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
-            .name = nav.fqn.toSlice(ip),
-            .type = navType(ip, nav.status, elf.base.comp.config.any_non_single_threaded),
-        });
-    }
-    return sym_gop.value_ptr.*;
+    const nmi = try elf.navMapIndex(zcu, nav_index);
+    return nmi.symbol(elf);
 }
 
-pub fn uavSymbol(elf: *Elf, uav_val: InternPool.Index) !Symbol.Index {
+fn uavMapIndex(elf: *Elf, uav_val: InternPool.Index) !Node.UavMapIndex {
     const gpa = elf.base.comp.gpa;
     try elf.symtab.ensureUnusedCapacity(gpa, 1);
-    const sym_gop = try elf.uavs.getOrPut(gpa, uav_val);
-    if (!sym_gop.found_existing)
-        sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{ .type = .OBJECT });
-    return sym_gop.value_ptr.*;
+    const uav_gop = try elf.uavs.getOrPut(gpa, uav_val);
+    if (!uav_gop.found_existing)
+        uav_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{ .type = .OBJECT });
+    return @enumFromInt(uav_gop.index);
+}
+pub fn uavSymbol(elf: *Elf, uav_val: InternPool.Index) !Symbol.Index {
+    const umi = try elf.uavMapIndex(uav_val);
+    return umi.symbol(elf);
 }
 
 pub fn lazySymbol(elf: *Elf, lazy: link.File.LazySymbol) !Symbol.Index {
     const gpa = elf.base.comp.gpa;
     try elf.symtab.ensureUnusedCapacity(gpa, 1);
-    const sym_gop = try elf.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
-    if (!sym_gop.found_existing) {
-        sym_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
+    const lazy_gop = try elf.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
+    if (!lazy_gop.found_existing) {
+        lazy_gop.value_ptr.* = try elf.initSymbolAssumeCapacity(.{
             .type = switch (lazy.kind) {
                 .code => .FUNC,
                 .const_data => .OBJECT,
             },
         });
-        elf.base.comp.link_lazy_prog_node.increaseEstimatedTotalItems(1);
+        elf.base.comp.link_synth_prog_node.increaseEstimatedTotalItems(1);
     }
-    return sym_gop.value_ptr.*;
+    return lazy_gop.value_ptr.*;
 }
 
 pub fn getNavVAddr(
@@ -1088,7 +1116,7 @@ pub fn getVAddr(elf: *Elf, reloc_info: link.File.RelocInfo, target_si: Symbol.In
         reloc_info.addend,
         switch (elf.ehdrField(.machine)) {
             else => unreachable,
-            .X86_64 => .{ .x86_64 = switch (elf.identClass()) {
+            .X86_64 => .{ .X86_64 = switch (elf.identClass()) {
                 .NONE, _ => unreachable,
                 .@"32" => .@"32",
                 .@"64" => .@"64",
@@ -1107,7 +1135,7 @@ fn addSection(elf: *Elf, segment_ni: MappedFile.Node.Index, opts: struct {
     entsize: std.elf.Word = 0,
 }) !Symbol.Index {
     const gpa = elf.base.comp.gpa;
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     try elf.nodes.ensureUnusedCapacity(gpa, 1);
     try elf.symtab.ensureUnusedCapacity(gpa, 1);
 
@@ -1127,7 +1155,7 @@ fn addSection(elf: *Elf, segment_ni: MappedFile.Node.Index, opts: struct {
         .size = opts.size,
         .moved = true,
     });
-    const si = try elf.addSymbolAssumeCapacity();
+    const si = elf.addSymbolAssumeCapacity();
     elf.nodes.appendAssumeCapacity(.{ .section = si });
     si.get(elf).ni = ni;
     try si.init(elf, .{
@@ -1160,7 +1188,7 @@ fn addSection(elf: *Elf, segment_ni: MappedFile.Node.Index, opts: struct {
 fn renameSection(elf: *Elf, si: Symbol.Index, name: []const u8) !void {
     const strtab_entry = try elf.string(.strtab, name);
     const shstrtab_entry = try elf.string(.shstrtab, name);
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     switch (elf.shdrSlice()) {
         inline else => |shdr, class| {
             const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1173,7 +1201,7 @@ fn renameSection(elf: *Elf, si: Symbol.Index, name: []const u8) !void {
 }
 
 fn linkSections(elf: *Elf, si: Symbol.Index, link_si: Symbol.Index) !void {
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     switch (elf.shdrSlice()) {
         inline else => |shdr, class| {
             const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1184,7 +1212,7 @@ fn linkSections(elf: *Elf, si: Symbol.Index, link_si: Symbol.Index) !void {
 }
 
 fn sectionName(elf: *Elf, si: Symbol.Index) [:0]const u8 {
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     const name = Symbol.Index.shstrtab.node(elf).slice(&elf.mf)[name: switch (elf.shdrSlice()) {
         inline else => |shndx, class| {
             const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1263,7 +1291,8 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
     };
     if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
 
-    const si = try elf.navSymbol(zcu, nav_index);
+    const nmi = try elf.navMapIndex(zcu, nav_index);
+    const si = nmi.symbol(elf);
     const ni = ni: {
         const sym = si.get(elf);
         switch (sym.ni) {
@@ -1275,7 +1304,7 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
                     .alignment = pt.navAlignment(nav_index).toStdMem(),
                     .moved = true,
                 });
-                elf.nodes.appendAssumeCapacity(.{ .nav = nav_index });
+                elf.nodes.appendAssumeCapacity(.{ .nav = nmi });
                 sym.ni = ni;
                 switch (elf.symPtr(si)) {
                     inline else => |sym_ptr, class| sym_ptr.shndx =
@@ -1289,28 +1318,24 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
         break :ni sym.ni;
     };
 
-    const size = size: {
-        var nw: MappedFile.Node.Writer = undefined;
-        ni.writer(&elf.mf, gpa, &nw);
-        defer nw.deinit();
-        codegen.generateSymbol(
-            &elf.base,
-            pt,
-            zcu.navSrcLoc(nav_index),
-            .fromInterned(nav_init),
-            &nw.interface,
-            .{ .atom_index = @intFromEnum(si) },
-        ) catch |err| switch (err) {
-            error.WriteFailed => return error.OutOfMemory,
-            else => |e| return e,
-        };
-        break :size nw.interface.end;
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&elf.mf, gpa, &nw);
+    defer nw.deinit();
+    codegen.generateSymbol(
+        &elf.base,
+        pt,
+        zcu.navSrcLoc(nav_index),
+        .fromInterned(nav_init),
+        &nw.interface,
+        .{ .atom_index = @intFromEnum(si) },
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
     };
-
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     switch (elf.symPtr(si)) {
         inline else => |sym| sym.size =
-            std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+            std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
     }
     si.applyLocationRelocs(elf);
 }
@@ -1326,7 +1351,7 @@ pub fn lowerUav(
     const gpa = zcu.gpa;
 
     try elf.pending_uavs.ensureUnusedCapacity(gpa, 1);
-    const si = elf.uavSymbol(uav_val) catch |err| switch (err) {
+    const umi = elf.uavMapIndex(uav_val) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         else => |e| return .{ .fail = try Zcu.ErrorMsg.create(
             gpa,
@@ -1335,11 +1360,12 @@ pub fn lowerUav(
             .{@errorName(e)},
         ) },
     };
+    const si = umi.symbol(elf);
     if (switch (si.get(elf).ni) {
         .none => true,
         else => |ni| uav_align.toStdMem().order(ni.alignment(&elf.mf)).compare(.gt),
     }) {
-        const gop = elf.pending_uavs.getOrPutAssumeCapacity(uav_val);
+        const gop = elf.pending_uavs.getOrPutAssumeCapacity(umi);
         if (gop.found_existing) {
             gop.value_ptr.alignment = gop.value_ptr.alignment.max(uav_align);
         } else {
@@ -1347,7 +1373,7 @@ pub fn lowerUav(
                 .alignment = uav_align,
                 .src_loc = src_loc,
             };
-            elf.base.comp.link_uav_prog_node.increaseEstimatedTotalItems(1);
+            elf.base.comp.link_const_prog_node.increaseEstimatedTotalItems(1);
         }
     }
     return .{ .sym_index = @intFromEnum(si) };
@@ -1384,7 +1410,8 @@ fn updateFuncInner(
     const func = zcu.funcInfo(func_index);
     const nav = ip.getNav(func.owner_nav);
 
-    const si = try elf.navSymbol(zcu, func.owner_nav);
+    const nmi = try elf.navMapIndex(zcu, func.owner_nav);
+    const si = nmi.symbol(elf);
     log.debug("updateFunc({f}) = {d}", .{ nav.fqn.fmt(ip), si });
     const ni = ni: {
         const sym = si.get(elf);
@@ -1406,7 +1433,7 @@ fn updateFuncInner(
                     }.toStdMem(),
                     .moved = true,
                 });
-                elf.nodes.appendAssumeCapacity(.{ .nav = func.owner_nav });
+                elf.nodes.appendAssumeCapacity(.{ .nav = nmi });
                 sym.ni = ni;
                 switch (elf.symPtr(si)) {
                     inline else => |sym_ptr, class| sym_ptr.shndx =
@@ -1420,37 +1447,35 @@ fn updateFuncInner(
         break :ni sym.ni;
     };
 
-    const size = size: {
-        var nw: MappedFile.Node.Writer = undefined;
-        ni.writer(&elf.mf, gpa, &nw);
-        defer nw.deinit();
-        codegen.emitFunction(
-            &elf.base,
-            pt,
-            zcu.navSrcLoc(func.owner_nav),
-            func_index,
-            @intFromEnum(si),
-            mir,
-            &nw.interface,
-            .none,
-        ) catch |err| switch (err) {
-            error.WriteFailed => return nw.err.?,
-            else => |e| return e,
-        };
-        break :size nw.interface.end;
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&elf.mf, gpa, &nw);
+    defer nw.deinit();
+    codegen.emitFunction(
+        &elf.base,
+        pt,
+        zcu.navSrcLoc(func.owner_nav),
+        func_index,
+        @intFromEnum(si),
+        mir,
+        &nw.interface,
+        .none,
+    ) catch |err| switch (err) {
+        error.WriteFailed => return nw.err.?,
+        else => |e| return e,
     };
-
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     switch (elf.symPtr(si)) {
         inline else => |sym| sym.size =
-            std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+            std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
     }
     si.applyLocationRelocs(elf);
 }
 
 pub fn updateErrorData(elf: *Elf, pt: Zcu.PerThread) !void {
-    const si = elf.lazy.getPtr(.const_data).map.get(.anyerror_type) orelse return;
-    elf.flushLazy(pt, .{ .kind = .const_data, .ty = .anyerror_type }, si) catch |err| switch (err) {
+    elf.flushLazy(pt, .{
+        .kind = .const_data,
+        .index = @intCast(elf.lazy.getPtr(.const_data).map.getIndex(.anyerror_type) orelse return),
+    }) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         error.CodegenFail => return error.LinkFailure,
         else => |e| return elf.base.comp.link_diags.fail("updateErrorData failed {t}", .{e}),
@@ -1472,14 +1497,13 @@ pub fn idle(elf: *Elf, tid: Zcu.PerThread.Id) !bool {
     const comp = elf.base.comp;
     task: {
         while (elf.pending_uavs.pop()) |pending_uav| {
-            const sub_prog_node =
-                elf.idleProgNode(
-                    tid,
-                    comp.link_uav_prog_node,
-                    .{ .uav = pending_uav.key },
-                );
+            const sub_prog_node = elf.idleProgNode(
+                tid,
+                comp.link_const_prog_node,
+                .{ .uav = pending_uav.key },
+            );
             defer sub_prog_node.end();
-            break :task elf.flushUav(
+            elf.flushUav(
                 .{ .zcu = elf.base.comp.zcu.?, .tid = tid },
                 pending_uav.key,
                 pending_uav.value.alignment,
@@ -1491,37 +1515,34 @@ pub fn idle(elf: *Elf, tid: Zcu.PerThread.Id) !bool {
                     .{e},
                 ),
             };
+            break :task;
         }
         var lazy_it = elf.lazy.iterator();
-        while (lazy_it.next()) |lazy| for (
-            lazy.value.map.keys()[lazy.value.pending_index..],
-            lazy.value.map.values()[lazy.value.pending_index..],
-        ) |ty, si| {
-            lazy.value.pending_index += 1;
+        while (lazy_it.next()) |lazy| if (lazy.value.pending_index < lazy.value.map.count()) {
             const pt: Zcu.PerThread = .{ .zcu = elf.base.comp.zcu.?, .tid = tid };
-            const kind = switch (lazy.key) {
+            const lmr: Node.LazyMapRef = .{ .kind = lazy.key, .index = lazy.value.pending_index };
+            lazy.value.pending_index += 1;
+            const kind = switch (lmr.kind) {
                 .code => "code",
                 .const_data => "data",
             };
             var name: [std.Progress.Node.max_name_len]u8 = undefined;
-            const sub_prog_node = comp.link_lazy_prog_node.start(
+            const sub_prog_node = comp.link_synth_prog_node.start(
                 std.fmt.bufPrint(&name, "lazy {s} for {f}", .{
                     kind,
-                    Type.fromInterned(ty).fmt(pt),
+                    Type.fromInterned(lmr.lazySymbol(elf).ty).fmt(pt),
                 }) catch &name,
                 0,
             );
             defer sub_prog_node.end();
-            break :task elf.flushLazy(pt, .{
-                .kind = lazy.key,
-                .ty = ty,
-            }, si) catch |err| switch (err) {
+            elf.flushLazy(pt, lmr) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 else => |e| return elf.base.comp.link_diags.fail(
                     "linker failed to lower lazy {s}: {t}",
                     .{ kind, e },
                 ),
             };
+            break :task;
         };
         while (elf.mf.updates.pop()) |ni| {
             const clean_moved = ni.cleanMoved(&elf.mf);
@@ -1551,12 +1572,12 @@ fn idleProgNode(
     return prog_node.start(name: switch (node) {
         else => |tag| @tagName(tag),
         .section => |si| elf.sectionName(si),
-        .nav => |nav| {
+        .nav => |nmi| {
             const ip = &elf.base.comp.zcu.?.intern_pool;
-            break :name ip.getNav(nav).fqn.toSlice(ip);
+            break :name ip.getNav(nmi.navIndex(elf)).fqn.toSlice(ip);
         },
-        .uav => |uav| std.fmt.bufPrint(&name, "{f}", .{
-            Value.fromInterned(uav).fmtValue(.{ .zcu = elf.base.comp.zcu.?, .tid = tid }),
+        .uav => |umi| std.fmt.bufPrint(&name, "{f}", .{
+            Value.fromInterned(umi.uavValue(elf)).fmtValue(.{ .zcu = elf.base.comp.zcu.?, .tid = tid }),
         }) catch &name,
     }, 0);
 }
@@ -1564,14 +1585,15 @@ fn idleProgNode(
 fn flushUav(
     elf: *Elf,
     pt: Zcu.PerThread,
-    uav_val: InternPool.Index,
+    umi: Node.UavMapIndex,
     uav_align: InternPool.Alignment,
     src_loc: Zcu.LazySrcLoc,
 ) !void {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
 
-    const si = try elf.uavSymbol(uav_val);
+    const uav_val = umi.uavValue(elf);
+    const si = umi.symbol(elf);
     const ni = ni: {
         const sym = si.get(elf);
         switch (sym.ni) {
@@ -1581,7 +1603,7 @@ fn flushUav(
                     .alignment = uav_align.toStdMem(),
                     .moved = true,
                 });
-                elf.nodes.appendAssumeCapacity(.{ .uav = uav_val });
+                elf.nodes.appendAssumeCapacity(.{ .uav = umi });
                 sym.ni = ni;
                 switch (elf.symPtr(si)) {
                     inline else => |sym_ptr, class| sym_ptr.shndx =
@@ -1598,36 +1620,34 @@ fn flushUav(
         break :ni sym.ni;
     };
 
-    const size = size: {
-        var nw: MappedFile.Node.Writer = undefined;
-        ni.writer(&elf.mf, gpa, &nw);
-        defer nw.deinit();
-        codegen.generateSymbol(
-            &elf.base,
-            pt,
-            src_loc,
-            .fromInterned(uav_val),
-            &nw.interface,
-            .{ .atom_index = @intFromEnum(si) },
-        ) catch |err| switch (err) {
-            error.WriteFailed => return error.OutOfMemory,
-            else => |e| return e,
-        };
-        break :size nw.interface.end;
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&elf.mf, gpa, &nw);
+    defer nw.deinit();
+    codegen.generateSymbol(
+        &elf.base,
+        pt,
+        src_loc,
+        .fromInterned(uav_val),
+        &nw.interface,
+        .{ .atom_index = @intFromEnum(si) },
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
     };
-
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     switch (elf.symPtr(si)) {
         inline else => |sym| sym.size =
-            std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+            std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
     }
     si.applyLocationRelocs(elf);
 }
 
-fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lazy: link.File.LazySymbol, si: Symbol.Index) !void {
+fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
 
+    const lazy = lmr.lazySymbol(elf);
+    const si = lmr.symbol(elf);
     const ni = ni: {
         const sym = si.get(elf);
         switch (sym.ni) {
@@ -1639,8 +1659,8 @@ fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lazy: link.File.LazySymbol, si: Symbo
                 };
                 const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{ .moved = true });
                 elf.nodes.appendAssumeCapacity(switch (lazy.kind) {
-                    .code => .{ .lazy_code = lazy.ty },
-                    .const_data => .{ .lazy_const_data = lazy.ty },
+                    .code => .{ .lazy_code = @enumFromInt(lmr.index) },
+                    .const_data => .{ .lazy_const_data = @enumFromInt(lmr.index) },
                 });
                 sym.ni = ni;
                 switch (elf.symPtr(si)) {
@@ -1655,34 +1675,30 @@ fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lazy: link.File.LazySymbol, si: Symbo
         break :ni sym.ni;
     };
 
-    const size = size: {
-        var required_alignment: InternPool.Alignment = .none;
-        var nw: MappedFile.Node.Writer = undefined;
-        ni.writer(&elf.mf, gpa, &nw);
-        defer nw.deinit();
-        try codegen.generateLazySymbol(
-            &elf.base,
-            pt,
-            Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
-            lazy,
-            &required_alignment,
-            &nw.interface,
-            .none,
-            .{ .atom_index = @intFromEnum(si) },
-        );
-        break :size nw.interface.end;
-    };
-
-    const target_endian = elf.endian();
+    var required_alignment: InternPool.Alignment = .none;
+    var nw: MappedFile.Node.Writer = undefined;
+    ni.writer(&elf.mf, gpa, &nw);
+    defer nw.deinit();
+    try codegen.generateLazySymbol(
+        &elf.base,
+        pt,
+        Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
+        lazy,
+        &required_alignment,
+        &nw.interface,
+        .none,
+        .{ .atom_index = @intFromEnum(si) },
+    );
+    const target_endian = elf.targetEndian();
     switch (elf.symPtr(si)) {
         inline else => |sym| sym.size =
-            std.mem.nativeTo(@TypeOf(sym.size), @intCast(size), target_endian),
+            std.mem.nativeTo(@TypeOf(sym.size), @intCast(nw.interface.end), target_endian),
     }
     si.applyLocationRelocs(elf);
 }
 
 fn flushMoved(elf: *Elf, ni: MappedFile.Node.Index) !void {
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     const file_offset = ni.fileLocation(&elf.mf, false).offset;
     const node = elf.getNode(ni);
     switch (node) {
@@ -1738,11 +1754,8 @@ fn flushMoved(elf: *Elf, ni: MappedFile.Node.Index) !void {
         .nav, .uav, .lazy_code, .lazy_const_data => {
             const si = switch (node) {
                 else => unreachable,
-                .nav => |nav| elf.navs.get(nav),
-                .uav => |uav| elf.uavs.get(uav),
-                .lazy_code => |ty| elf.lazy.getPtr(.code).map.get(ty),
-                .lazy_const_data => |ty| elf.lazy.getPtr(.const_data).map.get(ty),
-            }.?;
+                inline .nav, .uav, .lazy_code, .lazy_const_data => |mi| mi.symbol(elf),
+            };
             switch (elf.shdrSlice()) {
                 inline else => |shdr, class| {
                     const sym = @field(elf.symPtr(si), @tagName(class));
@@ -1773,7 +1786,7 @@ fn flushMoved(elf: *Elf, ni: MappedFile.Node.Index) !void {
 }
 
 fn flushResized(elf: *Elf, ni: MappedFile.Node.Index) !void {
-    const target_endian = elf.endian();
+    const target_endian = elf.targetEndian();
     _, const size = ni.location(&elf.mf).resolve(&elf.mf);
     const node = elf.getNode(ni);
     switch (node) {
@@ -1957,65 +1970,74 @@ pub fn printNode(
     indent: usize,
 ) !void {
     const node = elf.getNode(ni);
-    const mf_node = &elf.mf.nodes.items[@intFromEnum(ni)];
-    const off, const size = mf_node.location().resolve(&elf.mf);
     try w.splatByteAll(' ', indent);
     try w.writeAll(@tagName(node));
     switch (node) {
         else => {},
         .section => |si| try w.print("({s})", .{elf.sectionName(si)}),
-        .nav => |nav_index| {
+        .nav => |nmi| {
             const zcu = elf.base.comp.zcu.?;
             const ip = &zcu.intern_pool;
-            const nav = ip.getNav(nav_index);
+            const nav = ip.getNav(nmi.navIndex(elf));
             try w.print("({f}, {f})", .{
                 Type.fromInterned(nav.typeOf(ip)).fmt(.{ .zcu = zcu, .tid = tid }),
                 nav.fqn.fmt(ip),
             });
         },
-        .uav => |uav| {
+        .uav => |umi| {
             const zcu = elf.base.comp.zcu.?;
-            const val: Value = .fromInterned(uav);
+            const val: Value = .fromInterned(umi.uavValue(elf));
             try w.print("({f}, {f})", .{
                 val.typeOf(zcu).fmt(.{ .zcu = zcu, .tid = tid }),
                 val.fmtValue(.{ .zcu = zcu, .tid = tid }),
             });
         },
+        inline .lazy_code, .lazy_const_data => |lmi| try w.print("({f})", .{
+            Type.fromInterned(lmi.lazySymbol(elf).ty).fmt(.{
+                .zcu = elf.base.comp.zcu.?,
+                .tid = tid,
+            }),
+        }),
     }
-    try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
-        @intFromEnum(ni),
-        off,
-        size,
-        mf_node.flags.alignment.toByteUnits(),
-        if (mf_node.flags.fixed) " fixed" else "",
-        if (mf_node.flags.moved) " moved" else "",
-        if (mf_node.flags.resized) " resized" else "",
-        if (mf_node.flags.has_content) " has_content" else "",
-    });
-    var child_ni = mf_node.first;
-    switch (child_ni) {
-        .none => {
-            const file_loc = ni.fileLocation(&elf.mf, false);
-            if (file_loc.size == 0) return;
-            var address = file_loc.offset;
-            const line_len = 0x10;
-            var line_it = std.mem.window(
-                u8,
-                elf.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
-                line_len,
-                line_len,
-            );
-            while (line_it.next()) |line_bytes| : (address += line_len) {
-                try w.splatByteAll(' ', indent + 1);
-                try w.print("{x:0>8}", .{address});
-                for (line_bytes) |byte| try w.print(" {x:0>2}", .{byte});
-                try w.writeByte('\n');
-            }
-        },
-        else => while (child_ni != .none) {
-            try elf.printNode(tid, w, child_ni, indent + 1);
-            child_ni = elf.mf.nodes.items[@intFromEnum(child_ni)].next;
-        },
+    {
+        const mf_node = &elf.mf.nodes.items[@intFromEnum(ni)];
+        const off, const size = mf_node.location().resolve(&elf.mf);
+        try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
+            @intFromEnum(ni),
+            off,
+            size,
+            mf_node.flags.alignment.toByteUnits(),
+            if (mf_node.flags.fixed) " fixed" else "",
+            if (mf_node.flags.moved) " moved" else "",
+            if (mf_node.flags.resized) " resized" else "",
+            if (mf_node.flags.has_content) " has_content" else "",
+        });
+    }
+    var leaf = true;
+    var child_it = ni.children(&elf.mf);
+    while (child_it.next()) |child_ni| {
+        leaf = false;
+        try elf.printNode(tid, w, child_ni, indent + 1);
+    }
+    if (leaf) {
+        const file_loc = ni.fileLocation(&elf.mf, false);
+        if (file_loc.size == 0) return;
+        var address = file_loc.offset;
+        const line_len = 0x10;
+        var line_it = std.mem.window(
+            u8,
+            elf.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
+            line_len,
+            line_len,
+        );
+        while (line_it.next()) |line_bytes| : (address += line_len) {
+            try w.splatByteAll(' ', indent + 1);
+            try w.print("{x:0>8}  ", .{address});
+            for (line_bytes) |byte| try w.print("{x:0>2} ", .{byte});
+            try w.splatByteAll(' ', 3 * (line_len - line_bytes.len) + 1);
+            for (line_bytes) |byte| try w.writeByte(if (std.ascii.isPrint(byte)) byte else '.');
+            try w.writeByte('\n');
+        }
     }
 }
 
src/link/MappedFile.zig
@@ -34,17 +34,28 @@ pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
         .writers = .{},
     };
     errdefer mf.deinit(gpa);
-    const size: u64, const blksize = if (is_windows)
-        .{ try windows.GetFileSizeEx(file.handle), 1 }
-    else stat: {
+    const size: u64, const block_size = stat: {
+        if (is_windows) {
+            var sbi: windows.SYSTEM_BASIC_INFORMATION = undefined;
+            break :stat .{
+                try windows.GetFileSizeEx(file.handle),
+                switch (windows.ntdll.NtQuerySystemInformation(
+                    .SystemBasicInformation,
+                    &sbi,
+                    @sizeOf(windows.SYSTEM_BASIC_INFORMATION),
+                    null,
+                )) {
+                    .SUCCESS => @max(sbi.PageSize, sbi.AllocationGranularity),
+                    else => std.heap.page_size_max,
+                },
+            };
+        }
         const stat = try std.posix.fstat(mf.file.handle);
         if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists;
-        break :stat .{ @bitCast(stat.size), stat.blksize };
+        break :stat .{ @bitCast(stat.size), @max(std.heap.pageSize(), stat.blksize) };
     };
     mf.flags = .{
-        .block_size = .fromByteUnits(
-            std.math.ceilPowerOfTwoAssert(usize, @max(std.heap.pageSize(), blksize)),
-        ),
+        .block_size = .fromByteUnits(std.math.ceilPowerOfTwoAssert(usize, block_size)),
         .copy_file_range_unsupported = false,
         .fallocate_insert_range_unsupported = false,
         .fallocate_punch_hole_unsupported = false,
@@ -90,9 +101,11 @@ pub const Node = extern struct {
         resized: bool,
         /// Whether this node might contain non-zero bytes.
         has_content: bool,
+        /// Whether a moved event on this node bubbles down to children.
+        bubbles_moved: bool,
         unused: @Type(.{ .int = .{
             .signedness = .unsigned,
-            .bits = 32 - @bitSizeOf(std.mem.Alignment) - 5,
+            .bits = 32 - @bitSizeOf(std.mem.Alignment) - 6,
         } }) = 0,
     };
 
@@ -136,6 +149,25 @@ pub const Node = extern struct {
             return &mf.nodes.items[@intFromEnum(ni)];
         }
 
+        pub fn parent(ni: Node.Index, mf: *const MappedFile) Node.Index {
+            return ni.get(mf).parent;
+        }
+
+        pub const ChildIterator = struct {
+            mf: *const MappedFile,
+            ni: Node.Index,
+
+            pub fn next(it: *ChildIterator) ?Node.Index {
+                const ni = it.ni;
+                if (ni == .none) return null;
+                it.ni = ni.get(it.mf).next;
+                return ni;
+            }
+        };
+        pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator {
+            return .{ .mf = mf, .ni = ni.get(mf).first };
+        }
+
         pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
             var child_ni = ni.get(mf).last;
             while (child_ni != .none) {
@@ -147,9 +179,10 @@ pub const Node = extern struct {
         pub fn hasMoved(ni: Node.Index, mf: *const MappedFile) bool {
             var parent_ni = ni;
             while (parent_ni != Node.Index.root) {
-                const parent = parent_ni.get(mf);
-                if (parent.flags.moved) return true;
-                parent_ni = parent.parent;
+                const parent_node = parent_ni.get(mf);
+                if (!parent_node.flags.bubbles_moved) break;
+                if (parent_node.flags.moved) return true;
+                parent_ni = parent_node.parent;
             }
             return false;
         }
@@ -163,12 +196,7 @@ pub const Node = extern struct {
             return node_moved.*;
         }
         fn movedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
-            var parent_ni = ni;
-            while (parent_ni != Node.Index.root) {
-                const parent_node = parent_ni.get(mf);
-                if (parent_node.flags.moved) return;
-                parent_ni = parent_node.parent;
-            }
+            if (ni.hasMoved(mf)) return;
             const node = ni.get(mf);
             node.flags.moved = true;
             if (node.flags.resized) return;
@@ -242,10 +270,10 @@ pub const Node = extern struct {
             var offset, const size = ni.location(mf).resolve(mf);
             var parent_ni = ni;
             while (true) {
-                const parent = parent_ni.get(mf);
-                if (set_has_content) parent.flags.has_content = true;
+                const parent_node = parent_ni.get(mf);
+                if (set_has_content) parent_node.flags.has_content = true;
                 if (parent_ni == .none) break;
-                parent_ni = parent.parent;
+                parent_ni = parent_node.parent;
                 offset += parent_ni.location(mf).resolve(mf)[0];
             }
             return .{ .offset = offset, .size = size };
@@ -449,6 +477,7 @@ fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
             .moved = true,
             .resized = true,
             .has_content = false,
+            .bubbles_moved = opts.add_node.bubbles_moved,
         },
         .location_payload = location_payload,
     };
@@ -471,6 +500,7 @@ pub const AddNodeOptions = struct {
     fixed: bool = false,
     moved: bool = false,
     resized: bool = false,
+    bubbles_moved: bool = true,
 };
 
 pub fn addOnlyChildNode(
src/codegen.zig
@@ -993,6 +993,8 @@ pub fn genNavRef(
             },
             .link_once => unreachable,
         }
+    } else if (lf.cast(.coff2)) |coff| {
+        return .{ .sym_index = @intFromEnum(try coff.navSymbol(zcu, nav_index)) };
     } else {
         const msg = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target});
         return .{ .fail = msg };
src/Compilation.zig
@@ -256,8 +256,8 @@ test_filters: []const []const u8,
 
 link_task_wait_group: WaitGroup = .{},
 link_prog_node: std.Progress.Node = .none,
-link_uav_prog_node: std.Progress.Node = .none,
-link_lazy_prog_node: std.Progress.Node = .none,
+link_const_prog_node: std.Progress.Node = .none,
+link_synth_prog_node: std.Progress.Node = .none,
 
 llvm_opt_bisect_limit: c_int,
 
@@ -1982,13 +1982,13 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
             };
             if (have_zcu and (!need_llvm or use_llvm)) {
                 if (output_mode == .Obj) break :s .zcu;
-                if (options.config.use_new_linker) break :s .zcu;
                 switch (target_util.zigBackend(target, use_llvm)) {
                     else => {},
                     .stage2_aarch64, .stage2_x86_64 => if (target.ofmt == .coff) {
                         break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
                     },
                 }
+                if (options.config.use_new_linker) break :s .zcu;
             }
             if (need_llvm and !build_options.have_llvm) break :s .none; // impossible to build without llvm
             if (is_exe_or_dyn_lib) break :s .lib;
@@ -3081,22 +3081,30 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
         comp.link_prog_node = main_progress_node.start("Linking", 0);
         if (lf.cast(.elf2)) |elf| {
             comp.link_prog_node.increaseEstimatedTotalItems(3);
-            comp.link_uav_prog_node = comp.link_prog_node.start("Constants", 0);
-            comp.link_lazy_prog_node = comp.link_prog_node.start("Synthetics", 0);
+            comp.link_const_prog_node = comp.link_prog_node.start("Constants", 0);
+            comp.link_synth_prog_node = comp.link_prog_node.start("Synthetics", 0);
             elf.mf.update_prog_node = comp.link_prog_node.start("Relocations", elf.mf.updates.items.len);
+        } else if (lf.cast(.coff2)) |coff| {
+            comp.link_prog_node.increaseEstimatedTotalItems(3);
+            comp.link_const_prog_node = comp.link_prog_node.start("Constants", 0);
+            comp.link_synth_prog_node = comp.link_prog_node.start("Synthetics", 0);
+            coff.mf.update_prog_node = comp.link_prog_node.start("Relocations", coff.mf.updates.items.len);
         }
     }
     defer {
         comp.link_prog_node.end();
         comp.link_prog_node = .none;
-        comp.link_uav_prog_node.end();
-        comp.link_uav_prog_node = .none;
-        comp.link_lazy_prog_node.end();
-        comp.link_lazy_prog_node = .none;
+        comp.link_const_prog_node.end();
+        comp.link_const_prog_node = .none;
+        comp.link_synth_prog_node.end();
+        comp.link_synth_prog_node = .none;
         if (comp.bin_file) |lf| {
             if (lf.cast(.elf2)) |elf| {
                 elf.mf.update_prog_node.end();
                 elf.mf.update_prog_node = .none;
+            } else if (lf.cast(.coff2)) |coff| {
+                coff.mf.update_prog_node.end();
+                coff.mf.update_prog_node = .none;
             }
         }
     }
src/dev.zig
@@ -96,6 +96,7 @@ pub const Env = enum {
                 .spirv_backend,
                 .lld_linker,
                 .coff_linker,
+                .coff2_linker,
                 .elf_linker,
                 .elf2_linker,
                 .macho_linker,
@@ -284,6 +285,7 @@ pub const Feature = enum {
 
     lld_linker,
     coff_linker,
+    coff2_linker,
     elf_linker,
     elf2_linker,
     macho_linker,
src/InternPool.zig
@@ -11919,10 +11919,10 @@ pub fn getString(ip: *InternPool, key: []const u8) OptionalNullTerminatedString
     var map_index = hash;
     while (true) : (map_index += 1) {
         map_index &= map_mask;
-        const entry = map.at(map_index);
-        const index = entry.acquire().unwrap() orelse return null;
+        const entry = &map.entries[map_index];
+        const index = entry.value.unwrap() orelse return .none;
         if (entry.hash != hash) continue;
-        if (index.eqlSlice(key, ip)) return index;
+        if (index.eqlSlice(key, ip)) return index.toOptional();
     }
 }
 
src/link.zig
@@ -610,27 +610,20 @@ pub const File = struct {
                         }
                     }
                 }
-                const output_mode = comp.config.output_mode;
-                const link_mode = comp.config.link_mode;
-                base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
-                    .truncate = false,
-                    .read = true,
-                    .mode = determineMode(output_mode, link_mode),
-                });
+                base.file = try emit.root_dir.handle.openFile(emit.sub_path, .{ .mode = .read_write });
             },
-            .elf2 => {
-                const elf = base.cast(.elf2).?;
-                if (base.file == null) {
-                    elf.mf.file = try base.emit.root_dir.handle.createFile(base.emit.sub_path, .{
-                        .truncate = false,
-                        .read = true,
-                        .mode = determineMode(comp.config.output_mode, comp.config.link_mode),
-                    });
-                    base.file = elf.mf.file;
-                    try elf.mf.ensureTotalCapacity(
-                        @intCast(elf.mf.nodes.items[0].location().resolve(&elf.mf)[1]),
-                    );
-                }
+            .elf2, .coff2 => if (base.file == null) {
+                const mf = if (base.cast(.elf2)) |elf|
+                    &elf.mf
+                else if (base.cast(.coff2)) |coff|
+                    &coff.mf
+                else
+                    unreachable;
+                mf.file = try base.emit.root_dir.handle.openFile(base.emit.sub_path, .{
+                    .mode = .read_write,
+                });
+                base.file = mf.file;
+                try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1]));
             },
             .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
             .plan9 => unreachable,
@@ -654,12 +647,9 @@ pub const File = struct {
     pub fn makeExecutable(base: *File) !void {
         dev.check(.make_executable);
         const comp = base.comp;
-        const output_mode = comp.config.output_mode;
-        const link_mode = comp.config.link_mode;
-
-        switch (output_mode) {
+        switch (comp.config.output_mode) {
             .Obj => return,
-            .Lib => switch (link_mode) {
+            .Lib => switch (comp.config.link_mode) {
                 .static => return,
                 .dynamic => {},
             },
@@ -702,15 +692,18 @@ pub const File = struct {
                     }
                 }
             },
-            .elf2 => {
-                const elf = base.cast(.elf2).?;
-                if (base.file) |f| {
-                    elf.mf.unmap();
-                    assert(elf.mf.file.handle == f.handle);
-                    elf.mf.file = undefined;
-                    f.close();
-                    base.file = null;
-                }
+            .elf2, .coff2 => if (base.file) |f| {
+                const mf = if (base.cast(.elf2)) |elf|
+                    &elf.mf
+                else if (base.cast(.coff2)) |coff|
+                    &coff.mf
+                else
+                    unreachable;
+                mf.unmap();
+                assert(mf.file.handle == f.handle);
+                mf.file = undefined;
+                f.close();
+                base.file = null;
             },
             .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
             .plan9 => unreachable,
@@ -828,7 +821,7 @@ pub const File = struct {
             .spirv => {},
             .goff, .xcoff => {},
             .plan9 => unreachable,
-            .elf2 => {},
+            .elf2, .coff2 => {},
             inline else => |tag| {
                 dev.check(tag.devFeature());
                 return @as(*tag.Type(), @fieldParentPtr("base", base)).updateLineNumber(pt, ti_id);
@@ -864,7 +857,7 @@ pub const File = struct {
     pub fn idle(base: *File, tid: Zcu.PerThread.Id) !bool {
         switch (base.tag) {
             else => return false,
-            inline .elf2 => |tag| {
+            inline .elf2, .coff2 => |tag| {
                 dev.check(tag.devFeature());
                 return @as(*tag.Type(), @fieldParentPtr("base", base)).idle(tid);
             },
@@ -874,7 +867,7 @@ pub const File = struct {
     pub fn updateErrorData(base: *File, pt: Zcu.PerThread) !void {
         switch (base.tag) {
             else => {},
-            inline .elf2 => |tag| {
+            inline .elf2, .coff2 => |tag| {
                 dev.check(tag.devFeature());
                 return @as(*tag.Type(), @fieldParentPtr("base", base)).updateErrorData(pt);
             },
@@ -1155,7 +1148,7 @@ pub const File = struct {
         if (base.zcu_object_basename != null) return;
 
         switch (base.tag) {
-            inline .elf2, .wasm => |tag| {
+            inline .elf2, .coff2, .wasm => |tag| {
                 dev.check(tag.devFeature());
                 return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(base.comp.link_prog_node);
             },
@@ -1165,6 +1158,7 @@ pub const File = struct {
 
     pub const Tag = enum {
         coff,
+        coff2,
         elf,
         elf2,
         macho,
@@ -1179,6 +1173,7 @@ pub const File = struct {
         pub fn Type(comptime tag: Tag) type {
             return switch (tag) {
                 .coff => Coff,
+                .coff2 => Coff2,
                 .elf => Elf,
                 .elf2 => Elf2,
                 .macho => MachO,
@@ -1194,7 +1189,7 @@ pub const File = struct {
 
         fn fromObjectFormat(ofmt: std.Target.ObjectFormat, use_new_linker: bool) Tag {
             return switch (ofmt) {
-                .coff => .coff,
+                .coff => if (use_new_linker) .coff2 else .coff,
                 .elf => if (use_new_linker) .elf2 else .elf,
                 .macho => .macho,
                 .wasm => .wasm,
@@ -1280,6 +1275,7 @@ pub const File = struct {
     pub const Lld = @import("link/Lld.zig");
     pub const C = @import("link/C.zig");
     pub const Coff = @import("link/Coff.zig");
+    pub const Coff2 = @import("link/Coff2.zig");
     pub const Elf = @import("link/Elf.zig");
     pub const Elf2 = @import("link/Elf2.zig");
     pub const MachO = @import("link/MachO.zig");
src/target.zig
@@ -233,7 +233,7 @@ pub fn hasLldSupport(ofmt: std.Target.ObjectFormat) bool {
 
 pub fn hasNewLinkerSupport(ofmt: std.Target.ObjectFormat, backend: std.builtin.CompilerBackend) bool {
     return switch (ofmt) {
-        .elf => switch (backend) {
+        .elf, .coff => switch (backend) {
             .stage2_x86_64 => true,
             else => false,
         },
test/behavior/x86_64/binary.zig
@@ -5255,7 +5255,8 @@ inline fn mod(comptime Type: type, lhs: Type, rhs: Type) Type {
     return @mod(lhs, rhs);
 }
 test mod {
-    if (@import("builtin").object_format == .coff and @import("builtin").target.abi != .gnu) return error.SkipZigTest;
+    const builtin = @import("builtin");
+    if (builtin.object_format == .coff and builtin.abi != .gnu) return error.SkipZigTest;
     const test_mod = binary(mod, .{});
     try test_mod.testInts();
     try test_mod.testIntVectors();
test/behavior/cast.zig
@@ -1650,7 +1650,6 @@ test "coerce between pointers of compatible differently-named floats" {
     if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and !builtin.link_libc) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
@@ -2883,7 +2882,6 @@ test "@intFromFloat vector boundary cases" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 
     const S = struct {
         fn case(comptime I: type, unshifted_inputs: [2]f32, expected: [2]I) !void {
test/behavior/export_keyword.zig
@@ -43,7 +43,6 @@ export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion) void {
 }
 
 test "export function alias" {
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 
     _ = struct {
test/behavior/extern.zig
@@ -16,7 +16,6 @@ export var a_mystery_symbol: i32 = 1234;
 
 test "function extern symbol" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 
     const a = @extern(*const fn () callconv(.c) i32, .{ .name = "a_mystery_function" });
@@ -29,7 +28,6 @@ export fn a_mystery_function() i32 {
 
 test "function extern symbol matches extern decl" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 
     const S = struct {
test/behavior/floatop.zig
@@ -158,7 +158,6 @@ test "cmp f80/c_longdouble" {
     if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testCmp(f80);
     try comptime testCmp(f80);
@@ -283,7 +282,6 @@ test "vector cmp f80/c_longdouble" {
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testCmpVector(f80);
     try comptime testCmpVector(f80);
@@ -396,7 +394,6 @@ test "@sqrt f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     if (builtin.os.tag == .freebsd) {
         // TODO https://github.com/ziglang/zig/issues/10875
@@ -526,7 +523,6 @@ test "@sin f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testSin(f80);
     comptime try testSin(f80);
@@ -596,7 +592,6 @@ test "@cos f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testCos(f80);
     try comptime testCos(f80);
@@ -666,7 +661,6 @@ test "@tan f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testTan(f80);
     try comptime testTan(f80);
@@ -736,7 +730,6 @@ test "@exp f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testExp(f80);
     try comptime testExp(f80);
@@ -810,7 +803,6 @@ test "@exp2 f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testExp2(f80);
     try comptime testExp2(f80);
@@ -879,7 +871,6 @@ test "@log f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testLog(f80);
     try comptime testLog(f80);
@@ -946,7 +937,6 @@ test "@log2 f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testLog2(f80);
     try comptime testLog2(f80);
@@ -1019,7 +1009,6 @@ test "@log10 f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testLog10(f80);
     try comptime testLog10(f80);
@@ -1086,7 +1075,6 @@ test "@abs f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testFabs(f80);
     try comptime testFabs(f80);
@@ -1204,7 +1192,6 @@ test "@floor f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
@@ -1295,7 +1282,6 @@ test "@ceil f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
@@ -1388,7 +1374,6 @@ test "@trunc f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
         // https://github.com/ziglang/zig/issues/12602
@@ -1485,7 +1470,6 @@ test "neg f80/f128/c_longdouble" {
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     try testNeg(f80);
     try comptime testNeg(f80);
@@ -1741,7 +1725,6 @@ test "comptime calls are only memoized when float arguments are bit-for-bit equa
 test "result location forwarded through unary float builtins" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 
test/behavior/import_c_keywords.zig
@@ -31,7 +31,6 @@ test "import c keywords" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     try std.testing.expect(int == .c_keyword_variable);
test/behavior/math.zig
@@ -1416,7 +1416,6 @@ test "remainder division" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
@@ -1425,6 +1424,8 @@ test "remainder division" {
         return error.SkipZigTest;
     }
 
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff and builtin.abi != .gnu) return error.SkipZigTest;
+
     try comptime remdiv(f16);
     try comptime remdiv(f32);
     try comptime remdiv(f64);
@@ -1496,9 +1497,10 @@ test "float modulo division using @mod" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff and builtin.abi != .gnu) return error.SkipZigTest;
+
     try comptime fmod(f16);
     try comptime fmod(f32);
     try comptime fmod(f64);
@@ -1686,7 +1688,6 @@ test "signed zeros are represented properly" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest() !void {
@@ -1824,7 +1825,8 @@ test "float divide by zero" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+
+    if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff and builtin.abi != .gnu) return error.SkipZigTest;
 
     const S = struct {
         fn doTheTest(comptime F: type, zero: F, one: F) !void {
test/behavior/multiple_externs_with_conflicting_types.zig
@@ -14,7 +14,6 @@ test "call extern function defined with conflicting type" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 
     @import("conflicting_externs/a.zig").issue529(null);
test/incremental/add_decl
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 //#target=wasm32-wasi-selfhosted
test/incremental/add_decl_namespaced
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 //#target=wasm32-wasi-selfhosted
test/incremental/analysis_error_and_syntax_error
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/bad_import
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/change_embed_file
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/change_enum_tag_type
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/change_exports
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 
test/incremental/change_fn_type
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #update=initial version
test/incremental/change_generic_line_number
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=wasm32-wasi-selfhosted
 #update=initial version
 #file=main.zig
test/incremental/change_line_number
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=wasm32-wasi-selfhosted
 #update=initial version
 #file=main.zig
test/incremental/change_module
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/change_panic_handler
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #update=initial version
test/incremental/change_panic_handler_explicit
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #update=initial version
test/incremental/change_shift_op
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/change_struct_same_fields
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/change_zon_file
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 //#target=wasm32-wasi-selfhosted
test/incremental/change_zon_file_no_result_type
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 //#target=wasm32-wasi-selfhosted
test/incremental/compile_error_then_log
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/compile_log
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/delete_comptime_decls
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/dependency_on_type_of_inferred_global
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/fix_astgen_failure
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/function_becomes_inline
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #update=non-inline version
test/incremental/hello
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/make_decl_pub
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/modify_inline_fn
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/move_src
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/no_change_preserves_tag_names
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 //#target=wasm32-wasi-selfhosted
test/incremental/recursive_function_becomes_non_recursive
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/remove_enum_field
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/remove_invalid_union_backing_enum
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/temporary_parse_error
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/type_becomes_comptime_only
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted
test/incremental/unreferenced_error
@@ -1,4 +1,5 @@
 #target=x86_64-linux-selfhosted
+#target=x86_64-windows-selfhosted
 #target=x86_64-linux-cbe
 #target=x86_64-windows-cbe
 #target=wasm32-wasi-selfhosted