Commit 2bef0715c7

Andrew Kelley <andrew@ziglang.org>
2023-12-12 05:16:49
move a large chunk of linker logic away from "options"
These options are only supposed to be provided to the initialization functions and resolved there, with the computed values then stored in the appropriate place (the base struct or the object-format-specific structs). Many more to go...
1 parent 12de7e3
src/codegen/llvm.zig
@@ -854,15 +854,21 @@ pub const Object = struct {
     pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr);
 
     pub fn create(arena: Allocator, options: link.File.OpenOptions) !*Object {
-        const gpa = options.comp.gpa;
-        const llvm_target_triple = try targetTriple(arena, options.target);
+        if (build_options.only_c) unreachable;
+        const comp = options.comp;
+        const gpa = comp.gpa;
+        const target = comp.root_mod.resolved_target.result;
+        const llvm_target_triple = try targetTriple(arena, target);
+        const strip = comp.root_mod.strip;
+        const optimize_mode = comp.root_mod.optimize_mode;
+        const pic = comp.root_mod.pic;
 
         var builder = try Builder.init(.{
             .allocator = gpa,
-            .use_lib_llvm = options.use_lib_llvm,
-            .strip = options.strip or !options.use_lib_llvm, // TODO
-            .name = options.root_name,
-            .target = options.target,
+            .use_lib_llvm = comp.config.use_lib_llvm,
+            .strip = strip or !comp.config.use_lib_llvm, // TODO
+            .name = comp.root_name,
+            .target = target,
             .triple = llvm_target_triple,
         });
         errdefer builder.deinit();
@@ -870,10 +876,18 @@ pub const Object = struct {
         var target_machine: if (build_options.have_llvm) *llvm.TargetMachine else void = undefined;
         var target_data: if (build_options.have_llvm) *llvm.TargetData else void = undefined;
         if (builder.useLibLlvm()) {
-            if (!options.strip) {
-                switch (options.target.ofmt) {
-                    .coff => builder.llvm.module.?.addModuleCodeViewFlag(),
-                    else => builder.llvm.module.?.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"),
+            debug_info: {
+                const debug_format = options.debug_format orelse b: {
+                    if (strip) break :b .strip;
+                    break :b switch (target.ofmt) {
+                        .coff => .code_view,
+                        else => .{ .dwarf = .@"32" },
+                    };
+                };
+                switch (debug_format) {
+                    .strip => break :debug_info,
+                    .code_view => builder.llvm.module.?.addModuleCodeViewFlag(),
+                    .dwarf => |f| builder.llvm.module.?.addModuleDebugInfoFlag(f == .@"64"),
                 }
                 builder.llvm.di_builder = builder.llvm.module.?.createDIBuilder(true);
 
@@ -892,8 +906,8 @@ pub const Object = struct {
                 // TODO: the only concern I have with this is WASI as either host or target, should
                 // we leave the paths as relative then?
                 const compile_unit_dir_z = blk: {
-                    if (options.module) |mod| m: {
-                        const d = try mod.root_mod.root.joinStringZ(arena, "");
+                    if (comp.module) |zcu| m: {
+                        const d = try zcu.root_mod.root.joinStringZ(arena, "");
                         if (d.len == 0) break :m;
                         if (std.fs.path.isAbsolute(d)) break :blk d;
                         break :blk std.fs.realpathAlloc(arena, d) catch d;
@@ -903,9 +917,9 @@ pub const Object = struct {
 
                 builder.llvm.di_compile_unit = builder.llvm.di_builder.?.createCompileUnit(
                     DW.LANG.C99,
-                    builder.llvm.di_builder.?.createFile(options.root_name, compile_unit_dir_z),
+                    builder.llvm.di_builder.?.createFile(comp.root_name, compile_unit_dir_z),
                     producer.slice(&builder).?,
-                    options.optimize_mode != .Debug,
+                    optimize_mode != .Debug,
                     "", // flags
                     0, // runtime version
                     "", // split name
@@ -914,19 +928,19 @@ pub const Object = struct {
                 );
             }
 
-            const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug)
+            const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug)
                 .None
             else
                 .Aggressive;
 
-            const reloc_mode: llvm.RelocMode = if (options.pic)
+            const reloc_mode: llvm.RelocMode = if (pic)
                 .PIC
-            else if (options.link_mode == .Dynamic)
+            else if (comp.config.link_mode == .Dynamic)
                 llvm.RelocMode.DynamicNoPIC
             else
                 .Static;
 
-            const code_model: llvm.CodeModel = switch (options.machine_code_model) {
+            const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) {
                 .default => .Default,
                 .tiny => .Tiny,
                 .small => .Small,
@@ -941,15 +955,15 @@ pub const Object = struct {
             target_machine = llvm.TargetMachine.create(
                 builder.llvm.target.?,
                 builder.target_triple.slice(&builder).?,
-                if (options.target.cpu.model.llvm_name) |s| s.ptr else null,
-                options.llvm_cpu_features,
+                if (target.cpu.model.llvm_name) |s| s.ptr else null,
+                comp.root_mod.resolved_target.llvm_cpu_features.?,
                 opt_level,
                 reloc_mode,
                 code_model,
-                options.function_sections,
-                options.data_sections,
+                options.function_sections orelse false,
+                options.data_sections orelse false,
                 float_abi,
-                if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null,
+                if (target_util.llvmMachineAbi(target)) |s| s.ptr else null,
             );
             errdefer target_machine.dispose();
 
@@ -958,15 +972,15 @@ pub const Object = struct {
 
             builder.llvm.module.?.setModuleDataLayout(target_data);
 
-            if (options.pic) builder.llvm.module.?.setModulePICLevel();
-            if (options.pie) builder.llvm.module.?.setModulePIELevel();
+            if (pic) builder.llvm.module.?.setModulePICLevel();
+            if (comp.config.pie) builder.llvm.module.?.setModulePIELevel();
             if (code_model != .Default) builder.llvm.module.?.setModuleCodeModel(code_model);
 
-            if (options.opt_bisect_limit >= 0) {
-                builder.llvm.context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit));
+            if (comp.llvm_opt_bisect_limit >= 0) {
+                builder.llvm.context.setOptBisectLimit(comp.llvm_opt_bisect_limit);
             }
 
-            builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = options.target }});
+            builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = target }});
             if (std.debug.runtime_safety) {
                 const rep = target_data.stringRep();
                 defer llvm.disposeMessage(rep);
@@ -981,13 +995,13 @@ pub const Object = struct {
         obj.* = .{
             .gpa = gpa,
             .builder = builder,
-            .module = options.module.?,
+            .module = comp.module.?,
             .di_map = .{},
             .di_builder = if (builder.useLibLlvm()) builder.llvm.di_builder else null, // TODO
             .di_compile_unit = if (builder.useLibLlvm()) builder.llvm.di_compile_unit else null,
             .target_machine = target_machine,
             .target_data = target_data,
-            .target = options.target,
+            .target = target,
             .decl_map = .{},
             .anon_decl_map = .{},
             .named_enum_map = .{},
src/Compilation/Config.zig
@@ -137,7 +137,11 @@ pub fn resolve(options: Options) !Config {
     const use_llvm = b: {
         // If emitting to LLVM bitcode object format, must use LLVM backend.
         if (options.emit_llvm_ir or options.emit_llvm_bc) {
-            if (options.use_llvm == false) return error.EmittingLlvmModuleRequiresLlvmBackend;
+            if (options.use_llvm == false)
+                return error.EmittingLlvmModuleRequiresLlvmBackend;
+            if (!target_util.hasLlvmSupport(target, target.ofmt))
+                return error.LlvmLacksTargetSupport;
+
             break :b true;
         }
 
@@ -147,6 +151,12 @@ pub fn resolve(options: Options) !Config {
             break :b false;
         }
 
+        // If Zig does not support the target, then we can't use it.
+        if (target_util.zigBackend(target, false) == .other) {
+            if (options.use_llvm == false) return error.ZigLacksTargetSupport;
+            break :b true;
+        }
+
         if (options.use_llvm) |x| break :b x;
 
         // If we have no zig code to compile, no need for LLVM.
@@ -166,16 +176,23 @@ pub fn resolve(options: Options) !Config {
         break :b !target_util.selfHostedBackendIsAsRobustAsLlvm(target);
     };
 
-    if (!use_lib_llvm and use_llvm and options.emit_bin) {
-        // Explicit request to use LLVM to produce an object file, but without
-        // using LLVM libraries. Impossible.
-        return error.EmittingBinaryRequiresLlvmLibrary;
+    if (options.emit_bin) {
+        if (!use_lib_llvm and use_llvm) {
+            // Explicit request to use LLVM to produce an object file, but without
+            // using LLVM libraries. Impossible.
+            return error.EmittingBinaryRequiresLlvmLibrary;
+        }
+
+        if (target_util.zigBackend(target, use_llvm) == .other) {
+            // There is no compiler backend available for this target.
+            return error.ZigLacksTargetSupport;
+        }
     }
 
     // Make a decision on whether to use LLD or our own linker.
     const use_lld = b: {
-        if (target.isDarwin()) {
-            if (options.use_lld == true) return error.LldIncompatibleOs;
+        if (!target_util.hasLldSupport(target.ofmt)) {
+            if (options.use_lld == true) return error.LldIncompatibleObjectFormat;
             break :b false;
         }
 
@@ -184,11 +201,6 @@ pub fn resolve(options: Options) !Config {
             break :b false;
         }
 
-        if (target.ofmt == .c) {
-            if (options.use_lld == true) return error.LldIncompatibleObjectFormat;
-            break :b false;
-        }
-
         if (options.lto == true) {
             if (options.use_lld == false) return error.LtoRequiresLld;
             break :b true;
src/link/Coff/lld.zig
@@ -25,8 +25,8 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
-    const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
+    const directory = self.base.emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
 
     // If there is no Zig code to compile, then we should skip flushing the output file because it
     // will not be part of the linker line anyway.
@@ -50,6 +50,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
     const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
     const link_in_crt = self.base.options.link_libc and is_exe_or_dyn_lib;
     const target = self.base.options.target;
+    const optimize_mode = self.base.comp.root_mod.optimize_mode;
 
     // See link/Elf.zig for comments on how this mechanism works.
     const id_symlink_basename = "lld.id";
@@ -79,7 +80,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
         }
         try man.addOptionalFile(module_obj_path);
         man.hash.addOptionalBytes(self.base.options.entry);
-        man.hash.addOptional(self.base.options.stack_size_override);
+        man.hash.add(self.base.stack_size);
         man.hash.addOptional(self.base.options.image_base_override);
         man.hash.addListOfBytes(self.base.options.lib_dirs);
         man.hash.add(self.base.options.skip_linker_dependencies);
@@ -93,14 +94,14 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
                 }
             }
         }
-        try link.hashAddSystemLibs(&man, self.base.options.system_libs);
+        try link.hashAddSystemLibs(&man, self.base.comp.system_libs);
         man.hash.addListOfBytes(self.base.options.force_undefined_symbols.keys());
         man.hash.addOptional(self.base.options.subsystem);
         man.hash.add(self.base.options.is_test);
         man.hash.add(self.base.options.tsaware);
         man.hash.add(self.base.options.nxcompat);
         man.hash.add(self.base.options.dynamicbase);
-        man.hash.addOptional(self.base.options.allow_shlib_undefined);
+        man.hash.addOptional(self.base.allow_shlib_undefined);
         // strip does not need to go into the linker hash because it is part of the hash namespace
         man.hash.addOptional(self.base.options.major_subsystem_version);
         man.hash.addOptional(self.base.options.minor_subsystem_version);
@@ -185,15 +186,14 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
             try argv.append(try allocPrint(arena, "-VERSION:{}.{}", .{ version.major, version.minor }));
         }
         if (self.base.options.lto) {
-            switch (self.base.options.optimize_mode) {
+            switch (optimize_mode) {
                 .Debug => {},
                 .ReleaseSmall => try argv.append("-OPT:lldlto=2"),
                 .ReleaseFast, .ReleaseSafe => try argv.append("-OPT:lldlto=3"),
             }
         }
         if (self.base.options.output_mode == .Exe) {
-            const stack_size = self.base.options.stack_size_override orelse 16777216;
-            try argv.append(try allocPrint(arena, "-STACK:{d}", .{stack_size}));
+            try argv.append(try allocPrint(arena, "-STACK:{d}", .{self.base.stack_size}));
         }
         if (self.base.options.image_base_override) |image_base| {
             try argv.append(try std.fmt.allocPrint(arena, "-BASE:{d}", .{image_base}));
@@ -232,10 +232,8 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
         if (!self.base.options.dynamicbase) {
             try argv.append("-dynamicbase:NO");
         }
-        if (self.base.options.allow_shlib_undefined) |allow_shlib_undefined| {
-            if (allow_shlib_undefined) {
-                try argv.append("-FORCE:UNRESOLVED");
-            }
+        if (self.base.allow_shlib_undefined) {
+            try argv.append("-FORCE:UNRESOLVED");
         }
 
         try argv.append(try allocPrint(arena, "-OUT:{s}", .{full_out_path}));
@@ -419,7 +417,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
                         try argv.append(try comp.get_libc_crt_file(arena, "uuid.lib"));
 
                         for (mingw.always_link_libs) |name| {
-                            if (!self.base.options.system_libs.contains(name)) {
+                            if (!self.base.comp.system_libs.contains(name)) {
                                 const lib_basename = try allocPrint(arena, "{s}.lib", .{name});
                                 try argv.append(try comp.get_libc_crt_file(arena, lib_basename));
                             }
@@ -429,7 +427,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
                             .Dynamic => "",
                             .Static => "lib",
                         };
-                        const d_str = switch (self.base.options.optimize_mode) {
+                        const d_str = switch (optimize_mode) {
                             .Debug => "d",
                             else => "",
                         };
@@ -489,8 +487,8 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
             if (comp.compiler_rt_lib) |lib| try argv.append(lib.full_object_path);
         }
 
-        try argv.ensureUnusedCapacity(self.base.options.system_libs.count());
-        for (self.base.options.system_libs.keys()) |key| {
+        try argv.ensureUnusedCapacity(self.base.comp.system_libs.count());
+        for (self.base.comp.system_libs.keys()) |key| {
             const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
             if (comp.crt_files.get(lib_basename)) |crt_file| {
                 argv.appendAssumeCapacity(crt_file.full_object_path);
@@ -516,7 +514,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
             return error.DllImportLibraryNotFound;
         }
 
-        if (self.base.options.verbose_link) {
+        if (self.base.comp.verbose_link) {
             // Skip over our own name so that the LLD linker name is the first argv item.
             Compilation.dump_argv(argv.items[1..]);
         }
src/link/Elf/ZigObject.zig
@@ -76,7 +76,7 @@ pub const symbol_mask: u32 = 0x7fffffff;
 pub const SHN_ATOM: u16 = 0x100;
 
 pub fn init(self: *ZigObject, elf_file: *Elf) !void {
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
 
     try self.atoms.append(gpa, 0); // null input section
     try self.relocs.append(gpa, .{}); // null relocs section
@@ -96,7 +96,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf) !void {
     esym.st_shndx = elf.SHN_ABS;
     symbol_ptr.esym_index = esym_index;
 
-    if (!elf_file.base.options.strip) {
+    if (elf_file.base.debug_format != .strip) {
         self.dwarf = Dwarf.init(gpa, &elf_file.base, .dwarf32);
     }
 }
@@ -155,13 +155,13 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
 pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
     // Handle any lazy symbols that were emitted by incremental compilation.
     if (self.lazy_syms.getPtr(.none)) |metadata| {
-        const module = elf_file.base.options.module.?;
+        const zcu = elf_file.base.comp.module.?;
 
         // Most lazy symbols can be updated on first use, but
         // anyerror needs to wait for everything to be flushed.
         if (metadata.text_state != .unused) self.updateLazySymbol(
             elf_file,
-            link.File.LazySymbol.initDecl(.code, null, module),
+            link.File.LazySymbol.initDecl(.code, null, zcu),
             metadata.text_symbol_index,
         ) catch |err| return switch (err) {
             error.CodegenFail => error.FlushFailure,
@@ -169,7 +169,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
         };
         if (metadata.rodata_state != .unused) self.updateLazySymbol(
             elf_file,
-            link.File.LazySymbol.initDecl(.const_data, null, module),
+            link.File.LazySymbol.initDecl(.const_data, null, zcu),
             metadata.rodata_symbol_index,
         ) catch |err| return switch (err) {
             error.CodegenFail => error.FlushFailure,
@@ -182,7 +182,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
     }
 
     if (self.dwarf) |*dw| {
-        try dw.flushModule(elf_file.base.options.module.?);
+        const zcu = elf_file.base.comp.module.?;
+        try dw.flushModule(zcu);
 
         // TODO I need to re-think how to handle ZigObject's debug sections AND debug sections
         // extracted from input object files correctly.
@@ -195,7 +196,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
             const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
             const low_pc = text_shdr.sh_addr;
             const high_pc = text_shdr.sh_addr + text_shdr.sh_size;
-            try dw.writeDbgInfoHeader(elf_file.base.options.module.?, low_pc, high_pc);
+            try dw.writeDbgInfoHeader(zcu, low_pc, high_pc);
             self.debug_info_header_dirty = false;
         }
 
@@ -268,7 +269,7 @@ pub fn addGlobalEsym(self: *ZigObject, allocator: Allocator) !Symbol.Index {
 }
 
 pub fn addAtom(self: *ZigObject, elf_file: *Elf) !Symbol.Index {
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
     const atom_index = try elf_file.addAtom();
     const symbol_index = try elf_file.addSymbol();
     const esym_index = try self.addLocalEsym(gpa);
@@ -411,6 +412,7 @@ pub fn allocateTlvAtoms(self: ZigObject, elf_file: *Elf) void {
 }
 
 pub fn scanRelocs(self: *ZigObject, elf_file: *Elf, undefs: anytype) !void {
+    const gpa = elf_file.base.comp.gpa;
     for (self.atoms.items) |atom_index| {
         const atom = elf_file.atom(atom_index) orelse continue;
         if (!atom.flags.alive) continue;
@@ -421,7 +423,7 @@ pub fn scanRelocs(self: *ZigObject, elf_file: *Elf, undefs: anytype) !void {
             // Perhaps it would make sense to save the code until flushModule where we
             // would free all of generated code?
             const code = try self.codeAlloc(elf_file, atom_index);
-            defer elf_file.base.allocator.free(code);
+            defer gpa.free(code);
             try atom.scanRelocs(elf_file, code, undefs);
         } else try atom.scanRelocs(elf_file, null, undefs);
     }
@@ -447,7 +449,7 @@ pub fn markLive(self: *ZigObject, elf_file: *Elf) void {
 /// We need this so that we can write to an archive.
 /// TODO implement writing ZigObject data directly to a buffer instead.
 pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
     const shsize: u64 = switch (elf_file.ptr_width) {
         .p32 => @sizeOf(elf.Elf32_Shdr),
         .p64 => @sizeOf(elf.Elf64_Shdr),
@@ -465,7 +467,7 @@ pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
 }
 
 pub fn updateArSymtab(self: ZigObject, ar_symtab: *Archive.ArSymtab, elf_file: *Elf) error{OutOfMemory}!void {
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
 
     try ar_symtab.symtab.ensureUnusedCapacity(gpa, self.globals().len);
 
@@ -508,7 +510,7 @@ pub fn addAtomsToRelaSections(self: ZigObject, elf_file: *Elf) !void {
         const out_shdr = elf_file.shdrs.items[out_shndx];
         if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
 
-        const gpa = elf_file.base.allocator;
+        const gpa = elf_file.base.comp.gpa;
         const sec = elf_file.output_rela_sections.getPtr(out_shndx).?;
         try sec.atom_list.append(gpa, atom_index);
     }
@@ -602,7 +604,7 @@ pub fn asFile(self: *ZigObject) File {
 /// Returns atom's code.
 /// Caller owns the memory.
 pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
     const atom = elf_file.atom(atom_index).?;
     assert(atom.file_index == self.index);
     const shdr = &elf_file.shdrs.items[atom.outputShndx().?];
@@ -668,8 +670,8 @@ pub fn lowerAnonDecl(
     explicit_alignment: InternPool.Alignment,
     src_loc: Module.SrcLoc,
 ) !codegen.Result {
-    const gpa = elf_file.base.allocator;
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
     const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
     const decl_alignment = switch (explicit_alignment) {
         .none => ty.abiAlignment(mod),
@@ -716,8 +718,8 @@ pub fn getOrCreateMetadataForLazySymbol(
     elf_file: *Elf,
     lazy_sym: link.File.LazySymbol,
 ) !Symbol.Index {
-    const gpa = elf_file.base.allocator;
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
     const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod));
     errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
     if (!gop.found_existing) gop.value_ptr.* = .{};
@@ -752,25 +754,28 @@ pub fn getOrCreateMetadataForLazySymbol(
 }
 
 fn freeUnnamedConsts(self: *ZigObject, elf_file: *Elf, decl_index: InternPool.DeclIndex) void {
+    const gpa = elf_file.base.comp.gpa;
     const unnamed_consts = self.unnamed_consts.getPtr(decl_index) orelse return;
     for (unnamed_consts.items) |sym_index| {
         self.freeDeclMetadata(elf_file, sym_index);
     }
-    unnamed_consts.clearAndFree(elf_file.base.allocator);
+    unnamed_consts.clearAndFree(gpa);
 }
 
 fn freeDeclMetadata(self: *ZigObject, elf_file: *Elf, sym_index: Symbol.Index) void {
     _ = self;
+    const gpa = elf_file.base.comp.gpa;
     const sym = elf_file.symbol(sym_index);
     sym.atom(elf_file).?.free(elf_file);
     log.debug("adding %{d} to local symbols free list", .{sym_index});
-    elf_file.symbols_free_list.append(elf_file.base.allocator, sym_index) catch {};
+    elf_file.symbols_free_list.append(gpa, sym_index) catch {};
     elf_file.symbols.items[sym_index] = .{};
     // TODO free GOT entry here
 }
 
 pub fn freeDecl(self: *ZigObject, elf_file: *Elf, decl_index: InternPool.DeclIndex) void {
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
     const decl = mod.declPtr(decl_index);
 
     log.debug("freeDecl {*}", .{decl});
@@ -780,7 +785,7 @@ pub fn freeDecl(self: *ZigObject, elf_file: *Elf, decl_index: InternPool.DeclInd
         const sym_index = kv.value.symbol_index;
         self.freeDeclMetadata(elf_file, sym_index);
         self.freeUnnamedConsts(elf_file, decl_index);
-        kv.value.exports.deinit(elf_file.base.allocator);
+        kv.value.exports.deinit(gpa);
     }
 
     if (self.dwarf) |*dw| {
@@ -793,15 +798,16 @@ pub fn getOrCreateMetadataForDecl(
     elf_file: *Elf,
     decl_index: InternPool.DeclIndex,
 ) !Symbol.Index {
-    const gop = try self.decls.getOrPut(elf_file.base.allocator, decl_index);
+    const gpa = elf_file.base.comp.gpa;
+    const gop = try self.decls.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
-        const single_threaded = elf_file.base.options.single_threaded;
+        const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
         const symbol_index = try self.addAtom(elf_file);
-        const mod = elf_file.base.options.module.?;
+        const mod = elf_file.base.comp.module.?;
         const decl = mod.declPtr(decl_index);
         const sym = elf_file.symbol(symbol_index);
         if (decl.getOwnedVariable(mod)) |variable| {
-            if (variable.is_threadlocal and !single_threaded) {
+            if (variable.is_threadlocal and any_non_single_threaded) {
                 sym.flags.is_tls = true;
             }
         }
@@ -820,13 +826,13 @@ fn getDeclShdrIndex(
     code: []const u8,
 ) error{OutOfMemory}!u16 {
     _ = self;
-    const mod = elf_file.base.options.module.?;
-    const single_threaded = elf_file.base.options.single_threaded;
+    const mod = elf_file.base.comp.module.?;
+    const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
     const shdr_index = switch (decl.ty.zigTypeTag(mod)) {
         .Fn => elf_file.zig_text_section_index.?,
         else => blk: {
             if (decl.getOwnedVariable(mod)) |variable| {
-                if (variable.is_threadlocal and !single_threaded) {
+                if (variable.is_threadlocal and any_non_single_threaded) {
                     const is_all_zeroes = for (code) |byte| {
                         if (byte != 0) break false;
                     } else true;
@@ -846,9 +852,12 @@ fn getDeclShdrIndex(
                 }
                 if (variable.is_const) break :blk elf_file.zig_data_rel_ro_section_index.?;
                 if (Value.fromInterned(variable.init).isUndefDeep(mod)) {
-                    const mode = elf_file.base.options.optimize_mode;
-                    if (mode == .Debug or mode == .ReleaseSafe) break :blk elf_file.zig_data_section_index.?;
-                    break :blk elf_file.zig_bss_section_index.?;
+                    // TODO: get the optimize_mode from the Module that owns the decl instead
+                    // of using the root module here.
+                    break :blk switch (elf_file.base.comp.root_mod.optimize_mode) {
+                        .Debug, .ReleaseSafe => elf_file.zig_data_section_index.?,
+                        .ReleaseFast, .ReleaseSmall => elf_file.zig_bss_section_index.?,
+                    };
                 }
                 // TODO I blatantly copied the logic from the Wasm linker, but is there a less
                 // intrusive check for all zeroes than this?
@@ -873,8 +882,8 @@ fn updateDeclCode(
     code: []const u8,
     stt_bits: u8,
 ) !void {
-    const gpa = elf_file.base.allocator;
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
     const decl = mod.declPtr(decl_index);
     const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
 
@@ -971,8 +980,8 @@ fn updateTlv(
     shndx: u16,
     code: []const u8,
 ) !void {
-    const gpa = elf_file.base.allocator;
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
     const decl = mod.declPtr(decl_index);
     const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
 
@@ -1026,6 +1035,7 @@ pub fn updateFunc(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const gpa = elf_file.base.comp.gpa;
     const func = mod.funcInfo(func_index);
     const decl_index = func.owner_decl;
     const decl = mod.declPtr(decl_index);
@@ -1034,7 +1044,7 @@ pub fn updateFunc(
     self.freeUnnamedConsts(elf_file, decl_index);
     elf_file.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file);
 
-    var code_buffer = std.ArrayList(u8).init(elf_file.base.allocator);
+    var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
     var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
@@ -1117,7 +1127,8 @@ pub fn updateDecl(
     const sym_index = try self.getOrCreateMetadataForDecl(elf_file, decl_index);
     elf_file.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file);
 
-    var code_buffer = std.ArrayList(u8).init(elf_file.base.allocator);
+    const gpa = elf_file.base.comp.gpa;
+    var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
     var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
@@ -1179,8 +1190,8 @@ fn updateLazySymbol(
     sym: link.File.LazySymbol,
     symbol_index: Symbol.Index,
 ) !void {
-    const gpa = elf_file.base.allocator;
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
 
     var required_alignment: InternPool.Alignment = .none;
     var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1261,8 +1272,8 @@ pub fn lowerUnnamedConst(
     typed_value: TypedValue,
     decl_index: InternPool.DeclIndex,
 ) !u32 {
-    const gpa = elf_file.base.allocator;
-    const mod = elf_file.base.options.module.?;
+    const gpa = elf_file.base.comp.gpa;
+    const mod = elf_file.base.comp.module.?;
     const gop = try self.unnamed_consts.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
         gop.value_ptr.* = .{};
@@ -1308,7 +1319,7 @@ fn lowerConst(
     output_section_index: u16,
     src_loc: Module.SrcLoc,
 ) !LowerConstResult {
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
 
     var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
@@ -1364,7 +1375,7 @@ pub fn updateExports(
     const tracy = trace(@src());
     defer tracy.end();
 
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
     const metadata = switch (exported) {
         .decl_index => |decl_index| blk: {
             _ = try self.getOrCreateMetadataForDecl(elf_file, decl_index);
@@ -1467,7 +1478,7 @@ pub fn deleteDeclExport(
     name: InternPool.NullTerminatedString,
 ) void {
     const metadata = self.decls.getPtr(decl_index) orelse return;
-    const mod = elf_file.base.options.module.?;
+    const mod = elf_file.base.comp.module.?;
     const exp_name = mod.intern_pool.stringToSlice(name);
     const esym_index = metadata.@"export"(self, exp_name) orelse return;
     log.debug("deleting export '{s}'", .{exp_name});
@@ -1485,7 +1496,7 @@ pub fn deleteDeclExport(
 
 pub fn getGlobalSymbol(self: *ZigObject, elf_file: *Elf, name: []const u8, lib_name: ?[]const u8) !u32 {
     _ = lib_name;
-    const gpa = elf_file.base.allocator;
+    const gpa = elf_file.base.comp.gpa;
     const off = try self.strtab.insert(gpa, name);
     const lookup_gop = try self.globals_lookup.getOrPut(gpa, off);
     if (!lookup_gop.found_existing) {
src/link/MachO/zld.zig
@@ -6,20 +6,21 @@ pub fn linkWithZld(
     const tracy = trace(@src());
     defer tracy.end();
 
-    const gpa = macho_file.base.allocator;
-    const options = &macho_file.base.options;
-    const target = options.target;
+    const gpa = macho_file.base.comp.gpa;
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
+    const emit = macho_file.base.emit;
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
-    const directory = options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{options.emit.?.sub_path});
+    const directory = emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{emit.sub_path});
+    const opt_zcu = macho_file.base.comp.module;
 
     // If there is no Zig code to compile, then we should skip flushing the output file because it
     // will not be part of the linker line anyway.
-    const module_obj_path: ?[]const u8 = if (options.module != null) blk: {
+    const module_obj_path: ?[]const u8 = if (opt_zcu != null) blk: {
         try macho_file.flushModule(comp, prog_node);
 
         if (fs.path.dirname(full_out_path)) |dirname| {
@@ -34,22 +35,24 @@ pub fn linkWithZld(
     sub_prog_node.context.refresh();
     defer sub_prog_node.end();
 
+    const output_mode = macho_file.base.comp.config.output_mode;
+    const link_mode = macho_file.base.comp.config.link_mode;
     const cpu_arch = target.cpu.arch;
-    const is_lib = options.output_mode == .Lib;
-    const is_dyn_lib = options.link_mode == .Dynamic and is_lib;
-    const is_exe_or_dyn_lib = is_dyn_lib or options.output_mode == .Exe;
-    const stack_size = options.stack_size_override orelse 0;
-    const is_debug_build = options.optimize_mode == .Debug;
-    const gc_sections = options.gc_sections orelse !is_debug_build;
+    const is_lib = output_mode == .Lib;
+    const is_dyn_lib = link_mode == .Dynamic and is_lib;
+    const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
+    const stack_size = macho_file.base.stack_size;
 
     const id_symlink_basename = "zld.id";
 
     var man: Cache.Manifest = undefined;
-    defer if (!options.disable_lld_caching) man.deinit();
+    defer if (!macho_file.base.disable_lld_caching) man.deinit();
 
     var digest: [Cache.hex_digest_len]u8 = undefined;
 
-    if (!options.disable_lld_caching) {
+    const objects = macho_file.base.comp.objects;
+
+    if (!macho_file.base.disable_lld_caching) {
         man = comp.cache_parent.obtain();
 
         // We are about to obtain this lock, so here we give other processes a chance first.
@@ -57,7 +60,7 @@ pub fn linkWithZld(
 
         comptime assert(Compilation.link_hash_implementation_version == 10);
 
-        for (options.objects) |obj| {
+        for (objects) |obj| {
             _ = try man.addFile(obj.path, null);
             man.hash.add(obj.must_link);
         }
@@ -68,24 +71,22 @@ pub fn linkWithZld(
         // We can skip hashing libc and libc++ components that we are in charge of building from Zig
         // installation sources because they are always a product of the compiler version + target information.
         man.hash.add(stack_size);
-        man.hash.addOptional(options.pagezero_size);
-        man.hash.addOptional(options.headerpad_size);
-        man.hash.add(options.headerpad_max_install_names);
-        man.hash.add(gc_sections);
-        man.hash.add(options.dead_strip_dylibs);
-        man.hash.add(options.strip);
-        man.hash.addListOfBytes(options.lib_dirs);
-        man.hash.addListOfBytes(options.framework_dirs);
-        try link.hashAddFrameworks(&man, options.frameworks);
-        man.hash.addListOfBytes(options.rpath_list);
+        man.hash.add(macho_file.pagezero_vmsize);
+        man.hash.add(macho_file.headerpad_size);
+        man.hash.add(macho_file.headerpad_max_install_names);
+        man.hash.add(macho_file.base.gc_sections);
+        man.hash.add(macho_file.dead_strip_dylibs);
+        man.hash.add(macho_file.base.comp.root_mod.strip);
+        try MachO.hashAddFrameworks(&man, macho_file.frameworks);
+        man.hash.addListOfBytes(macho_file.base.rpath_list);
         if (is_dyn_lib) {
-            man.hash.addOptionalBytes(options.install_name);
-            man.hash.addOptional(options.version);
+            man.hash.addOptionalBytes(macho_file.install_name);
+            man.hash.addOptional(comp.version);
         }
-        try link.hashAddSystemLibs(&man, options.system_libs);
-        man.hash.addOptionalBytes(options.sysroot);
-        man.hash.addListOfBytes(options.force_undefined_symbols.keys());
-        try man.addOptionalFile(options.entitlements);
+        try link.hashAddSystemLibs(&man, comp.system_libs);
+        man.hash.addOptionalBytes(comp.sysroot);
+        man.hash.addListOfBytes(macho_file.base.force_undefined_symbols.keys());
+        try man.addOptionalFile(macho_file.entitlements);
 
         // We don't actually care whether it's a cache hit or miss; we just
         // need the digest and the lock.
@@ -125,13 +126,13 @@ pub fn linkWithZld(
         };
     }
 
-    if (options.output_mode == .Obj) {
+    if (output_mode == .Obj) {
         // LLD's MachO driver does not support the equivalent of `-r` so we do a simple file copy
         // here. TODO: think carefully about how we can avoid this redundant operation when doing
         // build-obj. See also the corresponding TODO in linkAsArchive.
         const the_object_path = blk: {
-            if (options.objects.len != 0) {
-                break :blk options.objects[0].path;
+            if (objects.len != 0) {
+                break :blk objects[0].path;
             }
 
             if (comp.c_object_table.count() != 0)
@@ -150,7 +151,7 @@ pub fn linkWithZld(
             try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
         }
     } else {
-        const sub_path = options.emit.?.sub_path;
+        const sub_path = emit.sub_path;
 
         const old_file = macho_file.base.file; // TODO is this needed at all?
         defer macho_file.base.file = old_file;
@@ -158,7 +159,7 @@ pub fn linkWithZld(
         const file = try directory.handle.createFile(sub_path, .{
             .truncate = true,
             .read = true,
-            .mode = link.determineMode(options.*),
+            .mode = link.File.determineMode(false, output_mode, link_mode),
         });
         defer file.close();
         macho_file.base.file = file;
@@ -175,8 +176,8 @@ pub fn linkWithZld(
 
         // Positional arguments to the linker such as object files and static archives.
         var positionals = std.ArrayList(Compilation.LinkObject).init(arena);
-        try positionals.ensureUnusedCapacity(options.objects.len);
-        positionals.appendSliceAssumeCapacity(options.objects);
+        try positionals.ensureUnusedCapacity(objects.len);
+        positionals.appendSliceAssumeCapacity(objects);
 
         for (comp.c_object_table.keys()) |key| {
             try positionals.append(.{ .path = key.status.success.object_path });
@@ -190,7 +191,7 @@ pub fn linkWithZld(
         if (comp.compiler_rt_obj) |obj| try positionals.append(.{ .path = obj.full_object_path });
 
         // libc++ dep
-        if (options.link_libcpp) {
+        if (comp.config.link_libcpp) {
             try positionals.ensureUnusedCapacity(2);
             positionals.appendAssumeCapacity(.{ .path = comp.libcxxabi_static_lib.?.full_object_path });
             positionals.appendAssumeCapacity(.{ .path = comp.libcxx_static_lib.?.full_object_path });
@@ -199,23 +200,23 @@ pub fn linkWithZld(
         var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
 
         {
-            const vals = options.system_libs.values();
+            const vals = comp.system_libs.values();
             try libs.ensureUnusedCapacity(vals.len);
             for (vals) |v| libs.putAssumeCapacity(v.path.?, v);
         }
 
         {
-            try libs.ensureUnusedCapacity(options.frameworks.len);
-            for (options.frameworks) |v| libs.putAssumeCapacity(v.path, .{
+            try libs.ensureUnusedCapacity(macho_file.frameworks.len);
+            for (macho_file.frameworks) |v| libs.putAssumeCapacity(v.path, .{
                 .needed = v.needed,
                 .weak = v.weak,
                 .path = v.path,
             });
         }
 
-        try macho_file.resolveLibSystem(arena, comp, options.lib_dirs, &libs);
+        try macho_file.resolveLibSystem(arena, comp, &libs);
 
-        if (options.verbose_link) {
+        if (comp.verbose_link) {
             var argv = std.ArrayList([]const u8).init(arena);
 
             try argv.append("zig");
@@ -228,14 +229,14 @@ pub fn linkWithZld(
             if (is_dyn_lib) {
                 try argv.append("-dylib");
 
-                if (options.install_name) |install_name| {
+                if (macho_file.install_name) |install_name| {
                     try argv.append("-install_name");
                     try argv.append(install_name);
                 }
             }
 
             {
-                const platform = Platform.fromTarget(options.target);
+                const platform = Platform.fromTarget(target);
                 try argv.append("-platform_version");
                 try argv.append(@tagName(platform.os_tag));
                 try argv.append(try std.fmt.allocPrint(arena, "{}", .{platform.version}));
@@ -248,44 +249,39 @@ pub fn linkWithZld(
                 }
             }
 
-            if (options.sysroot) |syslibroot| {
+            if (comp.sysroot) |syslibroot| {
                 try argv.append("-syslibroot");
                 try argv.append(syslibroot);
             }
 
-            for (options.rpath_list) |rpath| {
+            for (macho_file.base.rpath_list) |rpath| {
                 try argv.append("-rpath");
                 try argv.append(rpath);
             }
 
-            if (options.pagezero_size) |pagezero_size| {
-                try argv.append("-pagezero_size");
-                try argv.append(try std.fmt.allocPrint(arena, "0x{x}", .{pagezero_size}));
-            }
-
-            if (options.headerpad_size) |headerpad_size| {
-                try argv.append("-headerpad_size");
-                try argv.append(try std.fmt.allocPrint(arena, "0x{x}", .{headerpad_size}));
-            }
+            try argv.appendSlice(&.{
+                "-pagezero_size",  try std.fmt.allocPrint(arena, "0x{x}", .{macho_file.pagezero_size}),
+                "-headerpad_size", try std.fmt.allocPrint(arena, "0x{x}", .{macho_file.headerpad_size}),
+            });
 
-            if (options.headerpad_max_install_names) {
+            if (macho_file.headerpad_max_install_names) {
                 try argv.append("-headerpad_max_install_names");
             }
 
-            if (gc_sections) {
+            if (macho_file.base.gc_sections) {
                 try argv.append("-dead_strip");
             }
 
-            if (options.dead_strip_dylibs) {
+            if (macho_file.dead_strip_dylibs) {
                 try argv.append("-dead_strip_dylibs");
             }
 
-            if (options.entry) |entry| {
+            if (comp.config.entry) |entry| {
                 try argv.append("-e");
                 try argv.append(entry);
             }
 
-            for (options.objects) |obj| {
+            for (objects) |obj| {
                 if (obj.must_link) {
                     try argv.append("-force_load");
                 }
@@ -303,7 +299,7 @@ pub fn linkWithZld(
             if (comp.compiler_rt_lib) |lib| try argv.append(lib.full_object_path);
             if (comp.compiler_rt_obj) |obj| try argv.append(obj.full_object_path);
 
-            if (options.link_libcpp) {
+            if (comp.config.link_libcpp) {
                 try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
                 try argv.append(comp.libcxx_static_lib.?.full_object_path);
             }
@@ -313,8 +309,8 @@ pub fn linkWithZld(
 
             try argv.append("-lSystem");
 
-            for (options.system_libs.keys()) |l_name| {
-                const info = options.system_libs.get(l_name).?;
+            for (comp.system_libs.keys()) |l_name| {
+                const info = comp.system_libs.get(l_name).?;
                 const arg = if (info.needed)
                     try std.fmt.allocPrint(arena, "-needed-l{s}", .{l_name})
                 else if (info.weak)
@@ -324,11 +320,7 @@ pub fn linkWithZld(
                 try argv.append(arg);
             }
 
-            for (options.lib_dirs) |lib_dir| {
-                try argv.append(try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir}));
-            }
-
-            for (options.frameworks) |framework| {
+            for (macho_file.frameworks) |framework| {
                 const name = std.fs.path.stem(framework.path);
                 const arg = if (framework.needed)
                     try std.fmt.allocPrint(arena, "-needed_framework {s}", .{name})
@@ -339,11 +331,7 @@ pub fn linkWithZld(
                 try argv.append(arg);
             }
 
-            for (options.framework_dirs) |framework_dir| {
-                try argv.append(try std.fmt.allocPrint(arena, "-F{s}", .{framework_dir}));
-            }
-
-            if (is_dyn_lib and (options.allow_shlib_undefined orelse false)) {
+            if (is_dyn_lib and macho_file.base.allow_shlib_undefined) {
                 try argv.append("-undefined");
                 try argv.append("dynamic_lookup");
             }
@@ -412,7 +400,7 @@ pub fn linkWithZld(
             };
         }
 
-        if (gc_sections) {
+        if (macho_file.base.gc_sections) {
             try dead_strip.gcAtoms(macho_file);
         }
 
@@ -519,7 +507,7 @@ pub fn linkWithZld(
             // where the code signature goes into.
             var codesig = CodeSignature.init(MachO.getPageSize(cpu_arch));
             codesig.code_directory.ident = fs.path.basename(full_out_path);
-            if (options.entitlements) |path| {
+            if (macho_file.entitlements) |path| {
                 try codesig.addEntitlements(gpa, path);
             }
             try macho_file.writeCodeSignaturePadding(&codesig);
@@ -539,7 +527,7 @@ pub fn linkWithZld(
         try lc_writer.writeStruct(macho_file.dysymtab_cmd);
         try load_commands.writeDylinkerLC(lc_writer);
 
-        switch (macho_file.base.options.output_mode) {
+        switch (output_mode) {
             .Exe => blk: {
                 const seg_id = macho_file.header_segment_cmd_index.?;
                 const seg = macho_file.segments.items[seg_id];
@@ -555,10 +543,10 @@ pub fn linkWithZld(
 
                 try lc_writer.writeStruct(macho.entry_point_command{
                     .entryoff = @as(u32, @intCast(addr - seg.vmaddr)),
-                    .stacksize = macho_file.base.options.stack_size_override orelse 0,
+                    .stacksize = macho_file.base.stack_size,
                 });
             },
-            .Lib => if (macho_file.base.options.link_mode == .Dynamic) {
+            .Lib => if (link_mode == .Dynamic) {
                 try load_commands.writeDylibIdLC(gpa, &macho_file.base.options, lc_writer);
             },
             else => {},
@@ -598,11 +586,11 @@ pub fn linkWithZld(
 
         if (codesig) |*csig| {
             try macho_file.writeCodeSignature(comp, csig); // code signing always comes last
-            try MachO.invalidateKernelCache(directory.handle, macho_file.base.options.emit.?.sub_path);
+            try MachO.invalidateKernelCache(directory.handle, macho_file.base.emit.sub_path);
         }
     }
 
-    if (!options.disable_lld_caching) {
+    if (!macho_file.base.disable_lld_caching) {
         // Update the file with the digest. If it fails we can continue; it only
         // means that the next invocation will have an unnecessary cache miss.
         Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
@@ -622,12 +610,11 @@ pub fn linkWithZld(
 
 fn createSegments(macho_file: *MachO) !void {
     const gpa = macho_file.base.allocator;
-    const pagezero_vmsize = macho_file.base.options.pagezero_size orelse MachO.default_pagezero_vmsize;
     const page_size = MachO.getPageSize(macho_file.base.options.target.cpu.arch);
-    const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, page_size);
+    const aligned_pagezero_vmsize = mem.alignBackward(u64, macho_file.pagezero_vmsize, page_size);
     if (macho_file.base.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) {
-        if (aligned_pagezero_vmsize != pagezero_vmsize) {
-            log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize});
+        if (aligned_pagezero_vmsize != macho_file.pagezero_vmsize) {
+            log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{macho_file.pagezero_vmsize});
             log.warn("  rounding down to 0x{x}", .{aligned_pagezero_vmsize});
         }
         macho_file.pagezero_segment_cmd_index = @intCast(macho_file.segments.items.len);
src/link/C.zig
@@ -5,6 +5,7 @@ const Allocator = std.mem.Allocator;
 const fs = std.fs;
 
 const C = @This();
+const build_options = @import("build_options");
 const Module = @import("../Module.zig");
 const InternPool = @import("../InternPool.zig");
 const Alignment = InternPool.Alignment;
@@ -91,28 +92,40 @@ pub fn addString(this: *C, s: []const u8) Allocator.Error!String {
     };
 }
 
-pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C {
+pub fn open(arena: Allocator, options: link.File.OpenOptions) !*C {
-    assert(options.target.ofmt == .c);
+    assert(options.comp.root_mod.resolved_target.result.ofmt == .c);
+    const optimize_mode = options.comp.root_mod.optimize_mode;
+    const use_lld = build_options.have_llvm and options.comp.config.use_lld;
+    const use_llvm = options.comp.config.use_llvm;
 
-    if (options.use_llvm) return error.LLVMHasNoCBackend;
-    if (options.use_lld) return error.LLDHasNoCBackend;
+    // These are caught by `Compilation.Config.resolve`.
+    assert(!use_lld);
+    assert(!use_llvm);
 
-    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
+    const emit = options.emit;
+
+    const file = try emit.directory.handle.createFile(emit.sub_path, .{
         // Truncation is done on `flush`.
         .truncate = false,
-        .mode = link.determineMode(options),
+        .mode = link.File.determineMode(use_lld, options.comp.config.output_mode, options.comp.config.link_mode),
     });
     errdefer file.close();
 
-    const c_file = try gpa.create(C);
-    errdefer gpa.destroy(c_file);
+    const c_file = try arena.create(C);
 
     c_file.* = .{
         .base = .{
             .tag = .c,
-            .options = options,
+            .comp = options.comp,
+            .emit = emit,
+            .gc_sections = options.gc_sections orelse optimize_mode != .Debug,
+            .stack_size = options.stack_size orelse 16777216,
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
             .file = file,
-            .allocator = gpa,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
         },
     };
 
@@ -120,7 +133,7 @@ pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C
 }
 
 pub fn deinit(self: *C) void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
 
     for (self.decl_table.values()) |*db| {
         db.deinit(gpa);
@@ -141,7 +154,7 @@ pub fn deinit(self: *C) void {
 }
 
 pub fn freeDecl(self: *C, decl_index: InternPool.DeclIndex) void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     if (self.decl_table.fetchSwapRemove(decl_index)) |kv| {
         var decl_block = kv.value;
         decl_block.deinit(gpa);
@@ -155,7 +168,7 @@ pub fn updateFunc(
     air: Air,
     liveness: Liveness,
 ) !void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
 
     const func = module.funcInfo(func_index);
     const decl_index = func.owner_decl;
@@ -223,7 +236,7 @@ pub fn updateFunc(
 }
 
 fn updateAnonDecl(self: *C, module: *Module, i: usize) !void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const anon_decl = self.anon_decls.keys()[i];
 
     const fwd_decl = &self.fwd_decl_buf;
@@ -285,7 +298,7 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: InternPool.DeclIndex) !
     const tracy = trace(@src());
     defer tracy.end();
 
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
 
     const gop = try self.decl_table.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
@@ -352,7 +365,8 @@ pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void
 }
 
 fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
-    var defines = std.ArrayList(u8).init(self.base.allocator);
+    const gpa = self.base.comp.gpa;
+    var defines = std.ArrayList(u8).init(gpa);
     errdefer defines.deinit();
     const writer = defines.writer();
     switch (target.abi) {
@@ -371,7 +385,7 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo
     sub_prog_node.activate();
     defer sub_prog_node.end();
 
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const module = self.base.options.module.?;
 
     {
@@ -520,7 +534,7 @@ fn flushCTypes(
     pass: codegen.DeclGen.Pass,
     decl_ctypes: codegen.CType.Store,
 ) FlushDeclError!void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const mod = self.base.options.module.?;
 
     const decl_ctypes_len = decl_ctypes.count();
@@ -601,7 +615,7 @@ fn flushCTypes(
 }
 
 fn flushErrDecls(self: *C, ctypes: *codegen.CType.Store) FlushDeclError!void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
 
     const fwd_decl = &self.lazy_fwd_decl_buf;
     const code = &self.lazy_code_buf;
@@ -643,7 +657,7 @@ fn flushLazyFn(
     ctypes: *codegen.CType.Store,
     lazy_fn: codegen.LazyFnMap.Entry,
 ) FlushDeclError!void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
 
     const fwd_decl = &self.lazy_fwd_decl_buf;
     const code = &self.lazy_code_buf;
@@ -683,7 +697,7 @@ fn flushLazyFn(
 }
 
 fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(lazy_fns.count()));
 
     var it = lazy_fns.iterator();
@@ -702,7 +716,7 @@ fn flushDeclBlock(
     export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
     extern_symbol_name: InternPool.OptionalNullTerminatedString,
 ) FlushDeclError!void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     try self.flushLazyFns(f, decl_block.lazy_fns);
     try f.all_buffers.ensureUnusedCapacity(gpa, 1);
     fwd_decl: {
src/link/Coff.zig
@@ -232,7 +232,7 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Coff {
     errdefer self.base.destroy();
 
     const use_lld = build_options.have_llvm and options.comp.config.use_lld;
-    const use_llvm = build_options.have_llvm and options.comp.config.use_llvm;
+    const use_llvm = options.comp.config.use_llvm;
 
     if (use_lld and use_llvm) {
         // LLVM emits the object file; LLD links it into the final product.
@@ -353,6 +353,7 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Coff {
 
 pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Coff {
     const target = options.comp.root_mod.resolved_target.result;
+    const optimize_mode = options.comp.root_mod.optimize_mode;
     const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
@@ -367,14 +368,24 @@ pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Coff {
             .tag = .coff,
             .comp = options.comp,
             .emit = options.emit,
+            .stack_size = options.stack_size orelse 16777216,
+            .gc_sections = options.gc_sections orelse (optimize_mode != .Debug),
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
             .file = null,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
+            .debug_format = options.debug_format orelse .code_view,
+            .function_sections = options.function_sections,
+            .data_sections = options.data_sections,
         },
         .ptr_width = ptr_width,
         .page_size = page_size,
         .data_directories = comptime mem.zeroes([coff.IMAGE_NUMBEROF_DIRECTORY_ENTRIES]coff.ImageDataDirectory),
     };
 
-    const use_llvm = build_options.have_llvm and options.comp.config.use_llvm;
+    const use_llvm = options.comp.config.use_llvm;
     if (use_llvm and options.comp.config.have_zcu) {
         self.llvm_object = try LlvmObject.create(arena, options);
     }
@@ -1494,8 +1505,6 @@ pub fn updateExports(
 
     if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports);
 
-    if (self.base.options.emit == null) return;
-
     const gpa = self.base.comp.gpa;
 
     const metadata = switch (exported) {
@@ -1645,13 +1654,7 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
 }
 
 pub fn flush(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
-    if (self.base.options.emit == null) {
-        if (self.llvm_object) |llvm_object| {
-            return try llvm_object.flushModule(comp, prog_node);
-        }
-        return;
-    }
-    const use_lld = build_options.have_llvm and self.base.options.use_lld;
+    const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
     if (use_lld) {
         return lld.linkWithLLD(self, comp, prog_node);
     }
src/link/Elf.zig
@@ -206,7 +206,10 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Elf {
     assert(target.ofmt == .elf);
 
     const use_lld = build_options.have_llvm and options.comp.config.use_lld;
-    const use_llvm = build_options.have_llvm and options.comp.config.use_llvm;
+    const use_llvm = options.comp.config.use_llvm;
+    const opt_zcu = options.comp.module;
+    const output_mode = options.comp.config.output_mode;
+    const link_mode = options.comp.config.link_mode;
 
     const self = try createEmpty(arena, options);
     errdefer self.base.destroy();
@@ -216,8 +219,8 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Elf {
         return self;
     }
 
-    const is_obj = options.output_mode == .Obj;
-    const is_obj_or_ar = is_obj or (options.output_mode == .Lib and options.link_mode == .Static);
+    const is_obj = output_mode == .Obj;
+    const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .Static);
 
     const sub_path = if (!use_lld) options.emit.sub_path else p: {
         // Open a temporary object file, not the final output file because we
@@ -229,10 +232,10 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Elf {
         break :p o_file_path;
     };
 
-    self.base.file = try options.emit.?.directory.handle.createFile(sub_path, .{
+    self.base.file = try options.emit.directory.handle.createFile(sub_path, .{
         .truncate = false,
         .read = true,
-        .mode = link.determineMode(options),
+        .mode = link.File.determineMode(use_lld, output_mode, link_mode),
     });
 
     const gpa = options.comp.gpa;
@@ -292,24 +295,34 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Elf {
         });
     }
 
-    if (options.module != null and !options.use_llvm) {
-        const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
-        self.files.set(index, .{ .zig_object = .{
-            .index = index,
-            .path = try std.fmt.allocPrint(arena, "{s}.o", .{std.fs.path.stem(
-                options.module.?.main_mod.root_src_path,
-            )}),
-        } });
-        self.zig_object_index = index;
-        try self.zigObjectPtr().?.init(self);
-        try self.initMetadata();
+    if (opt_zcu) |zcu| {
+        if (!use_llvm) {
+            const index: File.Index = @intCast(try self.files.addOne(gpa));
+            self.files.set(index, .{ .zig_object = .{
+                .index = index,
+                .path = try std.fmt.allocPrint(arena, "{s}.o", .{std.fs.path.stem(
+                    zcu.main_mod.root_src_path,
+                )}),
+            } });
+            self.zig_object_index = index;
+            try self.zigObjectPtr().?.init(self);
+            try self.initMetadata(.{
+                .symbol_count_hint = options.symbol_count_hint,
+                .program_code_size_hint = options.program_code_size_hint,
+            });
+        }
     }
 
     return self;
 }
 
 pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Elf {
+    const use_llvm = options.comp.config.use_llvm;
+    const optimize_mode = options.comp.root_mod.optimize_mode;
     const target = options.comp.root_mod.resolved_target.result;
+    const output_mode = options.comp.config.output_mode;
+    const link_mode = options.comp.config.link_mode;
+    const is_native_os = options.comp.root_mod.resolved_target.is_native_os;
     const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
@@ -322,7 +335,7 @@ pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Elf {
         .sparc64 => 0x2000,
         else => 0x1000,
     };
-    const is_dyn_lib = options.output_mode == .Lib and options.link_mode == .Dynamic;
+    const is_dyn_lib = output_mode == .Lib and link_mode == .Dynamic;
     const default_sym_version: elf.Elf64_Versym = if (is_dyn_lib or options.rdynamic)
         elf.VER_NDX_GLOBAL
     else
@@ -333,13 +346,23 @@ pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Elf {
             .tag = .elf,
             .comp = options.comp,
             .emit = options.emit,
+            .gc_sections = options.gc_sections orelse (optimize_mode != .Debug and output_mode != .Obj),
+            .stack_size = options.stack_size orelse 16777216,
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse !is_native_os,
             .file = null,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
+            .debug_format = options.debug_format orelse .{ .dwarf = .@"32" },
+            .function_sections = options.function_sections,
+            .data_sections = options.data_sections,
         },
         .ptr_width = ptr_width,
         .page_size = page_size,
         .default_sym_version = default_sym_version,
     };
-    if (options.use_llvm and options.comp.config.have_zcu) {
+    if (use_llvm and options.comp.config.have_zcu) {
         self.llvm_object = try LlvmObject.create(arena, options);
     }
 
@@ -504,8 +527,13 @@ fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) u64 {
     return start;
 }
 
+pub const InitMetadataOptions = struct {
+    symbol_count_hint: u64,
+    program_code_size_hint: u64,
+};
+
 /// TODO move to ZigObject
-pub fn initMetadata(self: *Elf) !void {
+pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
     const gpa = self.base.comp.gpa;
     const ptr_size = self.ptrWidthBytes();
     const target = self.base.comp.root_mod.resolved_target.result;
@@ -515,7 +543,7 @@ pub fn initMetadata(self: *Elf) !void {
 
     const fillSection = struct {
         fn fillSection(elf_file: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) void {
-            if (elf_file.isRelocatable()) {
+            if (elf_file.base.isRelocatable()) {
                 const off = elf_file.findFreeSpace(size, shdr.sh_addralign);
                 shdr.sh_offset = off;
                 shdr.sh_size = size;
@@ -530,9 +558,9 @@ pub fn initMetadata(self: *Elf) !void {
 
     comptime assert(number_of_zig_segments == 5);
 
-    if (!self.isRelocatable()) {
+    if (!self.base.isRelocatable()) {
         if (self.phdr_zig_load_re_index == null) {
-            const filesz = self.base.options.program_code_size_hint;
+            const filesz = options.program_code_size_hint;
             const off = self.findFreeSpace(filesz, self.page_size);
             self.phdr_zig_load_re_index = try self.addPhdr(.{
                 .type = elf.PT_LOAD,
@@ -549,7 +577,7 @@ pub fn initMetadata(self: *Elf) !void {
             // We really only need ptr alignment but since we are using PROGBITS, linux requires
             // page align.
             const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
-            const filesz = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
+            const filesz = @as(u64, ptr_size) * options.symbol_count_hint;
             const off = self.findFreeSpace(filesz, alignment);
             self.phdr_zig_got_index = try self.addPhdr(.{
                 .type = elf.PT_LOAD,
@@ -613,8 +641,8 @@ pub fn initMetadata(self: *Elf) !void {
             .offset = std.math.maxInt(u64),
         });
         const shdr = &self.shdrs.items[self.zig_text_section_index.?];
-        fillSection(self, shdr, self.base.options.program_code_size_hint, self.phdr_zig_load_re_index);
-        if (self.isRelocatable()) {
+        fillSection(self, shdr, options.program_code_size_hint, self.phdr_zig_load_re_index);
+        if (self.base.isRelocatable()) {
             const rela_shndx = try self.addRelaShdr(".rela.text.zig", self.zig_text_section_index.?);
             try self.output_rela_sections.putNoClobber(gpa, self.zig_text_section_index.?, .{
                 .shndx = rela_shndx,
@@ -630,7 +658,7 @@ pub fn initMetadata(self: *Elf) !void {
         try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_text_section_index.?, .{});
     }
 
-    if (self.zig_got_section_index == null and !self.isRelocatable()) {
+    if (self.zig_got_section_index == null and !self.base.isRelocatable()) {
         self.zig_got_section_index = try self.addSection(.{
             .name = ".got.zig",
             .type = elf.SHT_PROGBITS,
@@ -661,7 +689,7 @@ pub fn initMetadata(self: *Elf) !void {
         });
         const shdr = &self.shdrs.items[self.zig_data_rel_ro_section_index.?];
         fillSection(self, shdr, 1024, self.phdr_zig_load_ro_index);
-        if (self.isRelocatable()) {
+        if (self.base.isRelocatable()) {
             const rela_shndx = try self.addRelaShdr(
                 ".rela.data.rel.ro.zig",
                 self.zig_data_rel_ro_section_index.?,
@@ -690,7 +718,7 @@ pub fn initMetadata(self: *Elf) !void {
         });
         const shdr = &self.shdrs.items[self.zig_data_section_index.?];
         fillSection(self, shdr, 1024, self.phdr_zig_load_rw_index);
-        if (self.isRelocatable()) {
+        if (self.base.isRelocatable()) {
             const rela_shndx = try self.addRelaShdr(
                 ".rela.data.zig",
                 self.zig_data_section_index.?,
@@ -930,13 +958,7 @@ pub fn markDirty(self: *Elf, shdr_index: u16) void {
 }
 
 pub fn flush(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
-    if (self.base.options.emit == null) {
-        if (self.llvm_object) |llvm_object| {
-            try llvm_object.flushModule(comp, prog_node);
-        }
-        return;
-    }
-    const use_lld = build_options.have_llvm and self.base.options.use_lld;
+    const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
     if (use_lld) {
         return self.linkWithLLD(comp, prog_node);
     }
@@ -950,7 +972,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     if (self.llvm_object) |llvm_object| {
         try llvm_object.flushModule(comp, prog_node);
 
-        const use_lld = build_options.have_llvm and self.base.options.use_lld;
+        const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
         if (use_lld) return;
     }
 
@@ -959,13 +981,14 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     sub_prog_node.activate();
     defer sub_prog_node.end();
 
-    var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
+    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
     const target = self.base.comp.root_mod.resolved_target.result;
-    const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
+    const link_mode = self.base.comp.config.link_mode;
+    const directory = self.base.emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
     const module_obj_path: ?[]const u8 = if (self.base.intermediary_basename) |path| blk: {
         if (fs.path.dirname(full_out_path)) |dirname| {
             break :blk try fs.path.join(arena, &.{ dirname, path });
@@ -973,10 +996,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
             break :blk path;
         }
     } else null;
-    const gc_sections = self.base.options.gc_sections orelse false;
+    const gc_sections = self.base.gc_sections;
 
     // --verbose-link
-    if (self.base.options.verbose_link) try self.dumpArgv(comp);
+    if (self.base.comp.verbose_link) try self.dumpArgv(comp);
 
     const csu = try CsuObjects.init(arena, self.base.options, comp);
     const compiler_rt_path: ?[]const u8 = blk: {
@@ -986,8 +1009,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     };
 
     if (self.zigObjectPtr()) |zig_object| try zig_object.flushModule(self);
-    if (self.isStaticLib()) return self.flushStaticLib(comp, module_obj_path);
-    if (self.isObject()) return self.flushObject(comp, module_obj_path);
+    if (self.base.isStaticLib()) return self.flushStaticLib(comp, module_obj_path);
+    if (self.base.isObject()) return self.flushObject(comp, module_obj_path);
 
     // Here we will parse input positional and library files (if referenced).
     // This will roughly match in any linker backend we support.
@@ -1011,7 +1034,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     if (module_obj_path) |path| try positionals.append(.{ .path = path });
 
     // rpaths
-    var rpath_table = std.StringArrayHashMap(void).init(self.base.allocator);
+    var rpath_table = std.StringArrayHashMap(void).init(gpa);
     defer rpath_table.deinit();
 
     for (self.base.options.rpath_list) |rpath| {
@@ -1019,10 +1042,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     }
 
     if (self.base.options.each_lib_rpath) {
-        var test_path = std.ArrayList(u8).init(self.base.allocator);
+        var test_path = std.ArrayList(u8).init(gpa);
         defer test_path.deinit();
         for (self.base.options.lib_dirs) |lib_dir_path| {
-            for (self.base.options.system_libs.keys()) |link_lib| {
+            for (self.base.comp.system_libs.keys()) |link_lib| {
                 if (!(try self.accessLibPath(&test_path, null, lib_dir_path, link_lib, .Dynamic)))
                     continue;
                 _ = try rpath_table.put(lib_dir_path, {});
@@ -1064,8 +1087,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
 
     var system_libs = std.ArrayList(SystemLib).init(arena);
 
-    try system_libs.ensureUnusedCapacity(self.base.options.system_libs.values().len);
-    for (self.base.options.system_libs.values()) |lib_info| {
+    try system_libs.ensureUnusedCapacity(self.base.comp.system_libs.values().len);
+    for (self.base.comp.system_libs.values()) |lib_info| {
         system_libs.appendAssumeCapacity(.{ .needed = lib_info.needed, .path = lib_info.path.? });
     }
 
@@ -1127,7 +1150,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
                 .path = try comp.get_libc_crt_file(arena, "libc_nonshared.a"),
             });
         } else if (target.isMusl()) {
-            const path = try comp.get_libc_crt_file(arena, switch (self.base.options.link_mode) {
+            const path = try comp.get_libc_crt_file(arena, switch (link_mode) {
                 .Static => "libc.a",
                 .Dynamic => "libc.so",
             });
@@ -1224,7 +1247,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     if (self.entry_index == null) {
         const entry: ?[]const u8 = entry: {
             if (self.base.options.entry) |entry| break :entry entry;
-            if (!self.isDynLib()) break :entry "_start";
+            if (!self.base.isDynLib()) break :entry "_start";
             break :entry null;
         };
         self.entry_index = if (entry) |name| self.globalByName(name) else null;
@@ -1301,7 +1324,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     try self.writeAtoms();
     try self.writeSyntheticSections();
 
-    if (self.entry_index == null and self.isExe()) {
+    if (self.entry_index == null and self.base.isExe()) {
         log.debug("flushing. no_entry_point_found = true", .{});
         self.error_flags.no_entry_point_found = true;
     } else {
@@ -1531,13 +1554,15 @@ pub fn flushObject(self: *Elf, comp: *Compilation, module_obj_path: ?[]const u8)
 
 /// --verbose-link output
 fn dumpArgv(self: *Elf, comp: *Compilation) !void {
-    var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
+    const gpa = self.base.comp.gpa;
+    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
     const target = self.base.comp.root_mod.resolved_target.result;
-    const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
+    const link_mode = self.base.comp.config.link_mode;
+    const directory = self.base.emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
     const module_obj_path: ?[]const u8 = if (self.base.intermediary_basename) |path| blk: {
         if (fs.path.dirname(full_out_path)) |dirname| {
             break :blk try fs.path.join(arena, &.{ dirname, path });
@@ -1545,7 +1570,6 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
             break :blk path;
         }
     } else null;
-    const gc_sections = self.base.options.gc_sections orelse false;
 
     const csu = try CsuObjects.init(arena, self.base.options, comp);
     const compiler_rt_path: ?[]const u8 = blk: {
@@ -1558,20 +1582,20 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
 
     try argv.append("zig");
 
-    if (self.isStaticLib()) {
+    if (self.base.isStaticLib()) {
         try argv.append("ar");
     } else {
         try argv.append("ld");
     }
 
-    if (self.isObject()) {
+    if (self.base.isObject()) {
         try argv.append("-r");
     }
 
     try argv.append("-o");
     try argv.append(full_out_path);
 
-    if (self.isRelocatable()) {
+    if (self.base.isRelocatable()) {
         for (self.base.options.objects) |obj| {
             try argv.append(obj.path);
         }
@@ -1591,7 +1615,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
             }
         }
 
-        if (self.isDynLib()) {
+        if (self.base.isDynLib()) {
             if (self.base.options.soname) |name| {
                 try argv.append("-soname");
                 try argv.append(name);
@@ -1624,16 +1648,16 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
             }
         }
 
-        if (self.base.options.stack_size_override) |ss| {
-            try argv.append("-z");
-            try argv.append(try std.fmt.allocPrint(arena, "stack-size={d}", .{ss}));
-        }
+        try argv.appendSlice(&.{
+            "-z",
+            try std.fmt.allocPrint(arena, "stack-size={d}", .{self.base.stack_size}),
+        });
 
         if (self.base.options.image_base_override) |image_base| {
             try argv.append(try std.fmt.allocPrint(arena, "--image-base={d}", .{image_base}));
         }
 
-        if (gc_sections) {
+        if (self.base.gc_sections) {
             try argv.append("--gc-sections");
         }
 
@@ -1666,11 +1690,11 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
 
         if (self.isStatic()) {
             try argv.append("-static");
-        } else if (self.isDynLib()) {
+        } else if (self.base.isDynLib()) {
             try argv.append("-shared");
         }
 
-        if (self.base.options.pie and self.isExe()) {
+        if (self.base.options.pie and self.base.isExe()) {
             try argv.append("-pie");
         }
 
@@ -1741,11 +1765,11 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
         // Shared libraries.
         // Worst-case, we need an --as-needed argument for every lib, as well
         // as one before and one after.
-        try argv.ensureUnusedCapacity(self.base.options.system_libs.keys().len * 2 + 2);
+        try argv.ensureUnusedCapacity(self.base.comp.system_libs.keys().len * 2 + 2);
         argv.appendAssumeCapacity("--as-needed");
         var as_needed = true;
 
-        for (self.base.options.system_libs.values()) |lib_info| {
+        for (self.base.comp.system_libs.values()) |lib_info| {
             const lib_as_needed = !lib_info.needed;
             switch ((@as(u2, @intFromBool(lib_as_needed)) << 1) | @intFromBool(as_needed)) {
                 0b00, 0b11 => {},
@@ -1780,7 +1804,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
         // libc dep
         if (self.base.options.link_libc) {
             if (self.base.options.libc_installation != null) {
-                const needs_grouping = self.base.options.link_mode == .Static;
+                const needs_grouping = link_mode == .Static;
                 if (needs_grouping) try argv.append("--start-group");
                 try argv.appendSlice(target_util.libcFullLinkFlags(target));
                 if (needs_grouping) try argv.append("--end-group");
@@ -1793,7 +1817,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
                 }
                 try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
             } else if (target.isMusl()) {
-                try argv.append(try comp.get_libc_crt_file(arena, switch (self.base.options.link_mode) {
+                try argv.append(try comp.get_libc_crt_file(arena, switch (link_mode) {
                     .Static => "libc.a",
                     .Dynamic => "libc.so",
                 }));
@@ -2006,6 +2030,7 @@ fn accessLibPath(
     lib_name: []const u8,
     link_mode: ?std.builtin.LinkMode,
 ) !bool {
+    const gpa = self.base.comp.gpa;
     const sep = fs.path.sep_str;
     const target = self.base.comp.root_mod.resolved_target.result;
     test_path.clearRetainingCapacity();
@@ -2021,7 +2046,7 @@ fn accessLibPath(
         suffix,
     });
     if (checked_paths) |cpaths| {
-        try cpaths.append(try self.base.allocator.dupe(u8, test_path.items));
+        try cpaths.append(try gpa.dupe(u8, test_path.items));
     }
     fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
         error.FileNotFound => return false,
@@ -2150,7 +2175,7 @@ fn markImportsExports(self: *Elf) void {
                 }
                 if (file_ptr.index() == file_index) {
                     global.flags.@"export" = true;
-                    if (elf_file.isDynLib() and vis != .PROTECTED) {
+                    if (elf_file.base.isDynLib() and vis != .PROTECTED) {
                         global.flags.import = true;
                     }
                 }
@@ -2158,7 +2183,7 @@ fn markImportsExports(self: *Elf) void {
         }
     }.mark;
 
-    if (!self.isDynLib()) {
+    if (!self.base.isDynLib()) {
         for (self.shared_objects.items) |index| {
             for (self.file(index).?.globals()) |global_index| {
                 const global = self.symbol(global_index);
@@ -2274,12 +2299,13 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
     const tracy = trace(@src());
     defer tracy.end();
 
-    var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
+    const gpa = self.base.comp.gpa;
+    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
-    const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
+    const directory = self.base.emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
 
     // If there is no Zig code to compile, then we should skip flushing the output file because it
     // will not be part of the linker line anyway.
@@ -2298,16 +2324,15 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
     sub_prog_node.context.refresh();
     defer sub_prog_node.end();
 
-    const is_obj = self.base.options.output_mode == .Obj;
-    const is_lib = self.base.options.output_mode == .Lib;
-    const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
-    const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
+    const output_mode = self.base.comp.config.output_mode;
+    const is_obj = output_mode == .Obj;
+    const is_lib = output_mode == .Lib;
+    const link_mode = self.base.comp.config.link_mode;
+    const is_dyn_lib = link_mode == .Dynamic and is_lib;
+    const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
     const have_dynamic_linker = self.base.options.link_libc and
-        self.base.options.link_mode == .Dynamic and is_exe_or_dyn_lib;
+        link_mode == .Dynamic and is_exe_or_dyn_lib;
     const target = self.base.comp.root_mod.resolved_target.result;
-    const gc_sections = self.base.options.gc_sections orelse !is_obj;
-    const stack_size = self.base.options.stack_size_override orelse 16777216;
-    const allow_shlib_undefined = self.base.options.allow_shlib_undefined orelse !self.base.options.is_native_os;
     const compiler_rt_path: ?[]const u8 = blk: {
         if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
         if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
@@ -2354,7 +2379,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         // installation sources because they are always a product of the compiler version + target information.
         man.hash.addOptionalBytes(self.base.options.entry);
         man.hash.addOptional(self.base.options.image_base_override);
-        man.hash.add(gc_sections);
+        man.hash.add(self.base.gc_sections);
         man.hash.addOptional(self.base.options.sort_section);
         man.hash.add(self.base.options.eh_frame_hdr);
         man.hash.add(self.base.options.emit_relocs);
@@ -2362,9 +2387,9 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         man.hash.addListOfBytes(self.base.options.lib_dirs);
         man.hash.addListOfBytes(self.base.options.rpath_list);
         man.hash.add(self.base.options.each_lib_rpath);
-        if (self.base.options.output_mode == .Exe) {
-            man.hash.add(stack_size);
-            man.hash.add(self.base.options.build_id);
+        if (output_mode == .Exe) {
+            man.hash.add(self.base.stack_size);
+            man.hash.add(self.base.build_id);
         }
         man.hash.addListOfBytes(self.base.options.symbol_wrap_set.keys());
         man.hash.add(self.base.options.skip_linker_dependencies);
@@ -2390,9 +2415,9 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         }
         man.hash.addOptionalBytes(self.base.options.soname);
         man.hash.addOptional(self.base.options.version);
-        try link.hashAddSystemLibs(&man, self.base.options.system_libs);
+        try link.hashAddSystemLibs(&man, self.base.comp.system_libs);
         man.hash.addListOfBytes(self.base.options.force_undefined_symbols.keys());
-        man.hash.add(allow_shlib_undefined);
+        man.hash.add(self.base.allow_shlib_undefined);
         man.hash.add(self.base.options.bind_global_refs_locally);
         man.hash.add(self.base.options.compress_debug_sections);
         man.hash.add(self.base.options.tsan);
@@ -2432,7 +2457,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
     // copy when generating relocatables. Normally, we would expect `lld -r` to work.
     // However, because LLD wants to resolve BPF relocations which it shouldn't, it fails
     // before even generating the relocatable.
-    if (self.base.options.output_mode == .Obj and
+    if (output_mode == .Obj and
         (self.base.options.lto or target.isBpfFreestanding()))
     {
         // In this case we must do a simple file copy
@@ -2459,7 +2484,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         }
     } else {
         // Create an LLD command line and invoke it.
-        var argv = std.ArrayList([]const u8).init(self.base.allocator);
+        var argv = std.ArrayList([]const u8).init(gpa);
         defer argv.deinit();
         // We will invoke ourselves as a child process to gain access to LLD.
         // This is necessary because LLD does not behave properly as a library -
@@ -2503,15 +2528,17 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
             .both => {}, // this is the default
         }
 
-        if (self.base.options.output_mode == .Exe) {
-            try argv.append("-z");
-            try argv.append(try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size}));
+        if (output_mode == .Exe) {
+            try argv.appendSlice(&.{
+                "-z",
+                try std.fmt.allocPrint(arena, "stack-size={d}", .{self.base.stack_size}),
+            });
 
-            switch (self.base.options.build_id) {
+            switch (self.base.build_id) {
                 .none => {},
                 .fast, .uuid, .sha1, .md5 => {
                     try argv.append(try std.fmt.allocPrint(arena, "--build-id={s}", .{
-                        @tagName(self.base.options.build_id),
+                        @tagName(self.base.build_id),
                     }));
                 },
                 .hexstring => |hs| {
@@ -2536,7 +2563,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
             try argv.append(arg);
         }
 
-        if (gc_sections) {
+        if (self.base.gc_sections) {
             try argv.append("--gc-sections");
         }
 
@@ -2615,7 +2642,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
             try argv.append(arg);
         }
 
-        if (self.base.options.link_mode == .Static) {
+        if (link_mode == .Static) {
             if (target.cpu.arch.isArmOrThumb()) {
                 try argv.append("-Bstatic");
             } else {
@@ -2625,7 +2652,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
             try argv.append("-shared");
         }
 
-        if (self.base.options.pie and self.base.options.output_mode == .Exe) {
+        if (self.base.options.pie and output_mode == .Exe) {
             try argv.append("-pie");
         }
 
@@ -2648,7 +2675,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         if (csu.crtbegin) |v| try argv.append(v);
 
         // rpaths
-        var rpath_table = std.StringHashMap(void).init(self.base.allocator);
+        var rpath_table = std.StringHashMap(void).init(gpa);
         defer rpath_table.deinit();
         for (self.base.options.rpath_list) |rpath| {
             if ((try rpath_table.fetchPut(rpath, {})) == null) {
@@ -2664,7 +2691,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         if (self.base.options.each_lib_rpath) {
             var test_path = std.ArrayList(u8).init(arena);
             for (self.base.options.lib_dirs) |lib_dir_path| {
-                for (self.base.options.system_libs.keys()) |link_lib| {
+                for (self.base.comp.system_libs.keys()) |link_lib| {
                     if (!(try self.accessLibPath(&test_path, null, lib_dir_path, link_lib, .Dynamic)))
                         continue;
                     if ((try rpath_table.fetchPut(lib_dir_path, {})) == null) {
@@ -2763,8 +2790,8 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
 
         // Shared libraries.
         if (is_exe_or_dyn_lib) {
-            const system_libs = self.base.options.system_libs.keys();
-            const system_libs_values = self.base.options.system_libs.values();
+            const system_libs = self.base.comp.system_libs.keys();
+            const system_libs_values = self.base.comp.system_libs.values();
 
             // Worst-case, we need an --as-needed argument for every lib, as well
             // as one before and one after.
@@ -2813,7 +2840,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
             self.error_flags.missing_libc = false;
             if (self.base.options.link_libc) {
                 if (self.base.options.libc_installation != null) {
-                    const needs_grouping = self.base.options.link_mode == .Static;
+                    const needs_grouping = link_mode == .Static;
                     if (needs_grouping) try argv.append("--start-group");
                     try argv.appendSlice(target_util.libcFullLinkFlags(target));
                     if (needs_grouping) try argv.append("--end-group");
@@ -2826,7 +2853,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
                     }
                     try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
                 } else if (target.isMusl()) {
-                    try argv.append(try comp.get_libc_crt_file(arena, switch (self.base.options.link_mode) {
+                    try argv.append(try comp.get_libc_crt_file(arena, switch (link_mode) {
                         .Static => "libc.a",
                         .Dynamic => "libc.so",
                     }));
@@ -2847,7 +2874,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
         if (csu.crtend) |v| try argv.append(v);
         if (csu.crtn) |v| try argv.append(v);
 
-        if (allow_shlib_undefined) {
+        if (self.base.allow_shlib_undefined) {
             try argv.append("--allow-shlib-undefined");
         }
 
@@ -2861,7 +2888,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
             try argv.append("-Bsymbolic");
         }
 
-        if (self.base.options.verbose_link) {
+        if (self.base.comp.verbose_link) {
             // Skip over our own name so that the LLD linker name is the first argv item.
             Compilation.dump_argv(argv.items[1..]);
         }
@@ -3087,10 +3114,12 @@ fn writeElfHeader(self: *Elf) !void {
 
     assert(index == 16);
 
-    const elf_type: elf.ET = switch (self.base.options.output_mode) {
+    const output_mode = self.base.comp.config.output_mode;
+    const link_mode = self.base.comp.config.link_mode;
+    const elf_type: elf.ET = switch (output_mode) {
         .Exe => if (self.base.options.pie) .DYN else .EXEC,
         .Obj => .REL,
-        .Lib => switch (self.base.options.link_mode) {
+        .Lib => switch (link_mode) {
             .Static => @as(elf.ET, .REL),
             .Dynamic => .DYN,
         },
@@ -3216,7 +3245,6 @@ pub fn updateExports(
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
     if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports);
-    if (self.base.options.emit == null) return;
     return self.zigObjectPtr().?.updateExports(self, mod, exported, exports);
 }
 
@@ -3280,6 +3308,8 @@ fn addLinkerDefinedSymbols(self: *Elf) !void {
 }
 
 fn allocateLinkerDefinedSymbols(self: *Elf) void {
+    const link_mode = self.base.comp.config.link_mode;
+
     // _DYNAMIC
     if (self.dynamic_section_index) |shndx| {
         const shdr = &self.shdrs.items[shndx];
@@ -3362,7 +3392,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
 
     // __rela_iplt_start, __rela_iplt_end
     if (self.rela_dyn_section_index) |shndx| blk: {
-        if (self.base.options.link_mode != .Static or self.base.options.pie) break :blk;
+        if (link_mode != .Static or self.base.options.pie) break :blk;
         const shdr = &self.shdrs.items[shndx];
         const end_addr = shdr.sh_addr + shdr.sh_size;
         const start_addr = end_addr - self.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
@@ -3531,7 +3561,7 @@ fn initSyntheticSections(self: *Elf) !void {
         });
     }
 
-    if (self.isDynLib() or self.shared_objects.items.len > 0 or self.base.options.pie) {
+    if (self.base.isDynLib() or self.shared_objects.items.len > 0 or self.base.options.pie) {
         self.dynstrtab_section_index = try self.addSection(.{
             .name = ".dynstr",
             .flags = elf.SHF_ALLOC,
@@ -3716,7 +3746,7 @@ fn initSpecialPhdrs(self: *Elf) !void {
     self.phdr_gnu_stack_index = try self.addPhdr(.{
         .type = elf.PT_GNU_STACK,
         .flags = elf.PF_W | elf.PF_R,
-        .memsz = self.base.options.stack_size_override orelse 0,
+        .memsz = self.base.stack_size,
         .@"align" = 1,
     });
 
@@ -3822,7 +3852,7 @@ fn setDynamicSection(self: *Elf, rpaths: []const []const u8) !void {
         try self.dynamic.addNeeded(shared_object, self);
     }
 
-    if (self.isDynLib()) {
+    if (self.base.isDynLib()) {
         if (self.base.options.soname) |soname| {
             try self.dynamic.setSoname(soname, self);
         }
@@ -3837,8 +3867,9 @@ fn sortDynamicSymtab(self: *Elf) void {
 }
 
 fn setVersionSymtab(self: *Elf) !void {
+    const gpa = self.base.comp.gpa;
     if (self.versym_section_index == null) return;
-    try self.versym.resize(self.base.allocator, self.dynsym.count());
+    try self.versym.resize(gpa, self.dynsym.count());
     self.versym.items[0] = elf.VER_NDX_LOCAL;
     for (self.dynsym.entries.items, 1..) |entry, i| {
         const sym = self.symbol(entry.symbol_index);
@@ -5597,38 +5628,14 @@ const CsuObjects = struct {
 };
 
 pub fn calcImageBase(self: Elf) u64 {
-    if (self.isDynLib()) return 0;
-    if (self.isExe() and self.base.options.pie) return 0;
+    if (self.base.isDynLib()) return 0;
+    if (self.base.isExe() and self.base.options.pie) return 0;
     return self.base.options.image_base_override orelse switch (self.ptr_width) {
         .p32 => 0x1000,
         .p64 => 0x1000000,
     };
 }
 
-pub fn isStatic(self: Elf) bool {
-    return self.base.options.link_mode == .Static;
-}
-
-pub fn isObject(self: Elf) bool {
-    return self.base.options.output_mode == .Obj;
-}
-
-pub fn isExe(self: Elf) bool {
-    return self.base.options.output_mode == .Exe;
-}
-
-pub fn isStaticLib(self: Elf) bool {
-    return self.base.options.output_mode == .Lib and self.isStatic();
-}
-
-pub fn isRelocatable(self: Elf) bool {
-    return self.isObject() or self.isStaticLib();
-}
-
-pub fn isDynLib(self: Elf) bool {
-    return self.base.options.output_mode == .Lib and !self.isStatic();
-}
-
 pub fn isZigSection(self: Elf, shndx: u16) bool {
     inline for (&[_]?u16{
         self.zig_text_section_index,
@@ -5668,8 +5675,9 @@ fn addPhdr(self: *Elf, opts: struct {
     filesz: u64 = 0,
     memsz: u64 = 0,
 }) error{OutOfMemory}!u16 {
+    const gpa = self.base.comp.gpa;
     const index = @as(u16, @intCast(self.phdrs.items.len));
-    try self.phdrs.append(self.base.allocator, .{
+    try self.phdrs.append(gpa, .{
         .p_type = opts.type,
         .p_flags = opts.flags,
         .p_offset = opts.offset,
@@ -5818,8 +5826,9 @@ pub fn atom(self: *Elf, atom_index: Atom.Index) ?*Atom {
 }
 
 pub fn addAtom(self: *Elf) !Atom.Index {
+    const gpa = self.base.comp.gpa;
     const index = @as(Atom.Index, @intCast(self.atoms.items.len));
-    const atom_ptr = try self.atoms.addOne(self.base.allocator);
+    const atom_ptr = try self.atoms.addOne(gpa);
     atom_ptr.* = .{ .atom_index = index };
     return index;
 }
@@ -5841,7 +5850,8 @@ pub fn symbol(self: *Elf, sym_index: Symbol.Index) *Symbol {
 }
 
 pub fn addSymbol(self: *Elf) !Symbol.Index {
-    try self.symbols.ensureUnusedCapacity(self.base.allocator, 1);
+    const gpa = self.base.comp.gpa;
+    try self.symbols.ensureUnusedCapacity(gpa, 1);
     const index = blk: {
         if (self.symbols_free_list.popOrNull()) |index| {
             log.debug("  (reusing symbol index {d})", .{index});
@@ -5858,8 +5868,9 @@ pub fn addSymbol(self: *Elf) !Symbol.Index {
 }
 
 pub fn addSymbolExtra(self: *Elf, extra: Symbol.Extra) !u32 {
+    const gpa = self.base.comp.gpa;
     const fields = @typeInfo(Symbol.Extra).Struct.fields;
-    try self.symbols_extra.ensureUnusedCapacity(self.base.allocator, fields.len);
+    try self.symbols_extra.ensureUnusedCapacity(gpa, fields.len);
     return self.addSymbolExtraAssumeCapacity(extra);
 }
 
@@ -5959,8 +5970,9 @@ pub fn getOrCreateComdatGroupOwner(self: *Elf, name: [:0]const u8) !GetOrCreateC
 }
 
 pub fn addComdatGroup(self: *Elf) !ComdatGroup.Index {
+    const gpa = self.base.comp.gpa;
     const index = @as(ComdatGroup.Index, @intCast(self.comdat_groups.items.len));
-    _ = try self.comdat_groups.addOne(self.base.allocator);
+    _ = try self.comdat_groups.addOne(gpa);
     return index;
 }
 
@@ -6023,14 +6035,16 @@ const ErrorWithNotes = struct {
 };
 
 pub fn addErrorWithNotes(self: *Elf, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
-    try self.misc_errors.ensureUnusedCapacity(self.base.allocator, 1);
+    const gpa = self.base.comp.gpa;
+    try self.misc_errors.ensureUnusedCapacity(gpa, 1);
     return self.addErrorWithNotesAssumeCapacity(note_count);
 }
 
 fn addErrorWithNotesAssumeCapacity(self: *Elf, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
+    const gpa = self.base.comp.gpa;
     const index = self.misc_errors.items.len;
     const err = self.misc_errors.addOneAssumeCapacity();
-    err.* = .{ .msg = undefined, .notes = try self.base.allocator.alloc(link.File.ErrorMsg, note_count) };
+    err.* = .{ .msg = undefined, .notes = try gpa.alloc(link.File.ErrorMsg, note_count) };
     return .{ .index = index };
 }
 
@@ -6040,9 +6054,10 @@ pub fn getShString(self: Elf, off: u32) [:0]const u8 {
 }
 
 pub fn insertShString(self: *Elf, name: [:0]const u8) error{OutOfMemory}!u32 {
+    const gpa = self.base.comp.gpa;
     const off = @as(u32, @intCast(self.shstrtab.items.len));
-    try self.shstrtab.ensureUnusedCapacity(self.base.allocator, name.len + 1);
-    self.shstrtab.writer(self.base.allocator).print("{s}\x00", .{name}) catch unreachable;
+    try self.shstrtab.ensureUnusedCapacity(gpa, name.len + 1);
+    self.shstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
     return off;
 }
 
@@ -6052,9 +6067,10 @@ pub fn getDynString(self: Elf, off: u32) [:0]const u8 {
 }
 
 pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 {
+    const gpa = self.base.comp.gpa;
     const off = @as(u32, @intCast(self.dynstrtab.items.len));
-    try self.dynstrtab.ensureUnusedCapacity(self.base.allocator, name.len + 1);
-    self.dynstrtab.writer(self.base.allocator).print("{s}\x00", .{name}) catch unreachable;
+    try self.dynstrtab.ensureUnusedCapacity(gpa, name.len + 1);
+    self.dynstrtab.writer(gpa).print("{s}\x00", .{name}) catch unreachable;
     return off;
 }
 
src/link/MachO.zig
@@ -144,6 +144,35 @@ tlv_table: TlvSymbolTable = .{},
 hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{},
 
 darwin_sdk_layout: ?SdkLayout,
+/// Size of the __PAGEZERO segment.
+pagezero_vmsize: u64,
+/// Minimum space for future expansion of the load commands.
+headerpad_size: u32,
+/// Set enough space as if all paths were MAXPATHLEN.
+headerpad_max_install_names: bool,
+/// Remove dylibs that are unreachable by the entry point or exported symbols.
+dead_strip_dylibs: bool,
+frameworks: []const Framework,
+/// Install name for the dylib.
+/// TODO: unify with soname
+install_name: ?[]const u8,
+/// Path to entitlements file.
+entitlements: ?[]const u8,
+
+/// When adding a new field, remember to update `hashAddFrameworks`.
+pub const Framework = struct {
+    needed: bool = false,
+    weak: bool = false,
+    path: []const u8,
+};
+
+pub fn hashAddFrameworks(man: *Cache.Manifest, hm: []const Framework) !void {
+    for (hm) |value| {
+        man.hash.add(value.needed);
+        man.hash.add(value.weak);
+        _ = try man.addFile(value.path, null);
+    }
+}
 
 /// The filesystem layout of darwin SDK elements.
 pub const SdkLayout = enum {
@@ -156,12 +185,14 @@ pub const SdkLayout = enum {
 pub fn open(arena: Allocator, options: link.File.OpenOptions) !*MachO {
     if (build_options.only_c) unreachable;
     const target = options.comp.root_mod.resolved_target.result;
+    const use_lld = build_options.have_llvm and options.comp.config.use_lld;
+    const use_llvm = options.comp.config.use_llvm;
     assert(target.ofmt == .macho);
 
     const gpa = options.comp.gpa;
     const emit = options.emit;
     const mode: Mode = mode: {
-        if (options.use_llvm or options.module == null or options.cache_mode == .whole)
+        if (use_llvm or options.module == null or options.cache_mode == .whole)
             break :mode .zld;
         break :mode .incremental;
     };
@@ -192,7 +223,11 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*MachO {
     const file = try emit.directory.handle.createFile(sub_path, .{
         .truncate = false,
         .read = true,
-        .mode = link.determineMode(options),
+        .mode = link.File.determineMode(
+            use_lld,
+            options.comp.config.output_mode,
+            options.comp.config.link_mode,
+        ),
     });
     self.base.file = file;
 
@@ -242,21 +277,37 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*MachO {
 
 pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*MachO {
     const self = try arena.create(MachO);
+    const optimize_mode = options.comp.root_mod.optimize_mode;
+    const use_llvm = options.comp.config.use_llvm;
 
     self.* = .{
         .base = .{
             .tag = .macho,
             .comp = options.comp,
             .emit = options.emit,
+            .gc_sections = options.gc_sections orelse (optimize_mode != .Debug),
+            .stack_size = options.stack_size orelse 16777216,
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
             .file = null,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
+            .debug_format = options.debug_format orelse .{ .dwarf = .@"32" },
+            .function_sections = options.function_sections,
+            .data_sections = options.data_sections,
         },
-        .mode = if (options.use_llvm or options.module == null or options.cache_mode == .whole)
+        .mode = if (use_llvm or options.module == null or options.cache_mode == .whole)
             .zld
         else
             .incremental,
+        .pagezero_vmsize = options.pagezero_size orelse default_pagezero_vmsize,
+        .headerpad_size = options.headerpad_size orelse default_headerpad_size,
+        .headerpad_max_install_names = options.headerpad_max_install_names,
+        .dead_strip_dylibs = options.dead_strip_dylibs,
     };
 
-    if (options.use_llvm and options.module != null) {
+    if (use_llvm and options.module != null) {
         self.llvm_object = try LlvmObject.create(arena, options);
     }
 
@@ -267,8 +318,9 @@ pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*MachO {
 
 pub fn flush(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
     const gpa = self.base.comp.gpa;
+    const output_mode = self.base.comp.config.output_mode;
 
-    if (self.base.options.output_mode == .Lib and self.base.options.link_mode == .Static) {
+    if (output_mode == .Lib and self.base.options.link_mode == .Static) {
         if (build_options.have_llvm) {
             return self.base.linkAsArchive(comp, prog_node);
         } else {
@@ -303,6 +355,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
     sub_prog_node.activate();
     defer sub_prog_node.end();
 
+    const output_mode = self.base.comp.config.output_mode;
     const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
 
     if (self.lazy_syms.getPtr(.none)) |metadata| {
@@ -335,7 +388,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
     }
 
     var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
-    try self.resolveLibSystem(arena, comp, &.{}, &libs);
+    try self.resolveLibSystem(arena, comp, &libs);
 
     const id_symlink_basename = "link.id";
 
@@ -446,7 +499,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
     try self.createDyldPrivateAtom();
     try self.writeStubHelperPreamble();
 
-    if (self.base.options.output_mode == .Exe and self.getEntryPoint() != null) {
+    if (output_mode == .Exe and self.getEntryPoint() != null) {
         const global = self.getEntryPoint().?;
         if (self.getSymbol(global).undf()) {
             // We do one additional check here in case the entry point was found in one of the dylibs.
@@ -517,8 +570,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
         // The most important here is to have the correct vm and filesize of the __LINKEDIT segment
         // where the code signature goes into.
         var codesig = CodeSignature.init(getPageSize(self.base.options.target.cpu.arch));
-        codesig.code_directory.ident = self.base.options.emit.?.sub_path;
-        if (self.base.options.entitlements) |path| {
+        codesig.code_directory.ident = self.base.emit.sub_path;
+        if (self.entitlements) |path| {
             try codesig.addEntitlements(gpa, path);
         }
         try self.writeCodeSignaturePadding(&codesig);
@@ -536,7 +589,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
     try lc_writer.writeStruct(self.dysymtab_cmd);
     try load_commands.writeDylinkerLC(lc_writer);
 
-    switch (self.base.options.output_mode) {
+    switch (output_mode) {
         .Exe => blk: {
             const seg_id = self.header_segment_cmd_index.?;
             const seg = self.segments.items[seg_id];
@@ -552,7 +605,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
 
             try lc_writer.writeStruct(macho.entry_point_command{
                 .entryoff = @as(u32, @intCast(addr - seg.vmaddr)),
-                .stacksize = self.base.options.stack_size_override orelse 0,
+                .stacksize = self.base.stack_size,
             });
         },
         .Lib => if (self.base.options.link_mode == .Dynamic) {
@@ -591,7 +644,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
 
     if (codesig) |*csig| {
         try self.writeCodeSignature(comp, csig); // code signing always comes last
-        const emit = self.base.options.emit.?;
+        const emit = self.base.emit;
         try invalidateKernelCache(emit.directory.handle, emit.sub_path);
     }
 
@@ -642,34 +695,20 @@ pub fn resolveLibSystem(
     self: *MachO,
     arena: Allocator,
     comp: *Compilation,
-    search_dirs: []const []const u8,
     out_libs: anytype,
 ) !void {
-    const gpa = self.base.comp.gpa;
-    var tmp_arena_allocator = std.heap.ArenaAllocator.init(gpa);
-    defer tmp_arena_allocator.deinit();
-    const tmp_arena = tmp_arena_allocator.allocator();
-
-    var test_path = std.ArrayList(u8).init(tmp_arena);
-    var checked_paths = std.ArrayList([]const u8).init(tmp_arena);
+    var test_path = std.ArrayList(u8).init(arena);
+    var checked_paths = std.ArrayList([]const u8).init(arena);
 
     success: {
-        for (search_dirs) |dir| if (try accessLibPath(
-            tmp_arena,
-            &test_path,
-            &checked_paths,
-            dir,
-            "libSystem",
-        )) break :success;
-
         if (self.base.options.darwin_sdk_layout) |sdk_layout| switch (sdk_layout) {
             .sdk => {
-                const dir = try fs.path.join(tmp_arena, &[_][]const u8{ self.base.options.sysroot.?, "usr", "lib" });
-                if (try accessLibPath(tmp_arena, &test_path, &checked_paths, dir, "libSystem")) break :success;
+                const dir = try fs.path.join(arena, &[_][]const u8{ self.base.options.sysroot.?, "usr", "lib" });
+                if (try accessLibPath(arena, &test_path, &checked_paths, dir, "libSystem")) break :success;
             },
             .vendored => {
-                const dir = try comp.zig_lib_directory.join(tmp_arena, &[_][]const u8{ "libc", "darwin" });
-                if (try accessLibPath(tmp_arena, &test_path, &checked_paths, dir, "libSystem")) break :success;
+                const dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "darwin" });
+                if (try accessLibPath(arena, &test_path, &checked_paths, dir, "libSystem")) break :success;
             },
         };
 
@@ -1082,7 +1121,7 @@ fn addDylib(self: *MachO, dylib: Dylib, dylib_options: DylibOpts, ctx: *ParseErr
     try self.dylibs.append(gpa, dylib);
 
     const should_link_dylib_even_if_unreachable = blk: {
-        if (self.base.options.dead_strip_dylibs and !dylib_options.needed) break :blk false;
+        if (self.dead_strip_dylibs and !dylib_options.needed) break :blk false;
         break :blk !(dylib_options.dependent or self.referenced_dylibs.contains(gop.value_ptr.*));
     };
 
@@ -1597,7 +1636,8 @@ fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: S
 }
 
 pub fn createMhExecuteHeaderSymbol(self: *MachO) !void {
-    if (self.base.options.output_mode != .Exe) return;
+    const output_mode = self.base.comp.config.output_mode;
+    if (output_mode != .Exe) return;
 
     const gpa = self.base.comp.gpa;
     const sym_index = try self.allocateSymbol();
@@ -1647,10 +1687,11 @@ pub fn createDsoHandleSymbol(self: *MachO) !void {
 }
 
 pub fn resolveSymbols(self: *MachO) !void {
+    const output_mode = self.base.comp.config.output_mode;
     // We add the specified entrypoint as the first unresolved symbols so that
     // we search for it in libraries should there be no object files specified
     // on the linker line.
-    if (self.base.options.output_mode == .Exe) {
+    if (output_mode == .Exe) {
         const entry_name = self.base.options.entry orelse load_commands.default_entry_point;
         _ = try self.addUndefined(entry_name, .{});
     }
@@ -1867,9 +1908,10 @@ fn resolveSymbolsInDylibs(self: *MachO) !void {
 }
 
 fn resolveSymbolsAtLoading(self: *MachO) !void {
-    const is_lib = self.base.options.output_mode == .Lib;
+    const output_mode = self.base.comp.config.output_mode;
+    const is_lib = output_mode == .Lib;
     const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
-    const allow_undef = is_dyn_lib and (self.base.options.allow_shlib_undefined orelse false);
+    const allow_undef = is_dyn_lib and self.base.allow_shlib_undefined;
 
     var next_sym: usize = 0;
     while (next_sym < self.unresolved.count()) {
@@ -2674,12 +2716,12 @@ fn getDeclOutputSection(self: *MachO, decl_index: InternPool.DeclIndex) u8 {
     const val = decl.val;
     const mod = self.base.options.module.?;
     const zig_ty = ty.zigTypeTag(mod);
-    const mode = self.base.options.optimize_mode;
-    const single_threaded = self.base.options.single_threaded;
+    const any_non_single_threaded = self.base.comp.config.any_non_single_threaded;
+    const optimize_mode = self.base.comp.root_mod.optimize_mode;
     const sect_id: u8 = blk: {
         // TODO finish and audit this function
         if (val.isUndefDeep(mod)) {
-            if (mode == .ReleaseFast or mode == .ReleaseSmall) {
+            if (optimize_mode == .ReleaseFast or optimize_mode == .ReleaseSmall) {
                 @panic("TODO __DATA,__bss");
             } else {
                 break :blk self.data_section_index.?;
@@ -2687,7 +2729,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: InternPool.DeclIndex) u8 {
         }
 
         if (val.getVariable(mod)) |variable| {
-            if (variable.is_threadlocal and !single_threaded) {
+            if (variable.is_threadlocal and any_non_single_threaded) {
                 break :blk self.thread_data_section_index.?;
             }
             break :blk self.data_section_index.?;
@@ -2796,8 +2838,6 @@ pub fn updateExports(
     if (self.llvm_object) |llvm_object|
         return llvm_object.updateExports(mod, exported, exports);
 
-    if (self.base.options.emit == null) return;
-
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3093,7 +3133,7 @@ fn populateMissingMetadata(self: *MachO) !void {
     if (self.header_segment_cmd_index == null) {
         // The first __TEXT segment is immovable and covers MachO header and load commands.
         self.header_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
-        const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
+        const ideal_size = self.headerpad_size;
         const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), getPageSize(cpu_arch));
 
         log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size });
@@ -3222,13 +3262,13 @@ fn populateMissingMetadata(self: *MachO) !void {
 }
 
 fn calcPagezeroSize(self: *MachO) u64 {
-    const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize;
+    const output_mode = self.base.comp.config.output_mode;
     const page_size = getPageSize(self.base.options.target.cpu.arch);
-    const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, page_size);
-    if (self.base.options.output_mode == .Lib) return 0;
+    const aligned_pagezero_vmsize = mem.alignBackward(u64, self.pagezero_vmsize, page_size);
+    if (output_mode == .Lib) return 0;
     if (aligned_pagezero_vmsize == 0) return 0;
-    if (aligned_pagezero_vmsize != pagezero_vmsize) {
-        log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize});
+    if (aligned_pagezero_vmsize != self.pagezero_vmsize) {
+        log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{self.pagezero_vmsize});
         log.warn("  rounding down to 0x{x}", .{aligned_pagezero_vmsize});
     }
     return aligned_pagezero_vmsize;
@@ -4685,6 +4725,7 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
 }
 
 pub fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature) !void {
+    const output_mode = self.base.comp.config.output_mode;
     const seg_id = self.header_segment_cmd_index.?;
     const seg = self.segments.items[seg_id];
     const offset = self.codesig_cmd.dataoff;
@@ -4698,7 +4739,7 @@ pub fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *Cod
         .exec_seg_base = seg.fileoff,
         .exec_seg_limit = seg.filesize,
         .file_size = offset,
-        .output_mode = self.base.options.output_mode,
+        .output_mode = output_mode,
     }, buffer.writer());
     assert(buffer.items.len == code_sig.size());
 
@@ -4712,6 +4753,8 @@ pub fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *Cod
 
 /// Writes Mach-O file header.
 pub fn writeHeader(self: *MachO, ncmds: u32, sizeofcmds: u32) !void {
+    const output_mode = self.base.comp.config.output_mode;
+
     var header: macho.mach_header_64 = .{};
     header.flags = macho.MH_NOUNDEFS | macho.MH_DYLDLINK | macho.MH_PIE | macho.MH_TWOLEVEL;
 
@@ -4727,7 +4770,7 @@ pub fn writeHeader(self: *MachO, ncmds: u32, sizeofcmds: u32) !void {
         else => unreachable,
     }
 
-    switch (self.base.options.output_mode) {
+    switch (output_mode) {
         .Exe => {
             header.filetype = macho.MH_EXECUTE;
         },
src/link/NvPtx.zig
@@ -24,46 +24,56 @@ const LlvmObject = @import("../codegen/llvm.zig").Object;
 
 base: link.File,
 llvm_object: *LlvmObject,
-ptx_file_name: []const u8,
 
-pub fn createEmpty(gpa: Allocator, options: link.Options) !*NvPtx {
-    if (!options.use_llvm) return error.PtxArchNotSupported;
+pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*NvPtx {
+    if (build_options.only_c) unreachable;
 
-    if (!options.target.cpu.arch.isNvptx()) return error.PtxArchNotSupported;
+    const target = options.comp.root_mod.resolved_target.result;
+    const use_lld = build_options.have_llvm and options.comp.config.use_lld;
+    const use_llvm = options.comp.config.use_llvm;
 
-    switch (options.target.os.tag) {
+    assert(use_llvm); // Caught by Compilation.Config.resolve.
+    assert(!use_lld); // Caught by Compilation.Config.resolve.
+    assert(target.cpu.arch.isNvptx()); // Caught by Compilation.Config.resolve.
+
+    switch (target.os.tag) {
         // TODO: does it also work with nvcl ?
         .cuda => {},
         else => return error.PtxArchNotSupported,
     }
 
-    const llvm_object = try LlvmObject.create(gpa, options);
-    const nvptx = try gpa.create(NvPtx);
+    const llvm_object = try LlvmObject.create(arena, options);
+    const nvptx = try arena.create(NvPtx);
     nvptx.* = .{
         .base = .{
             .tag = .nvptx,
-            .options = options,
+            .comp = options.comp,
+            .emit = options.emit,
+            .gc_sections = options.gc_sections orelse false,
+            .stack_size = options.stack_size orelse 0,
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
             .file = null,
-            .allocator = gpa,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
+            .function_sections = options.function_sections,
+            .data_sections = options.data_sections,
         },
         .llvm_object = llvm_object,
-        .ptx_file_name = try std.mem.join(gpa, "", &[_][]const u8{ options.root_name, ".ptx" }),
     };
 
     return nvptx;
 }
 
-pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*NvPtx {
-    if (!options.use_llvm) return error.PtxArchNotSupported;
-    assert(options.target.ofmt == .nvptx);
-
-    log.debug("Opening .ptx target file {s}", .{sub_path});
-    return createEmpty(allocator, options);
+pub fn open(arena: Allocator, options: link.File.OpenOptions) !*NvPtx {
+    const target = options.comp.root_mod.resolved_target.result;
+    assert(target.ofmt == .nvptx);
+    return createEmpty(arena, options);
 }
 
 pub fn deinit(self: *NvPtx) void {
-    self.llvm_object.destroy(self.base.allocator);
-    self.base.allocator.free(self.ptx_file_name);
+    self.llvm_object.deinit();
 }
 
 pub fn updateFunc(self: *NvPtx, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
@@ -110,7 +120,7 @@ pub fn flushModule(self: *NvPtx, comp: *Compilation, prog_node: *std.Progress.No
     comp.emit_asm = .{
         // 'null' means using the default cache dir: zig-cache/o/...
         .directory = null,
-        .basename = self.ptx_file_name,
+        .basename = self.base.emit.sub_path,
     };
     defer {
         comp.bin_file.options.emit = outfile;
src/link/Plan9.zig
@@ -318,12 +318,12 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
         .magic = try aout.magicFromArch(self.base.options.target.cpu.arch),
     };
     // a / will always be in a file path
-    try self.file_segments.put(self.base.allocator, "/", 1);
+    try self.file_segments.put(gpa, "/", 1);
     return self;
 }
 
 fn putFn(self: *Plan9, decl_index: InternPool.DeclIndex, out: FnDeclOutput) !void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const mod = self.base.options.module.?;
     const decl = mod.declPtr(decl_index);
     const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope(mod));
@@ -379,6 +379,7 @@ fn putFn(self: *Plan9, decl_index: InternPool.DeclIndex, out: FnDeclOutput) !voi
 }
 
 fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !void {
+    const gpa = self.base.comp.gpa;
     const sep = std.fs.path.sep;
     var it = std.mem.tokenizeScalar(u8, path, sep);
     while (it.next()) |component| {
@@ -386,7 +387,7 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
             try a.writer().writeInt(u16, num, .big);
         } else {
             self.file_segments_i += 1;
-            try self.file_segments.put(self.base.allocator, component, self.file_segments_i);
+            try self.file_segments.put(gpa, component, self.file_segments_i);
             try a.writer().writeInt(u16, self.file_segments_i, .big);
         }
     }
@@ -397,6 +398,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
 
+    const gpa = self.base.comp.gpa;
     const func = mod.funcInfo(func_index);
     const decl_index = func.owner_decl;
     const decl = mod.declPtr(decl_index);
@@ -404,10 +406,10 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
 
     const atom_idx = try self.seeDecl(decl_index);
 
-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
     var dbg_info_output: DebugInfoOutput = .{
-        .dbg_line = std.ArrayList(u8).init(self.base.allocator),
+        .dbg_line = std.ArrayList(u8).init(gpa),
         .start_line = null,
         .end_line = undefined,
         .pcop_change_index = null,
@@ -448,14 +450,15 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
 }
 
 pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
+    const gpa = self.base.comp.gpa;
     _ = try self.seeDecl(decl_index);
-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
 
     const mod = self.base.options.module.?;
     const decl = mod.declPtr(decl_index);
 
-    const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
+    const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
         gop.value_ptr.* = .{};
     }
@@ -465,7 +468,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: InternPool.De
 
     const index = unnamed_consts.items.len;
     // name is freed when the unnamed const is freed
-    const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl_name, index });
+    const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
 
     const sym_index = try self.allocateSymbolIndex();
     const new_atom_idx = try self.createAtom();
@@ -498,17 +501,18 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: InternPool.De
         },
     };
     // duped_code is freed when the unnamed const is freed
-    const duped_code = try self.base.allocator.dupe(u8, code);
-    errdefer self.base.allocator.free(duped_code);
+    const duped_code = try gpa.dupe(u8, code);
+    errdefer gpa.free(duped_code);
     const new_atom = self.getAtomPtr(new_atom_idx);
     new_atom.* = info;
     new_atom.code = .{ .code_ptr = duped_code.ptr, .other = .{ .code_len = duped_code.len } };
-    try unnamed_consts.append(self.base.allocator, new_atom_idx);
+    try unnamed_consts.append(gpa, new_atom_idx);
     // we return the new_atom_idx to codegen
     return new_atom_idx;
 }
 
 pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) !void {
+    const gpa = self.base.comp.gpa;
     const decl = mod.declPtr(decl_index);
 
     if (decl.isExtern(mod)) {
@@ -517,7 +521,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex)
     }
     const atom_idx = try self.seeDecl(decl_index);
 
-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    var code_buffer = std.ArrayList(u8).init(gpa);
     defer code_buffer.deinit();
     const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
     // TODO we need the symbol index for symbol in the table of locals for the containing atom
@@ -535,16 +539,17 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex)
             return;
         },
     };
-    try self.data_decl_table.ensureUnusedCapacity(self.base.allocator, 1);
-    const duped_code = try self.base.allocator.dupe(u8, code);
+    try self.data_decl_table.ensureUnusedCapacity(gpa, 1);
+    const duped_code = try gpa.dupe(u8, code);
     self.getAtomPtr(self.decls.get(decl_index).?.index).code = .{ .code_ptr = null, .other = .{ .decl_index = decl_index } };
     if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
-        self.base.allocator.free(old_entry.value);
+        gpa.free(old_entry.value);
     }
     return self.updateFinish(decl_index);
 }
 /// called at the end of update{Decl,Func}
 fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void {
+    const gpa = self.base.comp.gpa;
     const mod = self.base.options.module.?;
     const decl = mod.declPtr(decl_index);
     const is_fn = (decl.ty.zigTypeTag(mod) == .Fn);
@@ -558,7 +563,7 @@ fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void {
     const sym: aout.Sym = .{
         .value = undefined, // the value of stuff gets filled in in flushModule
         .type = atom.type,
-        .name = try self.base.allocator.dupe(u8, mod.intern_pool.stringToSlice(decl.name)),
+        .name = try gpa.dupe(u8, mod.intern_pool.stringToSlice(decl.name)),
     };
 
     if (atom.sym_index) |s| {
@@ -571,10 +576,11 @@ fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void {
 }
 
 fn allocateSymbolIndex(self: *Plan9) !usize {
+    const gpa = self.base.comp.gpa;
     if (self.syms_index_free_list.popOrNull()) |i| {
         return i;
     } else {
-        _ = try self.syms.addOne(self.base.allocator);
+        _ = try self.syms.addOne(gpa);
         return self.syms.items.len - 1;
     }
 }
@@ -589,7 +595,8 @@ fn allocateGotIndex(self: *Plan9) usize {
 }
 
 pub fn flush(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
-    assert(!self.base.options.use_lld);
+    const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
+    assert(!use_lld);
 
     switch (self.base.options.effectiveOutputMode()) {
         .Exe => {},
@@ -650,7 +657,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
 
-    _ = comp;
+    const gpa = comp.gpa;
+
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -691,12 +699,12 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
     const atom_count = self.atomCount();
     assert(self.got_len == atom_count + self.got_index_free_list.items.len);
     const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8;
-    var got_table = try self.base.allocator.alloc(u8, got_size);
-    defer self.base.allocator.free(got_table);
+    var got_table = try gpa.alloc(u8, got_size);
+    defer gpa.free(got_table);
 
     // + 4 for header, got, symbols, linecountinfo
-    var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.atomCount() + 4 - self.externCount());
-    defer self.base.allocator.free(iovecs);
+    var iovecs = try gpa.alloc(std.os.iovec_const, self.atomCount() + 4 - self.externCount());
+    defer gpa.free(iovecs);
 
     const file = self.base.file.?;
 
@@ -709,7 +717,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
     var iovecs_i: usize = 1;
     var text_i: u64 = 0;
 
-    var linecountinfo = std.ArrayList(u8).init(self.base.allocator);
+    var linecountinfo = std.ArrayList(u8).init(gpa);
     defer linecountinfo.deinit();
     // text
     {
@@ -901,10 +909,10 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             }
         }
     }
-    var sym_buf = std.ArrayList(u8).init(self.base.allocator);
+    var sym_buf = std.ArrayList(u8).init(gpa);
     try self.writeSyms(&sym_buf);
     const syms = try sym_buf.toOwnedSlice();
-    defer self.base.allocator.free(syms);
+    defer gpa.free(syms);
     assert(2 + self.atomCount() - self.externCount() == iovecs_i); // we didn't write all the decls
     iovecs[iovecs_i] = .{ .iov_base = syms.ptr, .iov_len = syms.len };
     iovecs_i += 1;
@@ -985,6 +993,7 @@ fn addDeclExports(
     decl_index: InternPool.DeclIndex,
     exports: []const *Module.Export,
 ) !void {
+    const gpa = self.base.comp.gpa;
     const metadata = self.decls.getPtr(decl_index).?;
     const atom = self.getAtom(metadata.index);
 
@@ -994,7 +1003,7 @@ fn addDeclExports(
         if (exp.opts.section.unwrap()) |section_name| {
             if (!mod.intern_pool.stringEqlSlice(section_name, ".text") and !mod.intern_pool.stringEqlSlice(section_name, ".data")) {
                 try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create(
-                    self.base.allocator,
+                    gpa,
                     mod.declPtr(decl_index).srcLoc(mod),
                     "plan9 does not support extra sections",
                     .{},
@@ -1005,19 +1014,20 @@ fn addDeclExports(
         const sym = .{
             .value = atom.offset.?,
             .type = atom.type.toGlobal(),
-            .name = try self.base.allocator.dupe(u8, exp_name),
+            .name = try gpa.dupe(u8, exp_name),
         };
 
         if (metadata.getExport(self, exp_name)) |i| {
             self.syms.items[i] = sym;
         } else {
-            try self.syms.append(self.base.allocator, sym);
-            try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
+            try self.syms.append(gpa, sym);
+            try metadata.exports.append(gpa, self.syms.items.len - 1);
         }
     }
 }
 
 pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void {
+    const gpa = self.base.comp.gpa;
     // TODO audit the lifetimes of decls table entries. It's possible to get
     // freeDecl without any updateDecl in between.
     // However that is planned to change, see the TODO comment in Module.zig
@@ -1029,17 +1039,17 @@ pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void {
         const symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?;
         var submap = symidx_and_submap.functions;
         if (submap.fetchSwapRemove(decl_index)) |removed_entry| {
-            self.base.allocator.free(removed_entry.value.code);
-            self.base.allocator.free(removed_entry.value.lineinfo);
+            gpa.free(removed_entry.value.code);
+            gpa.free(removed_entry.value.lineinfo);
         }
         if (submap.count() == 0) {
             self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol;
-            self.syms_index_free_list.append(self.base.allocator, symidx_and_submap.sym_index) catch {};
-            submap.deinit(self.base.allocator);
+            self.syms_index_free_list.append(gpa, symidx_and_submap.sym_index) catch {};
+            submap.deinit(gpa);
         }
     } else {
         if (self.data_decl_table.fetchSwapRemove(decl_index)) |removed_entry| {
-            self.base.allocator.free(removed_entry.value);
+            gpa.free(removed_entry.value);
         }
     }
     if (self.decls.fetchRemove(decl_index)) |const_kv| {
@@ -1047,35 +1057,36 @@ pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void {
         const atom = self.getAtom(kv.value.index);
         if (atom.got_index) |i| {
             // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
-            self.got_index_free_list.append(self.base.allocator, i) catch {};
+            self.got_index_free_list.append(gpa, i) catch {};
         }
         if (atom.sym_index) |i| {
-            self.syms_index_free_list.append(self.base.allocator, i) catch {};
+            self.syms_index_free_list.append(gpa, i) catch {};
             self.syms.items[i] = aout.Sym.undefined_symbol;
         }
-        kv.value.exports.deinit(self.base.allocator);
+        kv.value.exports.deinit(gpa);
     }
     self.freeUnnamedConsts(decl_index);
     {
         const atom_index = self.decls.get(decl_index).?.index;
         const relocs = self.relocs.getPtr(atom_index) orelse return;
-        relocs.clearAndFree(self.base.allocator);
+        relocs.clearAndFree(gpa);
         assert(self.relocs.remove(atom_index));
     }
 }
 fn freeUnnamedConsts(self: *Plan9, decl_index: InternPool.DeclIndex) void {
+    const gpa = self.base.comp.gpa;
     const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
     for (unnamed_consts.items) |atom_idx| {
         const atom = self.getAtom(atom_idx);
-        self.base.allocator.free(self.syms.items[atom.sym_index.?].name);
+        gpa.free(self.syms.items[atom.sym_index.?].name);
         self.syms.items[atom.sym_index.?] = aout.Sym.undefined_symbol;
-        self.syms_index_free_list.append(self.base.allocator, atom.sym_index.?) catch {};
+        self.syms_index_free_list.append(gpa, atom.sym_index.?) catch {};
     }
-    unnamed_consts.clearAndFree(self.base.allocator);
+    unnamed_consts.clearAndFree(gpa);
 }
 
 fn createAtom(self: *Plan9) !Atom.Index {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const index = @as(Atom.Index, @intCast(self.atoms.items.len));
     const atom = try self.atoms.addOne(gpa);
     atom.* = .{
@@ -1089,7 +1100,8 @@ fn createAtom(self: *Plan9) !Atom.Index {
 }
 
 pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index {
-    const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+    const gpa = self.base.comp.gpa;
+    const gop = try self.decls.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
         const index = try self.createAtom();
         self.getAtomPtr(index).got_index = self.allocateGotIndex();
@@ -1134,7 +1146,8 @@ pub fn updateExports(
 }
 
 pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index {
-    const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(self.base.options.module.?));
+    const gpa = self.base.comp.gpa;
+    const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(self.base.options.module.?));
     errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
 
     if (!gop.found_existing) gop.value_ptr.* = .{};
@@ -1160,7 +1173,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.In
 }
 
 fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Index) !void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const mod = self.base.options.module.?;
 
     var required_alignment: InternPool.Alignment = .none;
@@ -1206,8 +1219,8 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
         },
     };
     // duped_code is freed when the atom is freed
-    const duped_code = try self.base.allocator.dupe(u8, code);
-    errdefer self.base.allocator.free(duped_code);
+    const duped_code = try gpa.dupe(u8, code);
+    errdefer gpa.free(duped_code);
     self.getAtomPtr(atom_index).code = .{
         .code_ptr = duped_code.ptr,
         .other = .{ .code_len = duped_code.len },
@@ -1215,13 +1228,13 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
 }
 
 pub fn deinit(self: *Plan9) void {
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     {
         var it = self.relocs.valueIterator();
         while (it.next()) |relocs| {
-            relocs.deinit(self.base.allocator);
+            relocs.deinit(gpa);
         }
-        self.relocs.deinit(self.base.allocator);
+        self.relocs.deinit(gpa);
     }
     // free the unnamed consts
     var it_unc = self.unnamed_const_atoms.iterator();
@@ -1280,24 +1293,36 @@ pub fn deinit(self: *Plan9) void {
     }
 }
 
-pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
-    if (options.use_llvm)
-        return error.LLVMBackendDoesNotSupportPlan9;
-    assert(options.target.ofmt == .plan9);
+pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Plan9 {
+    if (build_options.only_c) unreachable;
+
+    const target = options.comp.root_mod.resolved_target.result;
+    const use_lld = build_options.have_llvm and options.comp.config.use_lld;
+    const use_llvm = options.comp.config.use_llvm;
 
-    const self = try createEmpty(allocator, options);
+    assert(!use_llvm); // Caught by Compilation.Config.resolve.
+    assert(!use_lld); // Caught by Compilation.Config.resolve.
+    assert(target.ofmt == .plan9);
+
+    const self = try createEmpty(arena, options);
     errdefer self.base.destroy();
 
-    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
+    const file = try options.emit.directory.handle.createFile(options.emit.sub_path, .{
         .read = true,
-        .mode = link.determineMode(options),
+        .mode = link.File.determineMode(
+            use_lld,
+            options.comp.config.output_mode,
+            options.comp.config.link_mode,
+        ),
     });
     errdefer file.close();
     self.base.file = file;
 
-    self.bases = defaultBaseAddrs(options.target.cpu.arch);
+    self.bases = defaultBaseAddrs(target.cpu.arch);
+
+    const gpa = options.comp.gpa;
 
-    try self.syms.appendSlice(self.base.allocator, &.{
+    try self.syms.appendSlice(gpa, &.{
         // we include the global offset table to make it easier for debugging
         .{
             .value = self.getAddr(0, .d), // the global offset table starts at 0
@@ -1490,7 +1515,7 @@ pub fn lowerAnonDecl(self: *Plan9, decl_val: InternPool.Index, src_loc: Module.S
     // be used by more than one function, however, its address is being used so we need
     // to put it in some location.
     // ...
-    const gpa = self.base.allocator;
+    const gpa = self.base.comp.gpa;
     const gop = try self.anon_decls.getOrPut(gpa, decl_val);
     const mod = self.base.options.module.?;
     if (!gop.found_existing) {
@@ -1538,11 +1563,12 @@ pub fn getAnonDeclVAddr(self: *Plan9, decl_val: InternPool.Index, reloc_info: li
 }
 
 pub fn addReloc(self: *Plan9, parent_index: Atom.Index, reloc: Reloc) !void {
-    const gop = try self.relocs.getOrPut(self.base.allocator, parent_index);
+    const gpa = self.base.comp.gpa;
+    const gop = try self.relocs.getOrPut(gpa, parent_index);
     if (!gop.found_existing) {
         gop.value_ptr.* = .{};
     }
-    try gop.value_ptr.append(self.base.allocator, reloc);
+    try gop.value_ptr.append(gpa, reloc);
 }
 
 pub fn getAtom(self: *const Plan9, index: Atom.Index) Atom {
src/link/SpirV.zig
@@ -47,48 +47,65 @@ base: link.File,
 
 object: codegen.Object,
 
-pub fn createEmpty(gpa: Allocator, options: link.Options) !*SpirV {
-    const self = try gpa.create(SpirV);
+pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*SpirV {
+    const gpa = options.comp.gpa;
+    const target = options.comp.root_mod.resolved_target.result;
+
+    const self = try arena.create(SpirV);
     self.* = .{
         .base = .{
             .tag = .spirv,
-            .options = options,
+            .comp = options.comp,
+            .emit = options.emit,
+            .gc_sections = options.gc_sections orelse false,
+            .stack_size = options.stack_size orelse 0,
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
             .file = null,
-            .allocator = gpa,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
+            .function_sections = options.function_sections,
+            .data_sections = options.data_sections,
         },
         .object = codegen.Object.init(gpa),
     };
     errdefer self.deinit();
 
-    // TODO: Figure out where to put all of these
-    switch (options.target.cpu.arch) {
+    switch (target.cpu.arch) {
         .spirv32, .spirv64 => {},
-        else => return error.TODOArchNotSupported,
+        else => unreachable, // Caught by Compilation.Config.resolve.
     }
 
-    switch (options.target.os.tag) {
+    switch (target.os.tag) {
         .opencl, .glsl450, .vulkan => {},
-        else => return error.TODOOsNotSupported,
+        else => unreachable, // Caught by Compilation.Config.resolve.
     }
 
-    if (options.target.abi != .none) {
-        return error.TODOAbiNotSupported;
-    }
+    assert(target.abi == .none); // Caught by Compilation.Config.resolve.
 
     return self;
 }
 
-pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*SpirV {
-    assert(options.target.ofmt == .spirv);
+pub fn open(arena: Allocator, options: link.File.OpenOptions) !*SpirV {
+    if (build_options.only_c) unreachable;
+
+    const target = options.comp.root_mod.resolved_target.result;
+    const use_lld = build_options.have_llvm and options.comp.config.use_lld;
+    const use_llvm = options.comp.config.use_llvm;
 
-    if (options.use_llvm) return error.LLVM_BackendIsTODO_ForSpirV; // TODO: LLVM Doesn't support SpirV at all.
-    if (options.use_lld) return error.LLD_LinkingIsTODO_ForSpirV; // TODO: LLD Doesn't support SpirV at all.
+    assert(!use_llvm); // Caught by Compilation.Config.resolve.
+    assert(!use_lld); // Caught by Compilation.Config.resolve.
+    assert(target.ofmt == .spirv); // Caught by Compilation.Config.resolve.
 
-    const spirv = try createEmpty(allocator, options);
+    const spirv = try createEmpty(arena, options);
     errdefer spirv.base.destroy();
 
     // TODO: read the file and keep valid parts instead of truncating
-    const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true });
+    const file = try options.emit.directory.handle.createFile(options.emit.sub_path, .{
+        .truncate = true,
+        .read = true,
+    });
     spirv.base.file = file;
     return spirv;
 }
@@ -150,11 +167,7 @@ pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void {
 }
 
 pub fn flush(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
-    if (build_options.have_llvm and self.base.options.use_lld) {
-        return error.LLD_LinkingIsTODO_ForSpirV; // TODO: LLD Doesn't support SpirV at all.
-    } else {
-        return self.flushModule(comp, prog_node);
-    }
+    return self.flushModule(comp, prog_node);
 }
 
 pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
src/link/Wasm.zig
@@ -191,6 +191,9 @@ synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{},
 /// Map for storing anonymous declarations. Each anonymous decl maps to its Atom's index.
 anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Atom.Index) = .{},
 
+import_table: bool,
+export_table: bool,
+
 pub const Alignment = types.Alignment;
 
 pub const Segment = struct {
@@ -363,63 +366,71 @@ pub const StringTable = struct {
     }
 };
 
-pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
-    assert(options.target.ofmt == .wasm);
+pub fn open(arena: Allocator, options: link.File.OpenOptions) !*Wasm {
+    if (build_options.only_c) unreachable;
+    const gpa = options.comp.gpa;
+    const target = options.comp.root_mod.resolved_target.result;
+    assert(target.ofmt == .wasm);
 
-    if (options.use_llvm and options.use_lld) {
-        return createEmpty(allocator, options);
-    }
+    const use_lld = build_options.have_llvm and options.comp.config.use_lld;
+    const use_llvm = options.comp.config.use_llvm;
+    const output_mode = options.comp.config.output_mode;
 
-    const wasm_bin = try createEmpty(allocator, options);
-    errdefer wasm_bin.base.destroy();
+    const wasm = try createEmpty(arena, options);
+    errdefer wasm.base.destroy();
 
-    // We are not using LLD at this point, so ensure we set the intermediary basename
-    if (build_options.have_llvm and options.use_llvm and options.module != null) {
-        // TODO this intermediary_basename isn't enough; in the case of `zig build-exe`,
-        // we also want to put the intermediary object file in the cache while the
-        // main emit directory is the cwd.
-        wasm_bin.base.intermediary_basename = try std.fmt.allocPrint(allocator, "{s}{s}", .{
-            options.emit.?.sub_path, options.target.ofmt.fileExt(options.target.cpu.arch),
-        });
+    if (use_lld and use_llvm) {
+        // LLVM emits the object file; LLD links it into the final product.
+        return wasm;
     }
 
+    const sub_path = if (!use_lld) options.emit.sub_path else p: {
+        // Open a temporary object file, not the final output file because we
+        // want to link with LLD.
+        const o_file_path = try std.fmt.allocPrint(arena, "{s}{s}", .{
+            options.emit.sub_path, target.ofmt.fileExt(target.cpu.arch),
+        });
+        wasm.base.intermediary_basename = o_file_path;
+        break :p o_file_path;
+    };
+
     // TODO: read the file and keep valid parts instead of truncating
-    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
+    const file = try options.emit.directory.handle.createFile(sub_path, .{
         .truncate = true,
         .read = true,
         .mode = if (fs.has_executable_bit)
-            if (options.target.os.tag == .wasi and options.output_mode == .Exe)
+            if (target.os.tag == .wasi and output_mode == .Exe)
                 fs.File.default_mode | 0b001_000_000
             else
                 fs.File.default_mode
         else
             0,
     });
-    wasm_bin.base.file = file;
-    wasm_bin.name = sub_path;
+    wasm.base.file = file;
+    wasm.name = sub_path;
 
     // create stack pointer symbol
     {
-        const loc = try wasm_bin.createSyntheticSymbol("__stack_pointer", .global);
-        const symbol = loc.getSymbol(wasm_bin);
+        const loc = try wasm.createSyntheticSymbol("__stack_pointer", .global);
+        const symbol = loc.getSymbol(wasm);
         // For object files we will import the stack pointer symbol
-        if (options.output_mode == .Obj) {
+        if (output_mode == .Obj) {
             symbol.setUndefined(true);
-            symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count));
-            wasm_bin.imported_globals_count += 1;
-            try wasm_bin.imports.putNoClobber(
-                allocator,
+            symbol.index = @intCast(wasm.imported_globals_count);
+            wasm.imported_globals_count += 1;
+            try wasm.imports.putNoClobber(
+                gpa,
                 loc,
                 .{
-                    .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
+                    .module_name = try wasm.string_table.put(gpa, wasm.host_name),
                     .name = symbol.name,
                     .kind = .{ .global = .{ .valtype = .i32, .mutable = true } },
                 },
             );
         } else {
-            symbol.index = @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
+            symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len);
             symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
-            const global = try wasm_bin.wasm_globals.addOne(allocator);
+            const global = try wasm.wasm_globals.addOne(gpa);
             global.* = .{
                 .global_type = .{
                     .valtype = .i32,
@@ -432,25 +443,25 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
 
     // create indirect function pointer symbol
     {
-        const loc = try wasm_bin.createSyntheticSymbol("__indirect_function_table", .table);
-        const symbol = loc.getSymbol(wasm_bin);
+        const loc = try wasm.createSyntheticSymbol("__indirect_function_table", .table);
+        const symbol = loc.getSymbol(wasm);
         const table: std.wasm.Table = .{
             .limits = .{ .flags = 0, .min = 0, .max = undefined }, // will be overwritten during `mapFunctionTable`
             .reftype = .funcref,
         };
-        if (options.output_mode == .Obj or options.import_table) {
+        if (output_mode == .Obj or wasm.import_table) {
             symbol.setUndefined(true);
-            symbol.index = @intCast(wasm_bin.imported_tables_count);
-            wasm_bin.imported_tables_count += 1;
-            try wasm_bin.imports.put(allocator, loc, .{
-                .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
+            symbol.index = @intCast(wasm.imported_tables_count);
+            wasm.imported_tables_count += 1;
+            try wasm.imports.put(gpa, loc, .{
+                .module_name = try wasm.string_table.put(gpa, wasm.host_name),
                 .name = symbol.name,
                 .kind = .{ .table = table },
             });
         } else {
-            symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count + wasm_bin.tables.items.len));
-            try wasm_bin.tables.append(allocator, table);
-            if (options.export_table) {
+            symbol.index = @as(u32, @intCast(wasm.imported_tables_count + wasm.tables.items.len));
+            try wasm.tables.append(gpa, table);
+            if (wasm.export_table) {
                 symbol.setFlag(.WASM_SYM_EXPORTED);
             } else {
                 symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
@@ -460,8 +471,8 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
 
     // create __wasm_call_ctors
     {
-        const loc = try wasm_bin.createSyntheticSymbol("__wasm_call_ctors", .function);
-        const symbol = loc.getSymbol(wasm_bin);
+        const loc = try wasm.createSyntheticSymbol("__wasm_call_ctors", .function);
+        const symbol = loc.getSymbol(wasm);
         symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
         // we do not know the function index until after we merged all sections.
         // Therefore we set `symbol.index` and create its corresponding references
@@ -469,67 +480,76 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
     }
 
     // shared-memory symbols for TLS support
-    if (wasm_bin.base.options.shared_memory) {
+    if (wasm.base.options.shared_memory) {
         {
-            const loc = try wasm_bin.createSyntheticSymbol("__tls_base", .global);
-            const symbol = loc.getSymbol(wasm_bin);
+            const loc = try wasm.createSyntheticSymbol("__tls_base", .global);
+            const symbol = loc.getSymbol(wasm);
             symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
-            symbol.index = @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
-            try wasm_bin.wasm_globals.append(wasm_bin.base.allocator, .{
+            symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len);
+            try wasm.wasm_globals.append(gpa, .{
                 .global_type = .{ .valtype = .i32, .mutable = true },
                 .init = .{ .i32_const = undefined },
             });
         }
         {
-            const loc = try wasm_bin.createSyntheticSymbol("__tls_size", .global);
-            const symbol = loc.getSymbol(wasm_bin);
+            const loc = try wasm.createSyntheticSymbol("__tls_size", .global);
+            const symbol = loc.getSymbol(wasm);
             symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
-            symbol.index = @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
-            try wasm_bin.wasm_globals.append(wasm_bin.base.allocator, .{
+            symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len);
+            try wasm.wasm_globals.append(gpa, .{
                 .global_type = .{ .valtype = .i32, .mutable = false },
                 .init = .{ .i32_const = undefined },
             });
         }
         {
-            const loc = try wasm_bin.createSyntheticSymbol("__tls_align", .global);
-            const symbol = loc.getSymbol(wasm_bin);
+            const loc = try wasm.createSyntheticSymbol("__tls_align", .global);
+            const symbol = loc.getSymbol(wasm);
             symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
-            symbol.index = @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
-            try wasm_bin.wasm_globals.append(wasm_bin.base.allocator, .{
+            symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len);
+            try wasm.wasm_globals.append(gpa, .{
                 .global_type = .{ .valtype = .i32, .mutable = false },
                 .init = .{ .i32_const = undefined },
             });
         }
         {
-            const loc = try wasm_bin.createSyntheticSymbol("__wasm_init_tls", .function);
-            const symbol = loc.getSymbol(wasm_bin);
+            const loc = try wasm.createSyntheticSymbol("__wasm_init_tls", .function);
+            const symbol = loc.getSymbol(wasm);
             symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
         }
     }
 
-    // if (!options.strip and options.module != null) {
-    //     wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, .dwarf32);
-    //     try wasm_bin.initDebugSections();
-    // }
-
-    return wasm_bin;
+    return wasm;
 }
 
-pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
-    const wasm = try gpa.create(Wasm);
-    errdefer gpa.destroy(wasm);
+pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Wasm {
+    const use_llvm = options.comp.config.use_llvm;
+    const output_mode = options.comp.config.output_mode;
+
+    const wasm = try arena.create(Wasm);
     wasm.* = .{
         .base = .{
             .tag = .wasm,
-            .options = options,
+            .comp = options.comp,
+            .emit = options.emit,
+            .gc_sections = options.gc_sections orelse (output_mode != .Obj),
+            .stack_size = options.stack_size orelse std.wasm.page_size * 16, // 1MB
+            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
             .file = null,
-            .allocator = gpa,
+            .disable_lld_caching = options.disable_lld_caching,
+            .build_id = options.build_id,
+            .rpath_list = options.rpath_list,
+            .force_undefined_symbols = options.force_undefined_symbols,
+            .debug_format = options.debug_format orelse .{ .dwarf = .@"32" },
+            .function_sections = options.function_sections,
+            .data_sections = options.data_sections,
         },
         .name = undefined,
+        .import_table = options.import_table,
+        .export_table = options.export_table,
     };
 
-    if (options.use_llvm) {
-        wasm.llvm_object = try LlvmObject.create(gpa, options);
+    if (use_llvm) {
+        wasm.llvm_object = try LlvmObject.create(arena, options);
     }
     return wasm;
 }
@@ -537,22 +557,24 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
 /// For a given name, creates a new global synthetic symbol.
 /// Leaves index undefined and the default flags (0).
 fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !SymbolLoc {
-    const name_offset = try wasm.string_table.put(wasm.base.allocator, name);
+    const gpa = wasm.base.comp.gpa;
+    const name_offset = try wasm.string_table.put(gpa, name);
     return wasm.createSyntheticSymbolOffset(name_offset, tag);
 }
 
 fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc {
     const sym_index = @as(u32, @intCast(wasm.symbols.items.len));
     const loc: SymbolLoc = .{ .index = sym_index, .file = null };
-    try wasm.symbols.append(wasm.base.allocator, .{
+    const gpa = wasm.base.comp.gpa;
+    try wasm.symbols.append(gpa, .{
         .name = name_offset,
         .flags = 0,
         .tag = tag,
         .index = undefined,
         .virtual_address = undefined,
     });
-    try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, loc, {});
-    try wasm.globals.put(wasm.base.allocator, name_offset, loc);
+    try wasm.resolved_symbols.putNoClobber(gpa, loc, {});
+    try wasm.globals.put(gpa, name_offset, loc);
     return loc;
 }
 
@@ -589,12 +611,13 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
     const file = try fs.cwd().openFile(path, .{});
     errdefer file.close();
 
-    var object = Object.create(wasm.base.allocator, file, path, null) catch |err| switch (err) {
+    const gpa = wasm.base.comp.gpa;
+    var object = Object.create(gpa, file, path, null) catch |err| switch (err) {
         error.InvalidMagicByte, error.NotObjectFile => return false,
         else => |e| return e,
     };
-    errdefer object.deinit(wasm.base.allocator);
-    try wasm.objects.append(wasm.base.allocator, object);
+    errdefer object.deinit(gpa);
+    try wasm.objects.append(gpa, object);
     return true;
 }
 
@@ -602,7 +625,8 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
 /// When the index was not found, a new `Atom` will be created, and its index will be returned.
 /// The newly created Atom is empty with default fields as specified by `Atom.empty`.
 pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index {
-    const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
+    const gpa = wasm.base.comp.gpa;
+    const gop = try wasm.decls.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
         const atom_index = try wasm.createAtom();
         gop.value_ptr.* = atom_index;
@@ -611,18 +635,19 @@ pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) !At
         const mod = wasm.base.options.module.?;
         const decl = mod.declPtr(decl_index);
         const full_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
-        symbol.name = try wasm.string_table.put(wasm.base.allocator, full_name);
+        symbol.name = try wasm.string_table.put(gpa, full_name);
     }
     return gop.value_ptr.*;
 }
 
 /// Creates a new empty `Atom` and returns its `Atom.Index`
 fn createAtom(wasm: *Wasm) !Atom.Index {
-    const index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
-    const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+    const gpa = wasm.base.comp.gpa;
+    const index: Atom.Index = @intCast(wasm.managed_atoms.items.len);
+    const atom = try wasm.managed_atoms.addOne(gpa);
     atom.* = Atom.empty;
     atom.sym_index = try wasm.allocateSymbol();
-    try wasm.symbol_atom.putNoClobber(wasm.base.allocator, .{ .file = null, .index = atom.sym_index }, index);
+    try wasm.symbol_atom.putNoClobber(gpa, .{ .file = null, .index = atom.sym_index }, index);
 
     return index;
 }
@@ -644,6 +669,8 @@ pub inline fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
 /// When false, it will only link with object files that contain symbols that
 /// are referenced by other object files or Zig code.
 fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
+    const gpa = wasm.base.comp.gpa;
+
     const file = try fs.cwd().openFile(path, .{});
     errdefer file.close();
 
@@ -651,25 +678,25 @@ fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
         .file = file,
         .name = path,
     };
-    archive.parse(wasm.base.allocator) catch |err| switch (err) {
+    archive.parse(gpa) catch |err| switch (err) {
         error.EndOfStream, error.NotArchive => {
-            archive.deinit(wasm.base.allocator);
+            archive.deinit(gpa);
             return false;
         },
         else => |e| return e,
     };
 
     if (!force_load) {
-        errdefer archive.deinit(wasm.base.allocator);
-        try wasm.archives.append(wasm.base.allocator, archive);
+        errdefer archive.deinit(gpa);
+        try wasm.archives.append(gpa, archive);
         return true;
     }
-    defer archive.deinit(wasm.base.allocator);
+    defer archive.deinit(gpa);
 
     // In this case we must force link all embedded object files within the archive
     // We loop over all symbols, and then group them by offset as the offset
     // notates where the object file starts.
-    var offsets = std.AutoArrayHashMap(u32, void).init(wasm.base.allocator);
+    var offsets = std.AutoArrayHashMap(u32, void).init(gpa);
     defer offsets.deinit();
     for (archive.toc.values()) |symbol_offsets| {
         for (symbol_offsets.items) |sym_offset| {
@@ -678,8 +705,8 @@ fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
     }
 
     for (offsets.keys()) |file_offset| {
-        const object = try wasm.objects.addOne(wasm.base.allocator);
-        object.* = try archive.parseObject(wasm.base.allocator, file_offset);
+        const object = try wasm.objects.addOne(gpa);
+        object.* = try archive.parseObject(gpa, file_offset);
     }
 
     return true;
@@ -695,6 +722,7 @@ fn requiresTLSReloc(wasm: *const Wasm) bool {
 }
 
 fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
+    const gpa = wasm.base.comp.gpa;
     const object: Object = wasm.objects.items[object_index];
     log.debug("Resolving symbols in object: '{s}'", .{object.name});
 
@@ -708,7 +736,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
         if (mem.eql(u8, sym_name, "__indirect_function_table")) {
             continue;
         }
-        const sym_name_index = try wasm.string_table.put(wasm.base.allocator, sym_name);
+        const sym_name_index = try wasm.string_table.put(gpa, sym_name);
 
         if (symbol.isLocal()) {
             if (symbol.isUndefined()) {
@@ -716,17 +744,17 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
                 log.err("  symbol '{s}' defined in '{s}'", .{ sym_name, object.name });
                 return error.UndefinedLocal;
             }
-            try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, location, {});
+            try wasm.resolved_symbols.putNoClobber(gpa, location, {});
             continue;
         }
 
-        const maybe_existing = try wasm.globals.getOrPut(wasm.base.allocator, sym_name_index);
+        const maybe_existing = try wasm.globals.getOrPut(gpa, sym_name_index);
         if (!maybe_existing.found_existing) {
             maybe_existing.value_ptr.* = location;
-            try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, location, {});
+            try wasm.resolved_symbols.putNoClobber(gpa, location, {});
 
             if (symbol.isUndefined()) {
-                try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name_index, location);
+                try wasm.undefs.putNoClobber(gpa, sym_name_index, location);
             }
             continue;
         }
@@ -753,7 +781,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
                 return error.SymbolCollision;
             }
 
-            try wasm.discarded.put(wasm.base.allocator, location, existing_loc);
+            try wasm.discarded.put(gpa, location, existing_loc);
             continue; // Do not overwrite defined symbols with undefined symbols
         }
 
@@ -791,7 +819,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
             }
 
             // both undefined so skip overwriting existing symbol and discard the new symbol
-            try wasm.discarded.put(wasm.base.allocator, location, existing_loc);
+            try wasm.discarded.put(gpa, location, existing_loc);
             continue;
         }
 
@@ -822,7 +850,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
         // symbol is weak and the new one isn't, in which case we *do* overwrite it.
         if (existing_sym.isWeak() and symbol.isWeak()) blk: {
             if (existing_sym.isUndefined() and !symbol.isUndefined()) break :blk;
-            try wasm.discarded.put(wasm.base.allocator, location, existing_loc);
+            try wasm.discarded.put(gpa, location, existing_loc);
             continue;
         }
 
@@ -830,10 +858,10 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
         log.debug("Overwriting symbol '{s}'", .{sym_name});
         log.debug("  old definition in '{s}'", .{existing_file_path});
         log.debug("  new definition in '{s}'", .{object.name});
-        try wasm.discarded.putNoClobber(wasm.base.allocator, existing_loc, location);
+        try wasm.discarded.putNoClobber(gpa, existing_loc, location);
         maybe_existing.value_ptr.* = location;
-        try wasm.globals.put(wasm.base.allocator, sym_name_index, location);
-        try wasm.resolved_symbols.put(wasm.base.allocator, location, {});
+        try wasm.globals.put(gpa, sym_name_index, location);
+        try wasm.resolved_symbols.put(gpa, location, {});
         assert(wasm.resolved_symbols.swapRemove(existing_loc));
         if (existing_sym.isUndefined()) {
             _ = wasm.undefs.swapRemove(sym_name_index);
@@ -842,6 +870,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
 }
 
 fn resolveSymbolsInArchives(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     if (wasm.archives.items.len == 0) return;
 
     log.debug("Resolving symbols in archives", .{});
@@ -860,9 +889,9 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
             // Symbol is found in unparsed object file within current archive.
             // Parse object and resolve symbols again before we check remaining
             // undefined symbols.
-            const object_file_index = @as(u16, @intCast(wasm.objects.items.len));
-            const object = try archive.parseObject(wasm.base.allocator, offset.items[0]);
-            try wasm.objects.append(wasm.base.allocator, object);
+            const object_file_index: u16 = @intCast(wasm.objects.items.len);
+            const object = try archive.parseObject(gpa, offset.items[0]);
+            try wasm.objects.append(gpa, object);
             try wasm.resolveSymbolsInObject(object_file_index);
 
             // continue loop for any remaining undefined symbols that still exist
@@ -880,6 +909,8 @@ fn writeI32Const(writer: anytype, val: u32) !void {
 }
 
 fn setupInitMemoryFunction(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
+
     // Passive segments are used to avoid memory being reinitialized on each
     // thread's instantiation. These passive segments are initialized and
     // dropped in __wasm_init_memory, which is registered as the start function
@@ -896,7 +927,7 @@ fn setupInitMemoryFunction(wasm: *Wasm) !void {
         break :address loc.getSymbol(wasm).virtual_address;
     } else 0;
 
-    var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+    var function_body = std.ArrayList(u8).init(gpa);
     defer function_body.deinit();
     const writer = function_body.writer();
 
@@ -1040,6 +1071,8 @@ fn setupInitMemoryFunction(wasm: *Wasm) !void {
 /// Constructs a synthetic function that performs runtime relocations for
 /// TLS symbols. This function is called by `__wasm_init_tls`.
 fn setupTLSRelocationsFunction(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
+
     // When we have TLS GOT entries and shared memory is enabled,
     // we must perform runtime relocations or else we don't create the function.
     if (!wasm.base.options.shared_memory or !wasm.requiresTLSReloc()) {
@@ -1047,7 +1080,7 @@ fn setupTLSRelocationsFunction(wasm: *Wasm) !void {
     }
 
     // const loc = try wasm.createSyntheticSymbol("__wasm_apply_global_tls_relocs");
-    var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+    var function_body = std.ArrayList(u8).init(gpa);
     defer function_body.deinit();
     const writer = function_body.writer();
 
@@ -1221,10 +1254,12 @@ fn validateFeatures(
 /// if one or multiple undefined references exist. When none exist, the symbol will
 /// not be created, ensuring we don't unnecessarily emit unreferenced symbols.
 fn resolveLazySymbols(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
+
     if (wasm.string_table.getOffset("__heap_base")) |name_offset| {
         if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
             const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data);
-            try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
+            try wasm.discarded.putNoClobber(gpa, kv.value, loc);
             _ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
         }
     }
@@ -1232,7 +1267,7 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
     if (wasm.string_table.getOffset("__heap_end")) |name_offset| {
         if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
             const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data);
-            try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
+            try wasm.discarded.putNoClobber(gpa, kv.value, loc);
             _ = wasm.resolved_symbols.swapRemove(loc);
         }
     }
@@ -1241,12 +1276,12 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
         if (wasm.string_table.getOffset("__tls_base")) |name_offset| {
             if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
                 const loc = try wasm.createSyntheticSymbolOffset(name_offset, .global);
-                try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
+                try wasm.discarded.putNoClobber(gpa, kv.value, loc);
                 _ = wasm.resolved_symbols.swapRemove(kv.value);
                 const symbol = loc.getSymbol(wasm);
                 symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
                 symbol.index = @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len);
-                try wasm.wasm_globals.append(wasm.base.allocator, .{
+                try wasm.wasm_globals.append(gpa, .{
                     .global_type = .{ .valtype = .i32, .mutable = true },
                     .init = .{ .i32_const = undefined },
                 });
@@ -1256,7 +1291,7 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
     if (wasm.string_table.getOffset("__zig_errors_len")) |name_offset| {
         if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
             const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data);
-            try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
+            try wasm.discarded.putNoClobber(gpa, kv.value, loc);
             _ = wasm.resolved_symbols.swapRemove(kv.value);
         }
     }
@@ -1292,8 +1327,8 @@ fn checkUndefinedSymbols(wasm: *const Wasm) !void {
 }
 
 pub fn deinit(wasm: *Wasm) void {
-    const gpa = wasm.base.allocator;
-    if (wasm.llvm_object) |llvm_object| llvm_object.destroy(gpa);
+    const gpa = wasm.base.comp.gpa;
+    if (wasm.llvm_object) |llvm_object| llvm_object.deinit();
 
     for (wasm.func_types.items) |*func_type| {
         func_type.deinit(gpa);
@@ -1378,7 +1413,9 @@ pub fn deinit(wasm: *Wasm) void {
 /// Allocates a new symbol and returns its index.
 /// Will re-use slots when a symbol was freed at an earlier stage.
 pub fn allocateSymbol(wasm: *Wasm) !u32 {
-    try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
+    const gpa = wasm.base.comp.gpa;
+
+    try wasm.symbols.ensureUnusedCapacity(gpa, 1);
     const symbol: Symbol = .{
         .name = std.math.maxInt(u32), // will be set after updateDecl as well as during atom creation for decls
         .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
@@ -1404,6 +1441,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air:
     const tracy = trace(@src());
     defer tracy.end();
 
+    const gpa = wasm.base.comp.gpa;
     const func = mod.funcInfo(func_index);
     const decl_index = func.owner_decl;
     const decl = mod.declPtr(decl_index);
@@ -1414,7 +1452,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air:
     // var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
     // defer if (decl_state) |*ds| ds.deinit();
 
-    var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
+    var code_writer = std.ArrayList(u8).init(gpa);
     defer code_writer.deinit();
     // const result = try codegen.generateFunction(
     //     &wasm.base,
@@ -1477,6 +1515,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !
         return;
     }
 
+    const gpa = wasm.base.comp.gpa;
     const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
     const atom = wasm.getAtomPtr(atom_index);
     atom.clear();
@@ -1489,7 +1528,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: InternPool.DeclIndex) !
     }
     const val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
 
-    var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
+    var code_writer = std.ArrayList(u8).init(gpa);
     defer code_writer.deinit();
 
     const res = try codegen.generateSymbol(
@@ -1528,16 +1567,17 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: InternPool.De
 }
 
 fn finishUpdateDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex, code: []const u8, symbol_tag: Symbol.Tag) !void {
+    const gpa = wasm.base.comp.gpa;
     const mod = wasm.base.options.module.?;
     const decl = mod.declPtr(decl_index);
     const atom_index = wasm.decls.get(decl_index).?;
     const atom = wasm.getAtomPtr(atom_index);
     const symbol = &wasm.symbols.items[atom.sym_index];
     const full_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
-    symbol.name = try wasm.string_table.put(wasm.base.allocator, full_name);
+    symbol.name = try wasm.string_table.put(gpa, full_name);
     symbol.tag = symbol_tag;
-    try atom.code.appendSlice(wasm.base.allocator, code);
-    try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
+    try atom.code.appendSlice(gpa, code);
+    try wasm.resolved_symbols.put(gpa, atom.symbolLoc(), {});
 
     atom.size = @intCast(code.len);
     if (code.len == 0) return;
@@ -1591,6 +1631,7 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type {
 /// Returns the symbol index of the local
 /// The given `decl` is the parent decl that owns the constant.
 pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: InternPool.DeclIndex) !u32 {
+    const gpa = wasm.base.comp.gpa;
     const mod = wasm.base.options.module.?;
     assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
     const decl = mod.declPtr(decl_index);
@@ -1599,14 +1640,14 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: InternPool.Dec
     const parent_atom = wasm.getAtom(parent_atom_index);
     const local_index = parent_atom.locals.items.len;
     const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
-    const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{
+    const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{s}_{d}", .{
         fqn, local_index,
     });
-    defer wasm.base.allocator.free(name);
+    defer gpa.free(name);
 
     switch (try wasm.lowerConst(name, tv, decl.srcLoc(mod))) {
         .ok => |atom_index| {
-            try wasm.getAtomPtr(parent_atom_index).locals.append(wasm.base.allocator, atom_index);
+            try wasm.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index);
             return wasm.getAtom(atom_index).getSymbolIndex().?;
         },
         .fail => |em| {
@@ -1623,24 +1664,25 @@ const LowerConstResult = union(enum) {
 };
 
 fn lowerConst(wasm: *Wasm, name: []const u8, tv: TypedValue, src_loc: Module.SrcLoc) !LowerConstResult {
+    const gpa = wasm.base.comp.gpa;
     const mod = wasm.base.options.module.?;
 
     // Create and initialize a new local symbol and atom
     const atom_index = try wasm.createAtom();
-    var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
+    var value_bytes = std.ArrayList(u8).init(gpa);
     defer value_bytes.deinit();
 
     const code = code: {
         const atom = wasm.getAtomPtr(atom_index);
         atom.alignment = tv.ty.abiAlignment(mod);
         wasm.symbols.items[atom.sym_index] = .{
-            .name = try wasm.string_table.put(wasm.base.allocator, name),
+            .name = try wasm.string_table.put(gpa, name),
             .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
             .tag = .data,
             .index = undefined,
             .virtual_address = undefined,
         };
-        try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
+        try wasm.resolved_symbols.putNoClobber(gpa, atom.symbolLoc(), {});
 
         const result = try codegen.generateSymbol(
             &wasm.base,
@@ -1663,7 +1705,7 @@ fn lowerConst(wasm: *Wasm, name: []const u8, tv: TypedValue, src_loc: Module.Src
 
     const atom = wasm.getAtomPtr(atom_index);
     atom.size = @intCast(code.len);
-    try atom.code.appendSlice(wasm.base.allocator, code);
+    try atom.code.appendSlice(gpa, code);
     return .{ .ok = atom_index };
 }
 
@@ -1673,8 +1715,9 @@ fn lowerConst(wasm: *Wasm, name: []const u8, tv: TypedValue, src_loc: Module.Src
 /// and then returns the index to it.
 pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u32 {
     _ = lib_name;
-    const name_index = try wasm.string_table.put(wasm.base.allocator, name);
-    const gop = try wasm.globals.getOrPut(wasm.base.allocator, name_index);
+    const gpa = wasm.base.comp.gpa;
+    const name_index = try wasm.string_table.put(gpa, name);
+    const gop = try wasm.globals.getOrPut(gpa, name_index);
     if (gop.found_existing) {
         return gop.value_ptr.*.index;
     }
@@ -1691,14 +1734,14 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3
 
     const sym_index = if (wasm.symbols_free_list.popOrNull()) |index| index else blk: {
         const index: u32 = @intCast(wasm.symbols.items.len);
-        try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
+        try wasm.symbols.ensureUnusedCapacity(gpa, 1);
         wasm.symbols.items.len += 1;
         break :blk index;
     };
     wasm.symbols.items[sym_index] = symbol;
     gop.value_ptr.* = .{ .index = sym_index, .file = null };
-    try wasm.resolved_symbols.put(wasm.base.allocator, gop.value_ptr.*, {});
-    try wasm.undefs.putNoClobber(wasm.base.allocator, name_index, gop.value_ptr.*);
+    try wasm.resolved_symbols.put(gpa, gop.value_ptr.*, {});
+    try wasm.undefs.putNoClobber(gpa, name_index, gop.value_ptr.*);
     return sym_index;
 }
 
@@ -1709,6 +1752,7 @@ pub fn getDeclVAddr(
     decl_index: InternPool.DeclIndex,
     reloc_info: link.File.RelocInfo,
 ) !u64 {
+    const gpa = wasm.base.comp.gpa;
     const mod = wasm.base.options.module.?;
     const decl = mod.declPtr(decl_index);
 
@@ -1725,17 +1769,17 @@ pub fn getDeclVAddr(
         // as function pointers are not allowed to be stored inside the data section.
         // They are instead stored in a function table which are called by index.
         try wasm.addTableFunction(target_symbol_index);
-        try atom.relocs.append(wasm.base.allocator, .{
+        try atom.relocs.append(gpa, .{
             .index = target_symbol_index,
-            .offset = @as(u32, @intCast(reloc_info.offset)),
+            .offset = @intCast(reloc_info.offset),
             .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64,
         });
     } else {
-        try atom.relocs.append(wasm.base.allocator, .{
+        try atom.relocs.append(gpa, .{
             .index = target_symbol_index,
-            .offset = @as(u32, @intCast(reloc_info.offset)),
+            .offset = @intCast(reloc_info.offset),
             .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
-            .addend = @as(i32, @intCast(reloc_info.addend)),
+            .addend = @intCast(reloc_info.addend),
         });
     }
     // we do not know the final address at this point,
@@ -1751,7 +1795,8 @@ pub fn lowerAnonDecl(
     explicit_alignment: Alignment,
     src_loc: Module.SrcLoc,
 ) !codegen.Result {
-    const gop = try wasm.anon_decls.getOrPut(wasm.base.allocator, decl_val);
+    const gpa = wasm.base.comp.gpa;
+    const gop = try wasm.anon_decls.getOrPut(gpa, decl_val);
     if (!gop.found_existing) {
         const mod = wasm.base.options.module.?;
         const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
@@ -1779,6 +1824,7 @@ pub fn lowerAnonDecl(
 }
 
 pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
+    const gpa = wasm.base.comp.gpa;
     const atom_index = wasm.anon_decls.get(decl_val).?;
     const target_symbol_index = wasm.getAtom(atom_index).getSymbolIndex().?;
 
@@ -1793,17 +1839,17 @@ pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: lin
         // as function pointers are not allowed to be stored inside the data section.
         // They are instead stored in a function table which are called by index.
         try wasm.addTableFunction(target_symbol_index);
-        try parent_atom.relocs.append(wasm.base.allocator, .{
+        try parent_atom.relocs.append(gpa, .{
             .index = target_symbol_index,
-            .offset = @as(u32, @intCast(reloc_info.offset)),
+            .offset = @intCast(reloc_info.offset),
             .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64,
         });
     } else {
-        try parent_atom.relocs.append(wasm.base.allocator, .{
+        try parent_atom.relocs.append(gpa, .{
             .index = target_symbol_index,
-            .offset = @as(u32, @intCast(reloc_info.offset)),
+            .offset = @intCast(reloc_info.offset),
             .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
-            .addend = @as(i32, @intCast(reloc_info.addend)),
+            .addend = @intCast(reloc_info.addend),
         });
     }
 
@@ -1840,8 +1886,6 @@ pub fn updateExports(
     }
     if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports);
 
-    if (wasm.base.options.emit == null) return;
-
     const decl_index = switch (exported) {
         .decl_index => |i| i,
         .value => |val| {
@@ -1880,7 +1924,7 @@ pub fn updateExports(
         };
         const exported_atom_index = try wasm.getOrCreateAtomForDecl(exported_decl_index);
         const exported_atom = wasm.getAtom(exported_atom_index);
-        const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.opts.name));
+        const export_name = try wasm.string_table.put(gpa, mod.intern_pool.stringToSlice(exp.opts.name));
         const sym_loc = exported_atom.symbolLoc();
         const symbol = sym_loc.getSymbol(wasm);
         symbol.setGlobal(true);
@@ -1915,7 +1959,7 @@ pub fn updateExports(
 
             if (!existing_sym.isUndefined()) blk: {
                 if (symbol.isWeak()) {
-                    try wasm.discarded.put(wasm.base.allocator, existing_loc, sym_loc);
+                    try wasm.discarded.put(gpa, existing_loc, sym_loc);
                     continue; // to-be-exported symbol is weak, so we keep the existing symbol
                 }
 
@@ -1939,18 +1983,18 @@ pub fn updateExports(
             }
 
             // in this case the existing symbol must be replaced either because it's weak or undefined.
-            try wasm.discarded.put(wasm.base.allocator, existing_loc, sym_loc);
+            try wasm.discarded.put(gpa, existing_loc, sym_loc);
             _ = wasm.imports.remove(existing_loc);
             _ = wasm.undefs.swapRemove(existing_sym.name);
         }
 
         // Ensure the symbol will be exported using the given name
         if (!mod.intern_pool.stringEqlSlice(exp.opts.name, sym_loc.getName(wasm))) {
-            try wasm.export_names.put(wasm.base.allocator, sym_loc, export_name);
+            try wasm.export_names.put(gpa, sym_loc, export_name);
         }
 
         try wasm.globals.put(
-            wasm.base.allocator,
+            gpa,
             export_name,
             sym_loc,
         );
@@ -1959,18 +2003,19 @@ pub fn updateExports(
 
 pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void {
     if (wasm.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index);
+    const gpa = wasm.base.comp.gpa;
     const mod = wasm.base.options.module.?;
     const decl = mod.declPtr(decl_index);
     const atom_index = wasm.decls.get(decl_index).?;
     const atom = wasm.getAtomPtr(atom_index);
-    wasm.symbols_free_list.append(wasm.base.allocator, atom.sym_index) catch {};
+    wasm.symbols_free_list.append(gpa, atom.sym_index) catch {};
     _ = wasm.decls.remove(decl_index);
     wasm.symbols.items[atom.sym_index].tag = .dead;
     for (atom.locals.items) |local_atom_index| {
         const local_atom = wasm.getAtom(local_atom_index);
         const local_symbol = &wasm.symbols.items[local_atom.sym_index];
         local_symbol.tag = .dead; // also for any local symbol
-        wasm.symbols_free_list.append(wasm.base.allocator, local_atom.sym_index) catch {};
+        wasm.symbols_free_list.append(gpa, local_atom.sym_index) catch {};
         assert(wasm.resolved_symbols.swapRemove(local_atom.symbolLoc()));
         assert(wasm.symbol_atom.remove(local_atom.symbolLoc()));
     }
@@ -1999,8 +2044,9 @@ pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void {
 
 /// Appends a new entry to the indirect function table
 pub fn addTableFunction(wasm: *Wasm, symbol_index: u32) !void {
-    const index = @as(u32, @intCast(wasm.function_table.count()));
-    try wasm.function_table.put(wasm.base.allocator, .{ .file = null, .index = symbol_index }, index);
+    const gpa = wasm.base.comp.gpa;
+    const index: u32 = @intCast(wasm.function_table.count());
+    try wasm.function_table.put(gpa, .{ .file = null, .index = symbol_index }, index);
 }
 
 /// Assigns indexes to all indirect functions.
@@ -2019,7 +2065,7 @@ fn mapFunctionTable(wasm: *Wasm) void {
         }
     }
 
-    if (wasm.base.options.import_table or wasm.base.options.output_mode == .Obj) {
+    if (wasm.import_table or wasm.base.options.output_mode == .Obj) {
         const sym_loc = wasm.findGlobalSymbol("__indirect_function_table").?;
         const import = wasm.imports.getPtr(sym_loc).?;
         import.kind.table.limits.min = index - 1; // we start at index 1.
@@ -2048,6 +2094,7 @@ pub fn addOrUpdateImport(
     /// is asserted instead.
     type_index: ?u32,
 ) !void {
+    const gpa = wasm.base.comp.gpa;
     assert(symbol_index != 0);
     // For the import name, we use the decl's name, rather than the fully qualified name
     // Also mangle the name when the lib name is set and not equal to "c" so imports with the same
@@ -2055,11 +2102,11 @@ pub fn addOrUpdateImport(
     const mangle_name = lib_name != null and
         !std.mem.eql(u8, lib_name.?, "c");
     const full_name = if (mangle_name) full_name: {
-        break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? });
+        break :full_name try std.fmt.allocPrint(gpa, "{s}|{s}", .{ name, lib_name.? });
     } else name;
-    defer if (mangle_name) wasm.base.allocator.free(full_name);
+    defer if (mangle_name) gpa.free(full_name);
 
-    const decl_name_index = try wasm.string_table.put(wasm.base.allocator, full_name);
+    const decl_name_index = try wasm.string_table.put(gpa, full_name);
     const symbol: *Symbol = &wasm.symbols.items[symbol_index];
     symbol.setUndefined(true);
     symbol.setGlobal(true);
@@ -2068,12 +2115,12 @@ pub fn addOrUpdateImport(
         // we specified a specific name for the symbol that does not match the import name
         symbol.setFlag(.WASM_SYM_EXPLICIT_NAME);
     }
-    const global_gop = try wasm.globals.getOrPut(wasm.base.allocator, decl_name_index);
+    const global_gop = try wasm.globals.getOrPut(gpa, decl_name_index);
     if (!global_gop.found_existing) {
         const loc: SymbolLoc = .{ .file = null, .index = symbol_index };
         global_gop.value_ptr.* = loc;
-        try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
-        try wasm.undefs.putNoClobber(wasm.base.allocator, decl_name_index, loc);
+        try wasm.resolved_symbols.put(gpa, loc, {});
+        try wasm.undefs.putNoClobber(gpa, decl_name_index, loc);
     } else if (global_gop.value_ptr.*.index != symbol_index) {
         // We are not updating a symbol, but found an existing global
         // symbol with the same name. This means we always favor the
@@ -2081,21 +2128,21 @@ pub fn addOrUpdateImport(
         // We can also skip storing the import as we will not output
         // this symbol.
         return wasm.discarded.put(
-            wasm.base.allocator,
+            gpa,
             .{ .file = null, .index = symbol_index },
             global_gop.value_ptr.*,
         );
     }
 
     if (type_index) |ty_index| {
-        const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null });
+        const gop = try wasm.imports.getOrPut(gpa, .{ .index = symbol_index, .file = null });
         const module_name = if (lib_name) |l_name| blk: {
             break :blk l_name;
         } else wasm.host_name;
         if (!gop.found_existing) {
             gop.value_ptr.* = .{
-                .module_name = try wasm.string_table.put(wasm.base.allocator, module_name),
-                .name = try wasm.string_table.put(wasm.base.allocator, name),
+                .module_name = try wasm.string_table.put(gpa, module_name),
+                .name = try wasm.string_table.put(gpa, name),
                 .kind = .{ .function = ty_index },
             };
         }
@@ -2132,10 +2179,10 @@ const Kind = union(enum) {
 
 /// Parses an Atom and inserts its metadata into the corresponding sections.
 fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
+    const gpa = wasm.base.comp.gpa;
     const atom = wasm.getAtomPtr(atom_index);
     const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
-    const do_garbage_collect = wasm.base.options.gc_sections orelse
-        (wasm.base.options.output_mode != .Obj);
+    const do_garbage_collect = wasm.base.gc_sections;
 
     if (symbol.isDead() and do_garbage_collect) {
         // Prevent unreferenced symbols from being parsed.
@@ -2147,7 +2194,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
             const index: u32 = @intCast(wasm.functions.count() + wasm.imported_functions_count);
             const type_index = wasm.atom_types.get(atom_index).?;
             try wasm.functions.putNoClobber(
-                wasm.base.allocator,
+                gpa,
                 .{ .file = null, .index = index },
                 .{ .func = .{ .type_index = type_index }, .sym_index = atom.sym_index },
             );
@@ -2156,7 +2203,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
 
             if (wasm.code_section_index == null) {
                 wasm.code_section_index = @intCast(wasm.segments.items.len);
-                try wasm.segments.append(wasm.base.allocator, .{
+                try wasm.segments.append(gpa, .{
                     .alignment = atom.alignment,
                     .size = atom.size,
                     .offset = 0,
@@ -2167,11 +2214,11 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
             break :result wasm.code_section_index.?;
         },
         .data => result: {
-            const segment_name = try std.mem.concat(wasm.base.allocator, u8, &.{
+            const segment_name = try std.mem.concat(gpa, u8, &.{
                 kind.segmentName(),
                 wasm.string_table.get(symbol.name),
             });
-            errdefer wasm.base.allocator.free(segment_name);
+            errdefer gpa.free(segment_name);
             const segment_info: types.Segment = .{
                 .name = segment_name,
                 .alignment = atom.alignment,
@@ -2188,14 +2235,14 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
             }
 
             const should_merge = wasm.base.options.output_mode != .Obj;
-            const gop = try wasm.data_segments.getOrPut(wasm.base.allocator, segment_info.outputName(should_merge));
+            const gop = try wasm.data_segments.getOrPut(gpa, segment_info.outputName(should_merge));
             if (gop.found_existing) {
                 const index = gop.value_ptr.*;
                 wasm.segments.items[index].size += atom.size;
 
                 symbol.index = @intCast(wasm.segment_info.getIndex(index).?);
                 // segment info already exists, so free its memory
-                wasm.base.allocator.free(segment_name);
+                gpa.free(segment_name);
                 break :result index;
             } else {
                 const index: u32 = @intCast(wasm.segments.items.len);
@@ -2203,7 +2250,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
                 if (wasm.base.options.shared_memory) {
                     flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
                 }
-                try wasm.segments.append(wasm.base.allocator, .{
+                try wasm.segments.append(gpa, .{
                     .alignment = atom.alignment,
                     .size = 0,
                     .offset = 0,
@@ -2212,7 +2259,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
                 gop.value_ptr.* = index;
 
                 const info_index: u32 = @intCast(wasm.segment_info.count());
-                try wasm.segment_info.put(wasm.base.allocator, index, segment_info);
+                try wasm.segment_info.put(gpa, index, segment_info);
                 symbol.index = info_index;
                 break :result index;
             }
@@ -2228,6 +2275,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
 /// From a given index, append the given `Atom` at the back of the linked list.
 /// Simply inserts it into the map of atoms when it doesn't exist yet.
 pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom_index: Atom.Index) !void {
+    const gpa = wasm.base.comp.gpa;
     const atom = wasm.getAtomPtr(atom_index);
     if (wasm.atoms.getPtr(index)) |last_index_ptr| {
         const last = wasm.getAtomPtr(last_index_ptr.*);
@@ -2235,7 +2283,7 @@ pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom_index: Atom.Index) !void
         atom.prev = last_index_ptr.*;
         last_index_ptr.* = atom_index;
     } else {
-        try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom_index);
+        try wasm.atoms.putNoClobber(gpa, index, atom_index);
     }
 }
 
@@ -2363,12 +2411,13 @@ fn allocateVirtualAddresses(wasm: *Wasm) void {
 }
 
 fn sortDataSegments(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     var new_mapping: std.StringArrayHashMapUnmanaged(u32) = .{};
-    try new_mapping.ensureUnusedCapacity(wasm.base.allocator, wasm.data_segments.count());
-    errdefer new_mapping.deinit(wasm.base.allocator);
+    try new_mapping.ensureUnusedCapacity(gpa, wasm.data_segments.count());
+    errdefer new_mapping.deinit(gpa);
 
-    const keys = try wasm.base.allocator.dupe([]const u8, wasm.data_segments.keys());
-    defer wasm.base.allocator.free(keys);
+    const keys = try gpa.dupe([]const u8, wasm.data_segments.keys());
+    defer gpa.free(keys);
 
     const SortContext = struct {
         fn sort(_: void, lhs: []const u8, rhs: []const u8) bool {
@@ -2388,7 +2437,7 @@ fn sortDataSegments(wasm: *Wasm) !void {
         const segment_index = wasm.data_segments.get(key).?;
         new_mapping.putAssumeCapacity(key, segment_index);
     }
-    wasm.data_segments.deinit(wasm.base.allocator);
+    wasm.data_segments.deinit(gpa);
     wasm.data_segments = new_mapping;
 }
 
@@ -2401,8 +2450,9 @@ fn sortDataSegments(wasm: *Wasm) !void {
 /// original functions and their types. We need to know the type to verify it doesn't
 /// contain any parameters.
 fn setupInitFunctions(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     for (wasm.objects.items, 0..) |object, file_index| {
-        try wasm.init_funcs.ensureUnusedCapacity(wasm.base.allocator, object.init_funcs.len);
+        try wasm.init_funcs.ensureUnusedCapacity(gpa, object.init_funcs.len);
         for (object.init_funcs) |init_func| {
             const symbol = object.symtable[init_func.symbol_index];
             const ty: std.wasm.Type = if (symbol.isUndefined()) ty: {
@@ -2439,6 +2489,7 @@ fn setupInitFunctions(wasm: *Wasm) !void {
 /// Generates an atom containing the global error set's size.
 /// This will only be generated if the symbol exists.
 fn setupErrorsLen(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     const loc = wasm.findGlobalSymbol("__zig_errors_len") orelse return;
 
     const errors_len = wasm.base.options.module.?.global_error_set.count();
@@ -2456,19 +2507,19 @@ fn setupErrorsLen(wasm: *Wasm) !void {
             prev_atom.next = atom.next;
             atom.prev = null;
         }
-        atom.deinit(wasm.base.allocator);
+        atom.deinit(gpa);
         break :blk index;
     } else new_atom: {
         const atom_index: Atom.Index = @intCast(wasm.managed_atoms.items.len);
-        try wasm.symbol_atom.put(wasm.base.allocator, loc, atom_index);
-        try wasm.managed_atoms.append(wasm.base.allocator, undefined);
+        try wasm.symbol_atom.put(gpa, loc, atom_index);
+        try wasm.managed_atoms.append(gpa, undefined);
         break :new_atom atom_index;
     };
     const atom = wasm.getAtomPtr(atom_index);
     atom.* = Atom.empty;
     atom.sym_index = loc.index;
     atom.size = 2;
-    try atom.code.writer(wasm.base.allocator).writeInt(u16, @intCast(errors_len), .little);
+    try atom.code.writer(gpa).writeInt(u16, @intCast(errors_len), .little);
 
     try wasm.parseAtom(atom_index, .{ .data = .read_only });
 }
@@ -2480,16 +2531,17 @@ fn setupErrorsLen(wasm: *Wasm) !void {
 /// references to the function stored in the symbol have been finalized so we end
 /// up calling the resolved function.
 fn initializeCallCtorsFunction(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     // No code to emit, so also no ctors to call
     if (wasm.code_section_index == null) {
         // Make sure to remove it from the resolved symbols so we do not emit
         // it within any section. TODO: Remove this once we implement garbage collection.
         const loc = wasm.findGlobalSymbol("__wasm_call_ctors").?;
-        std.debug.assert(wasm.resolved_symbols.swapRemove(loc));
+        assert(wasm.resolved_symbols.swapRemove(loc));
         return;
     }
 
-    var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+    var function_body = std.ArrayList(u8).init(gpa);
     defer function_body.deinit();
     const writer = function_body.writer();
 
@@ -2531,6 +2583,7 @@ fn createSyntheticFunction(
     func_ty: std.wasm.Type,
     function_body: *std.ArrayList(u8),
 ) !void {
+    const gpa = wasm.base.comp.gpa;
     const loc = wasm.findGlobalSymbol(symbol_name) orelse
         try wasm.createSyntheticSymbol(symbol_name, .function);
     const symbol = loc.getSymbol(wasm);
@@ -2541,7 +2594,7 @@ fn createSyntheticFunction(
     // create function with above type
     const func_index = wasm.imported_functions_count + @as(u32, @intCast(wasm.functions.count()));
     try wasm.functions.putNoClobber(
-        wasm.base.allocator,
+        gpa,
         .{ .file = null, .index = func_index },
         .{ .func = .{ .type_index = ty_index }, .sym_index = loc.index },
     );
@@ -2549,7 +2602,7 @@ fn createSyntheticFunction(
 
     // create the atom that will be output into the final binary
     const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
-    const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+    const atom = try wasm.managed_atoms.addOne(gpa);
     atom.* = .{
         .size = @as(u32, @intCast(function_body.items.len)),
         .offset = 0,
@@ -2562,7 +2615,7 @@ fn createSyntheticFunction(
         .original_offset = 0,
     };
     try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom_index);
-    try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
+    try wasm.symbol_atom.putNoClobber(gpa, loc, atom_index);
 
     // `allocateAtoms` has already been called, set the atom's offset manually.
     // This is fine to do manually as we insert the atom at the very end.
@@ -2582,10 +2635,11 @@ pub fn createFunction(
     function_body: *std.ArrayList(u8),
     relocations: *std.ArrayList(Relocation),
 ) !u32 {
+    const gpa = wasm.base.comp.gpa;
     const loc = try wasm.createSyntheticSymbol(symbol_name, .function);
 
-    const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
-    const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+    const atom_index: Atom.Index = @intCast(wasm.managed_atoms.items.len);
+    const atom = try wasm.managed_atoms.addOne(gpa);
     atom.* = .{
         .size = @intCast(function_body.items.len),
         .offset = 0,
@@ -2607,9 +2661,9 @@ pub fn createFunction(
         break :idx index;
     };
     try wasm.appendAtomAtIndex(section_index, atom_index);
-    try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
-    try wasm.atom_types.put(wasm.base.allocator, atom_index, try wasm.putOrGetFuncType(func_ty));
-    try wasm.synthetic_functions.append(wasm.base.allocator, atom_index);
+    try wasm.symbol_atom.putNoClobber(gpa, loc, atom_index);
+    try wasm.atom_types.put(gpa, atom_index, try wasm.putOrGetFuncType(func_ty));
+    try wasm.synthetic_functions.append(gpa, atom_index);
 
     return loc.index;
 }
@@ -2622,9 +2676,11 @@ fn setupStartSection(wasm: *Wasm) !void {
 }
 
 fn initializeTLSFunction(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
+
     if (!wasm.base.options.shared_memory) return;
 
-    var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+    var function_body = std.ArrayList(u8).init(gpa);
     defer function_body.deinit();
     const writer = function_body.writer();
 
@@ -2684,6 +2740,7 @@ fn initializeTLSFunction(wasm: *Wasm) !void {
 }
 
 fn setupImports(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     log.debug("Merging imports", .{});
     var discarded_it = wasm.discarded.keyIterator();
     while (discarded_it.next()) |discarded| {
@@ -2718,12 +2775,12 @@ fn setupImports(wasm: *Wasm) !void {
         // We copy the import to a new import to ensure the names contain references
         // to the internal string table, rather than of the object file.
         const new_imp: types.Import = .{
-            .module_name = try wasm.string_table.put(wasm.base.allocator, object.string_table.get(import.module_name)),
-            .name = try wasm.string_table.put(wasm.base.allocator, object.string_table.get(import.name)),
+            .module_name = try wasm.string_table.put(gpa, object.string_table.get(import.module_name)),
+            .name = try wasm.string_table.put(gpa, object.string_table.get(import.name)),
             .kind = import.kind,
         };
         // TODO: De-duplicate imports when they contain the same names and type
-        try wasm.imports.putNoClobber(wasm.base.allocator, symbol_loc, new_imp);
+        try wasm.imports.putNoClobber(gpa, symbol_loc, new_imp);
     }
 
     // Assign all indexes of the imports to their representing symbols
@@ -2764,7 +2821,9 @@ fn setupImports(wasm: *Wasm) !void {
 /// Takes the global, function and table section from each linked object file
 /// and merges it into a single section for each.
 fn mergeSections(wasm: *Wasm) !void {
-    var removed_duplicates = std.ArrayList(SymbolLoc).init(wasm.base.allocator);
+    const gpa = wasm.base.comp.gpa;
+
+    var removed_duplicates = std.ArrayList(SymbolLoc).init(gpa);
     defer removed_duplicates.deinit();
 
     for (wasm.resolved_symbols.keys()) |sym_loc| {
@@ -2791,7 +2850,7 @@ fn mergeSections(wasm: *Wasm) !void {
         switch (symbol.tag) {
             .function => {
                 const gop = try wasm.functions.getOrPut(
-                    wasm.base.allocator,
+                    gpa,
                     .{ .file = sym_loc.file, .index = symbol.index },
                 );
                 if (gop.found_existing) {
@@ -2800,7 +2859,7 @@ fn mergeSections(wasm: *Wasm) !void {
                     // we only emit a single function, instead of duplicates.
                     symbol.unmark();
                     try wasm.discarded.putNoClobber(
-                        wasm.base.allocator,
+                        gpa,
                         sym_loc,
                         .{ .file = gop.key_ptr.*.file, .index = gop.value_ptr.*.sym_index },
                     );
@@ -2813,12 +2872,12 @@ fn mergeSections(wasm: *Wasm) !void {
             .global => {
                 const original_global = object.globals[index];
                 symbol.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
-                try wasm.wasm_globals.append(wasm.base.allocator, original_global);
+                try wasm.wasm_globals.append(gpa, original_global);
             },
             .table => {
                 const original_table = object.tables[index];
                 symbol.index = @as(u32, @intCast(wasm.tables.items.len)) + wasm.imported_tables_count;
-                try wasm.tables.append(wasm.base.allocator, original_table);
+                try wasm.tables.append(gpa, original_table);
             },
             else => unreachable,
         }
@@ -2838,10 +2897,11 @@ fn mergeSections(wasm: *Wasm) !void {
 /// 'types' section, while assigning the type index to the representing
 /// section (import, export, function).
 fn mergeTypes(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     // A map to track which functions have already had their
     // type inserted. If we do this for the same function multiple times,
     // it will be overwritten with the incorrect type.
-    var dirty = std.AutoHashMap(u32, void).init(wasm.base.allocator);
+    var dirty = std.AutoHashMap(u32, void).init(gpa);
     try dirty.ensureUnusedCapacity(@as(u32, @intCast(wasm.functions.count())));
     defer dirty.deinit();
 
@@ -2873,6 +2933,7 @@ fn mergeTypes(wasm: *Wasm) !void {
 }
 
 fn setupExports(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     if (wasm.base.options.output_mode == .Obj) return;
     log.debug("Building exports from symbols", .{});
 
@@ -2903,11 +2964,11 @@ fn setupExports(wasm: *Wasm) !void {
         const sym_name = sym_loc.getName(wasm);
         const export_name = if (wasm.export_names.get(sym_loc)) |name| name else blk: {
             if (sym_loc.file == null) break :blk symbol.name;
-            break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
+            break :blk try wasm.string_table.put(gpa, sym_name);
         };
         const exp: types.Export = if (symbol.tag == .data) exp: {
             const global_index = @as(u32, @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len));
-            try wasm.wasm_globals.append(wasm.base.allocator, .{
+            try wasm.wasm_globals.append(gpa, .{
                 .global_type = .{ .valtype = .i32, .mutable = false },
                 .init = .{ .i32_const = @as(i32, @intCast(symbol.virtual_address)) },
             });
@@ -2926,7 +2987,7 @@ fn setupExports(wasm: *Wasm) !void {
             wasm.string_table.get(exp.name),
             exp.index,
         });
-        try wasm.exports.append(wasm.base.allocator, exp);
+        try wasm.exports.append(gpa, exp);
     }
 
     log.debug("Completed building exports. Total count: ({d})", .{wasm.exports.items.len});
@@ -2957,8 +3018,6 @@ fn setupStart(wasm: *Wasm) !void {
 fn setupMemory(wasm: *Wasm) !void {
     log.debug("Setting up memory layout", .{});
     const page_size = std.wasm.page_size; // 64kb
-    // Use the user-provided stack size or else we use 1MB by default
-    const stack_size = wasm.base.options.stack_size_override orelse page_size * 16;
     const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention
     const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention
 
@@ -2974,7 +3033,7 @@ fn setupMemory(wasm: *Wasm) !void {
 
     if (place_stack_first and !is_obj) {
         memory_ptr = stack_alignment.forward(memory_ptr);
-        memory_ptr += stack_size;
+        memory_ptr += wasm.base.stack_size;
         // We always put the stack pointer global at index 0
         wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
     }
@@ -3021,7 +3080,7 @@ fn setupMemory(wasm: *Wasm) !void {
 
     if (!place_stack_first and !is_obj) {
         memory_ptr = stack_alignment.forward(memory_ptr);
-        memory_ptr += stack_size;
+        memory_ptr += wasm.base.stack_size;
         wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
     }
 
@@ -3088,29 +3147,30 @@ fn setupMemory(wasm: *Wasm) !void {
 /// index of the segment within the final data section. When the segment does not yet
 /// exist, a new one will be initialized and appended. The new index will be returned in that case.
 pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, symbol_index: u32) !u32 {
+    const gpa = wasm.base.comp.gpa;
     const object: Object = wasm.objects.items[object_index];
     const symbol = object.symtable[symbol_index];
-    const index = @as(u32, @intCast(wasm.segments.items.len));
+    const index: u32 = @intCast(wasm.segments.items.len);
 
     switch (symbol.tag) {
         .data => {
             const segment_info = object.segment_info[symbol.index];
             const merge_segment = wasm.base.options.output_mode != .Obj;
-            const result = try wasm.data_segments.getOrPut(wasm.base.allocator, segment_info.outputName(merge_segment));
+            const result = try wasm.data_segments.getOrPut(gpa, segment_info.outputName(merge_segment));
             if (!result.found_existing) {
                 result.value_ptr.* = index;
                 var flags: u32 = 0;
                 if (wasm.base.options.shared_memory) {
                     flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
                 }
-                try wasm.segments.append(wasm.base.allocator, .{
+                try wasm.segments.append(gpa, .{
                     .alignment = .@"1",
                     .size = 0,
                     .offset = 0,
                     .flags = flags,
                 });
-                try wasm.segment_info.putNoClobber(wasm.base.allocator, index, .{
-                    .name = try wasm.base.allocator.dupe(u8, segment_info.name),
+                try wasm.segment_info.putNoClobber(gpa, index, .{
+                    .name = try gpa.dupe(u8, segment_info.name),
                     .alignment = segment_info.alignment,
                     .flags = segment_info.flags,
                 });
@@ -3183,7 +3243,8 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, symbol_index: u32) !u3
 
 /// Appends a new segment with default field values
 fn appendDummySegment(wasm: *Wasm) !void {
-    try wasm.segments.append(wasm.base.allocator, .{
+    const gpa = wasm.base.comp.gpa;
+    try wasm.segments.append(gpa, .{
         .alignment = .@"1",
         .size = 0,
         .offset = 0,
@@ -3203,6 +3264,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
     // and then return said symbol's index. The final table will be populated
     // during `flush` when we know all possible error names.
 
+    const gpa = wasm.base.comp.gpa;
     const atom_index = try wasm.createAtom();
     const atom = wasm.getAtomPtr(atom_index);
     const slice_ty = Type.slice_const_u8_sentinel_0;
@@ -3210,7 +3272,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
     atom.alignment = slice_ty.abiAlignment(mod);
     const sym_index = atom.sym_index;
 
-    const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
+    const sym_name = try wasm.string_table.put(gpa, "__zig_err_name_table");
     const symbol = &wasm.symbols.items[sym_index];
     symbol.* = .{
         .name = sym_name,
@@ -3222,7 +3284,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
     symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
     symbol.mark();
 
-    try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
+    try wasm.resolved_symbols.put(gpa, atom.symbolLoc(), {});
 
     log.debug("Error name table was created with symbol index: ({d})", .{sym_index});
     wasm.error_table_symbol = sym_index;
@@ -3234,6 +3296,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
 /// This creates a table that consists of pointers and length to each error name.
 /// The table is what is being pointed to within the runtime bodies that are generated.
 fn populateErrorNameTable(wasm: *Wasm) !void {
+    const gpa = wasm.base.comp.gpa;
     const symbol_index = wasm.error_table_symbol orelse return;
     const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
 
@@ -3243,7 +3306,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
     const names_atom_index = try wasm.createAtom();
     const names_atom = wasm.getAtomPtr(names_atom_index);
     names_atom.alignment = .@"1";
-    const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
+    const sym_name = try wasm.string_table.put(gpa, "__zig_err_names");
     const names_symbol = &wasm.symbols.items[names_atom.sym_index];
     names_symbol.* = .{
         .name = sym_name,
@@ -3269,10 +3332,10 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
         const slice_ty = Type.slice_const_u8_sentinel_0;
         const offset = @as(u32, @intCast(atom.code.items.len));
         // first we create the data for the slice of the name
-        try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
-        try atom.code.writer(wasm.base.allocator).writeInt(u32, len - 1, .little);
+        try atom.code.appendNTimes(gpa, 0, 4); // ptr to name, will be relocated
+        try atom.code.writer(gpa).writeInt(u32, len - 1, .little);
         // create relocation to the error name
-        try atom.relocs.append(wasm.base.allocator, .{
+        try atom.relocs.append(gpa, .{
             .index = names_atom.sym_index,
             .relocation_type = .R_WASM_MEMORY_ADDR_I32,
             .offset = offset,
@@ -3282,7 +3345,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
         addend += len;
 
         // as we updated the error name table, we now store the actual name within the names atom
-        try names_atom.code.ensureUnusedCapacity(wasm.base.allocator, len);
+        try names_atom.code.ensureUnusedCapacity(gpa, len);
         names_atom.code.appendSliceAssumeCapacity(error_name);
         names_atom.code.appendAssumeCapacity(0);
 
@@ -3291,8 +3354,8 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
     names_atom.size = addend;
 
     const name_loc = names_atom.symbolLoc();
-    try wasm.resolved_symbols.put(wasm.base.allocator, name_loc, {});
-    try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom_index);
+    try wasm.resolved_symbols.put(gpa, name_loc, {});
+    try wasm.symbol_atom.put(gpa, name_loc, names_atom_index);
 
     // link the atoms with the rest of the binary so they can be allocated
     // and relocations will be performed.
@@ -3304,7 +3367,8 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
 /// This initializes the index, appends a new segment,
 /// and finally, creates a managed `Atom`.
 pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
-    const new_index = @as(u32, @intCast(wasm.segments.items.len));
+    const gpa = wasm.base.comp.gpa;
+    const new_index: u32 = @intCast(wasm.segments.items.len);
     index.* = new_index;
     try wasm.appendDummySegment();
 
@@ -3312,7 +3376,7 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !
     const atom = wasm.getAtomPtr(atom_index);
     wasm.symbols.items[atom.sym_index] = .{
         .tag = .section,
-        .name = try wasm.string_table.put(wasm.base.allocator, name),
+        .name = try wasm.string_table.put(gpa, name),
         .index = 0,
         .flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
     };
@@ -3322,8 +3386,10 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !
 }
 
 fn resetState(wasm: *Wasm) void {
+    const gpa = wasm.base.comp.gpa;
+
     for (wasm.segment_info.values()) |segment_info| {
-        wasm.base.allocator.free(segment_info.name);
+        gpa.free(segment_info.name);
     }
 
     var atom_it = wasm.decls.valueIterator();
@@ -3358,16 +3424,12 @@ fn resetState(wasm: *Wasm) void {
 }
 
 pub fn flush(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
-    if (wasm.base.options.emit == null) {
-        if (wasm.llvm_object) |llvm_object| {
-            return try llvm_object.flushModule(comp, prog_node);
-        }
-        return;
-    }
+    const use_lld = build_options.have_llvm and wasm.base.comp.config.use_lld;
+    const use_llvm = wasm.base.comp.config.use_llvm;
 
-    if (build_options.have_llvm and wasm.base.options.use_lld) {
+    if (use_lld) {
         return wasm.linkWithLLD(comp, prog_node);
-    } else if (wasm.base.options.use_llvm and !wasm.base.options.use_lld) {
+    } else if (use_llvm and !use_lld) {
         return wasm.linkWithZld(comp, prog_node);
     } else {
         return wasm.flushModule(comp, prog_node);
@@ -3379,21 +3441,22 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
     const tracy = trace(@src());
     defer tracy.end();
 
-    const gpa = wasm.base.allocator;
-    const options = wasm.base.options;
+    const gpa = wasm.base.comp.gpa;
 
     // Used for all temporary memory allocated during flushing
     var arena_instance = std.heap.ArenaAllocator.init(gpa);
     defer arena_instance.deinit();
     const arena = arena_instance.allocator();
 
-    const directory = options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{options.emit.?.sub_path});
+    const directory = wasm.base.emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{wasm.base.emit.sub_path});
+    const opt_zcu = wasm.base.comp.module;
+    const use_llvm = wasm.base.comp.config.use_llvm;
 
     // If there is no Zig code to compile, then we should skip flushing the output file because it
     // will not be part of the linker line anyway.
-    const module_obj_path: ?[]const u8 = if (options.module != null) blk: {
-        assert(options.use_llvm); // `linkWithZld` should never be called when the Wasm backend is used
+    const module_obj_path: ?[]const u8 = if (opt_zcu != null) blk: {
+        assert(use_llvm); // `linkWithZld` should never be called when the Wasm backend is used
         try wasm.flushModule(comp, prog_node);
 
         if (fs.path.dirname(full_out_path)) |dirname| {
@@ -3416,12 +3479,14 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
     const id_symlink_basename = "zld.id";
 
     var man: Cache.Manifest = undefined;
-    defer if (!options.disable_lld_caching) man.deinit();
+    defer if (!wasm.base.disable_lld_caching) man.deinit();
     var digest: [Cache.hex_digest_len]u8 = undefined;
 
+    const objects = wasm.base.comp.objects;
+
     // NOTE: The following section must be maintained to be equal
     // to the section defined in `linkWithLLD`
-    if (!options.disable_lld_caching) {
+    if (!wasm.base.disable_lld_caching) {
         man = comp.cache_parent.obtain();
 
         // We are about to obtain this lock, so here we give other processes a chance first.
@@ -3429,7 +3494,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
 
         comptime assert(Compilation.link_hash_implementation_version == 10);
 
-        for (options.objects) |obj| {
+        for (objects) |obj| {
             _ = try man.addFile(obj.path, null);
             man.hash.add(obj.must_link);
         }
@@ -3438,19 +3503,19 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
         }
         try man.addOptionalFile(module_obj_path);
         try man.addOptionalFile(compiler_rt_path);
-        man.hash.addOptionalBytes(options.entry);
-        man.hash.addOptional(options.stack_size_override);
-        man.hash.add(wasm.base.options.build_id);
-        man.hash.add(options.import_memory);
-        man.hash.add(options.import_table);
-        man.hash.add(options.export_table);
-        man.hash.addOptional(options.initial_memory);
-        man.hash.addOptional(options.max_memory);
-        man.hash.add(options.shared_memory);
-        man.hash.addOptional(options.global_base);
-        man.hash.add(options.export_symbol_names.len);
+        man.hash.addOptionalBytes(wasm.base.comp.config.entry);
+        man.hash.add(wasm.base.stack_size);
+        man.hash.add(wasm.base.build_id);
+        man.hash.add(wasm.base.comp.config.import_memory);
+        man.hash.add(wasm.base.comp.config.shared_memory);
+        man.hash.add(wasm.import_table);
+        man.hash.add(wasm.export_table);
+        man.hash.addOptional(wasm.initial_memory);
+        man.hash.addOptional(wasm.max_memory);
+        man.hash.addOptional(wasm.global_base);
+        man.hash.add(wasm.export_symbol_names.len);
         // strip does not need to go into the linker hash because it is part of the hash namespace
-        for (options.export_symbol_names) |symbol_name| {
+        for (wasm.export_symbol_names) |symbol_name| {
             man.hash.addBytes(symbol_name);
         }
 
@@ -3485,30 +3550,36 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
 
     // Positional arguments to the linker such as object files and static archives.
     var positionals = std.ArrayList([]const u8).init(arena);
-    try positionals.ensureUnusedCapacity(options.objects.len);
+    try positionals.ensureUnusedCapacity(objects.len);
+
+    const target = wasm.base.comp.root_mod.resolved_target.result;
+    const output_mode = wasm.base.comp.config.output_mode;
+    const link_mode = wasm.base.comp.config.link_mode;
+    const link_libc = wasm.base.comp.config.link_libc;
+    const link_libcpp = wasm.base.comp.config.link_libcpp;
+    const wasi_exec_model = wasm.base.comp.config.wasi_exec_model;
 
     // When the target os is WASI, we allow linking with WASI-LIBC
-    if (options.target.os.tag == .wasi) {
-        const is_exe_or_dyn_lib = wasm.base.options.output_mode == .Exe or
-            (wasm.base.options.output_mode == .Lib and wasm.base.options.link_mode == .Dynamic);
+    if (target.os.tag == .wasi) {
+        const is_exe_or_dyn_lib = output_mode == .Exe or
+            (output_mode == .Lib and link_mode == .Dynamic);
         if (is_exe_or_dyn_lib) {
-            const wasi_emulated_libs = wasm.base.options.wasi_emulated_libs;
-            for (wasi_emulated_libs) |crt_file| {
+            for (wasm.wasi_emulated_libs) |crt_file| {
                 try positionals.append(try comp.get_libc_crt_file(
                     arena,
                     wasi_libc.emulatedLibCRFileLibName(crt_file),
                 ));
             }
 
-            if (wasm.base.options.link_libc) {
+            if (link_libc) {
                 try positionals.append(try comp.get_libc_crt_file(
                     arena,
-                    wasi_libc.execModelCrtFileFullName(wasm.base.options.wasi_exec_model),
+                    wasi_libc.execModelCrtFileFullName(wasi_exec_model),
                 ));
                 try positionals.append(try comp.get_libc_crt_file(arena, "libc.a"));
             }
 
-            if (wasm.base.options.link_libcpp) {
+            if (link_libcpp) {
                 try positionals.append(comp.libcxx_static_lib.?.full_object_path);
                 try positionals.append(comp.libcxxabi_static_lib.?.full_object_path);
             }
@@ -3519,7 +3590,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
         try positionals.append(path);
     }
 
-    for (options.objects) |object| {
+    for (objects) |object| {
         try positionals.append(object.path);
     }
 
@@ -3562,7 +3633,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
     try wasm.setupExports();
     try wasm.writeToFile(enabled_features, emit_features_count, arena);
 
-    if (!wasm.base.options.disable_lld_caching) {
+    if (!wasm.base.disable_lld_caching) {
         // Update the file with the digest. If it fails we can continue; it only
         // means that the next invocation will have an unnecessary cache miss.
         Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
@@ -3594,15 +3665,18 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
     try wasm.populateErrorNameTable();
 
     // Used for all temporary memory allocated during flushing
-    var arena_instance = std.heap.ArenaAllocator.init(wasm.base.allocator);
+    const gpa = wasm.base.comp.gpa;
+    var arena_instance = std.heap.ArenaAllocator.init(gpa);
     defer arena_instance.deinit();
     const arena = arena_instance.allocator();
 
+    const objects = wasm.base.comp.objects;
+
     // Positional arguments to the linker such as object files and static archives.
     var positionals = std.ArrayList([]const u8).init(arena);
-    try positionals.ensureUnusedCapacity(wasm.base.options.objects.len);
+    try positionals.ensureUnusedCapacity(objects.len);
 
-    for (wasm.base.options.objects) |object| {
+    for (objects) |object| {
         positionals.appendAssumeCapacity(object.path);
     }
 
@@ -3711,6 +3785,10 @@ fn writeToFile(
     feature_count: u32,
     arena: Allocator,
 ) !void {
+    const gpa = wasm.base.comp.gpa;
+    const use_llvm = wasm.base.comp.config.use_llvm;
+    const use_lld = build_options.have_llvm and wasm.base.comp.config.use_lld;
+
     // Size of each section header
     const header_size = 5 + 1;
     // The amount of sections that will be written
@@ -3719,9 +3797,9 @@ fn writeToFile(
     var code_section_index: ?u32 = null;
     // Index of the data section. Used to tell relocation table where the section lives.
     var data_section_index: ?u32 = null;
-    const is_obj = wasm.base.options.output_mode == .Obj or (!wasm.base.options.use_llvm and wasm.base.options.use_lld);
+    const is_obj = wasm.base.options.output_mode == .Obj or (!use_llvm and use_lld);
 
-    var binary_bytes = std.ArrayList(u8).init(wasm.base.allocator);
+    var binary_bytes = std.ArrayList(u8).init(gpa);
     defer binary_bytes.deinit();
     const binary_writer = binary_bytes.writer();
 
@@ -3774,8 +3852,8 @@ fn writeToFile(
         if (import_memory) {
             const mem_name = if (is_obj) "__linear_memory" else "memory";
             const mem_imp: types.Import = .{
-                .module_name = try wasm.string_table.put(wasm.base.allocator, wasm.host_name),
-                .name = try wasm.string_table.put(wasm.base.allocator, mem_name),
+                .module_name = try wasm.string_table.put(gpa, wasm.host_name),
+                .name = try wasm.string_table.put(gpa, mem_name),
                 .kind = .{ .memory = wasm.memories.limits },
             };
             try wasm.emitImport(binary_writer, mem_imp);
@@ -3955,7 +4033,7 @@ fn writeToFile(
         var atom_index = wasm.atoms.get(code_index).?;
 
         // The code section must be sorted in line with the function order.
-        var sorted_atoms = try std.ArrayList(*const Atom).initCapacity(wasm.base.allocator, wasm.functions.count());
+        var sorted_atoms = try std.ArrayList(*const Atom).initCapacity(gpa, wasm.functions.count());
         defer sorted_atoms.deinit();
 
         while (true) {
@@ -3966,7 +4044,7 @@ fn writeToFile(
             sorted_atoms.appendAssumeCapacity(atom); // found more code atoms than functions
             atom_index = atom.prev orelse break;
         }
-        std.debug.assert(wasm.functions.count() == sorted_atoms.items.len);
+        assert(wasm.functions.count() == sorted_atoms.items.len);
 
         const atom_sort_fn = struct {
             fn sort(ctx: *const Wasm, lhs: *const Atom, rhs: *const Atom) bool {
@@ -4086,7 +4164,7 @@ fn writeToFile(
     if (!wasm.base.options.strip) {
         // The build id must be computed on the main sections only,
         // so we have to do it now, before the debug sections.
-        switch (wasm.base.options.build_id) {
+        switch (wasm.base.build_id) {
             .none => {},
             .fast => {
                 var id: [16]u8 = undefined;
@@ -4121,7 +4199,7 @@ fn writeToFile(
         //     try dwarf.writeDbgLineHeader();
         // }
 
-        var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);
+        var debug_bytes = std.ArrayList(u8).init(gpa);
         defer debug_bytes.deinit();
 
         const DebugSection = struct {
@@ -4362,8 +4440,10 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
 }
 
 fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: anytype, writer: anytype) !void {
+    const gpa = wasm.base.comp.gpa;
+
     // We must emit subsection size, so first write to a temporary list
-    var section_list = std.ArrayList(u8).init(wasm.base.allocator);
+    var section_list = std.ArrayList(u8).init(gpa);
     defer section_list.deinit();
     const sub_writer = section_list.writer();
 
@@ -4445,12 +4525,13 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
     const tracy = trace(@src());
     defer tracy.end();
 
-    var arena_allocator = std.heap.ArenaAllocator.init(wasm.base.allocator);
+    const gpa = wasm.base.comp.gpa;
+    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
-    const directory = wasm.base.options.emit.?.directory; // Just an alias to make it shorter to type.
-    const full_out_path = try directory.join(arena, &[_][]const u8{wasm.base.options.emit.?.sub_path});
+    const directory = wasm.base.emit.directory; // Just an alias to make it shorter to type.
+    const full_out_path = try directory.join(arena, &[_][]const u8{wasm.base.emit.sub_path});
 
     // If there is no Zig code to compile, then we should skip flushing the output file because it
     // will not be part of the linker line anyway.
@@ -4481,11 +4562,11 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
     const id_symlink_basename = "lld.id";
 
     var man: Cache.Manifest = undefined;
-    defer if (!wasm.base.options.disable_lld_caching) man.deinit();
+    defer if (!wasm.base.disable_lld_caching) man.deinit();
 
     var digest: [Cache.hex_digest_len]u8 = undefined;
 
-    if (!wasm.base.options.disable_lld_caching) {
+    if (!wasm.base.disable_lld_caching) {
         man = comp.cache_parent.obtain();
 
         // We are about to obtain this lock, so here we give other processes a chance first.
@@ -4502,13 +4583,13 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
         }
         try man.addOptionalFile(module_obj_path);
         try man.addOptionalFile(compiler_rt_path);
-        man.hash.addOptionalBytes(wasm.base.options.entry);
-        man.hash.addOptional(wasm.base.options.stack_size_override);
-        man.hash.add(wasm.base.options.build_id);
+        man.hash.addOptionalBytes(wasm.base.comp.config.entry);
+        man.hash.add(wasm.base.stack_size);
+        man.hash.add(wasm.base.build_id);
         man.hash.add(wasm.base.options.import_memory);
         man.hash.add(wasm.base.options.export_memory);
-        man.hash.add(wasm.base.options.import_table);
-        man.hash.add(wasm.base.options.export_table);
+        man.hash.add(wasm.import_table);
+        man.hash.add(wasm.export_table);
         man.hash.addOptional(wasm.base.options.initial_memory);
         man.hash.addOptional(wasm.base.options.max_memory);
         man.hash.add(wasm.base.options.shared_memory);
@@ -4573,7 +4654,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
         }
     } else {
         // Create an LLD command line and invoke it.
-        var argv = std.ArrayList([]const u8).init(wasm.base.allocator);
+        var argv = std.ArrayList([]const u8).init(gpa);
         defer argv.deinit();
         // We will invoke ourselves as a child process to gain access to LLD.
         // This is necessary because LLD does not behave properly as a library -
@@ -4598,22 +4679,20 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
             try argv.append("--export-memory");
         }
 
-        if (wasm.base.options.import_table) {
-            assert(!wasm.base.options.export_table);
+        if (wasm.import_table) {
+            assert(!wasm.export_table);
             try argv.append("--import-table");
         }
 
-        if (wasm.base.options.export_table) {
-            assert(!wasm.base.options.import_table);
+        if (wasm.export_table) {
+            assert(!wasm.import_table);
             try argv.append("--export-table");
         }
 
-        if (wasm.base.options.gc_sections) |gc| {
-            // For wasm-ld we only need to specify '--no-gc-sections' when the user explicitly
-            // specified it as garbage collection is enabled by default.
-            if (!gc) {
-                try argv.append("--no-gc-sections");
-            }
+        // For wasm-ld we only need to specify '--no-gc-sections' when the user explicitly
+        // specified it as garbage collection is enabled by default.
+        if (!wasm.base.gc_sections) {
+            try argv.append("--no-gc-sections");
         }
 
         if (wasm.base.options.strip) {
@@ -4662,12 +4741,10 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
             try argv.append("--no-entry");
         }
 
-        // Increase the default stack size to a more reasonable value of 1MB instead of
-        // the default of 1 Wasm page being 64KB, unless overridden by the user.
-        try argv.append("-z");
-        const stack_size = wasm.base.options.stack_size_override orelse std.wasm.page_size * 16;
-        const arg = try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size});
-        try argv.append(arg);
+        try argv.appendSlice(&.{
+            "-z",
+            try std.fmt.allocPrint(arena, "stack-size={d}", .{wasm.base.stack_size}),
+        });
 
         if (wasm.base.options.import_symbols) {
             try argv.append("--allow-undefined");
@@ -4681,7 +4758,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
         }
 
         // XXX - TODO: add when wasm-ld supports --build-id.
-        // if (wasm.base.options.build_id) {
+        // if (wasm.base.build_id) {
         //     try argv.append("--build-id=tree");
         // }
 
@@ -4695,8 +4772,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
             const is_exe_or_dyn_lib = wasm.base.options.output_mode == .Exe or
                 (wasm.base.options.output_mode == .Lib and wasm.base.options.link_mode == .Dynamic);
             if (is_exe_or_dyn_lib) {
-                const wasi_emulated_libs = wasm.base.options.wasi_emulated_libs;
-                for (wasi_emulated_libs) |crt_file| {
+                for (wasm.wasi_emulated_libs) |crt_file| {
                     try argv.append(try comp.get_libc_crt_file(
                         arena,
                         wasi_libc.emulatedLibCRFileLibName(crt_file),
@@ -4753,7 +4829,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
             try argv.append(p);
         }
 
-        if (wasm.base.options.verbose_link) {
+        if (wasm.base.comp.verbose_link) {
             // Skip over our own name so that the LLD linker name is the first argv item.
             Compilation.dump_argv(argv.items[1..]);
         }
@@ -4838,7 +4914,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
         }
     }
 
-    if (!wasm.base.options.disable_lld_caching) {
+    if (!wasm.base.disable_lld_caching) {
         // Update the file with the digest. If it fails we can continue; it only
         // means that the next invocation will have an unnecessary cache miss.
         Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
@@ -5113,14 +5189,15 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
     if (wasm.getTypeIndex(func_type)) |index| {
         return index;
     }
+    const gpa = wasm.base.comp.gpa;
 
     // functype does not exist.
-    const index = @as(u32, @intCast(wasm.func_types.items.len));
-    const params = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.params);
-    errdefer wasm.base.allocator.free(params);
-    const returns = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.returns);
-    errdefer wasm.base.allocator.free(returns);
-    try wasm.func_types.append(wasm.base.allocator, .{
+    const index: u32 = @intCast(wasm.func_types.items.len);
+    const params = try gpa.dupe(std.wasm.Valtype, func_type.params);
+    errdefer gpa.free(params);
+    const returns = try gpa.dupe(std.wasm.Valtype, func_type.returns);
+    errdefer gpa.free(returns);
+    try wasm.func_types.append(gpa, .{
         .params = params,
         .returns = returns,
     });
@@ -5131,9 +5208,10 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
 /// Asserts declaration has an associated `Atom`.
 /// Returns the index into the list of types.
 pub fn storeDeclType(wasm: *Wasm, decl_index: InternPool.DeclIndex, func_type: std.wasm.Type) !u32 {
+    const gpa = wasm.base.comp.gpa;
     const atom_index = wasm.decls.get(decl_index).?;
     const index = try wasm.putOrGetFuncType(func_type);
-    try wasm.atom_types.put(wasm.base.allocator, atom_index, index);
+    try wasm.atom_types.put(gpa, atom_index, index);
     return index;
 }
 
@@ -5142,8 +5220,7 @@ pub fn storeDeclType(wasm: *Wasm, decl_index: InternPool.DeclIndex, func_type: s
 fn markReferences(wasm: *Wasm) !void {
     const tracy = trace(@src());
     defer tracy.end();
-    const do_garbage_collect = wasm.base.options.gc_sections orelse
-        (wasm.base.options.output_mode != .Obj);
+    const do_garbage_collect = wasm.base.gc_sections;
 
     for (wasm.resolved_symbols.keys()) |sym_loc| {
         const sym = sym_loc.getSymbol(wasm);
src/Compilation.zig
@@ -69,6 +69,8 @@ root_name: [:0]const u8,
 cache_mode: CacheMode,
 include_compiler_rt: bool,
 objects: []Compilation.LinkObject,
+/// Needed only for passing -F args to clang.
+framework_dirs: []const []const u8,
 /// These are *always* dynamically linked. Static libraries will be
 /// provided as positional arguments.
 system_libs: std.StringArrayHashMapUnmanaged(SystemLib),
@@ -133,6 +135,7 @@ verbose_llvm_ir: ?[]const u8,
 verbose_llvm_bc: ?[]const u8,
 verbose_cimport: bool,
 verbose_llvm_cpu_features: bool,
+verbose_link: bool,
 disable_c_depfile: bool,
 time_report: bool,
 stack_report: bool,
@@ -220,6 +223,8 @@ emit_llvm_bc: ?EmitLoc,
 work_queue_wait_group: WaitGroup = .{},
 astgen_wait_group: WaitGroup = .{},
 
+llvm_opt_bisect_limit: c_int,
+
 pub const Emit = struct {
     /// Where the output will go.
     directory: Directory,
@@ -340,7 +345,7 @@ const Job = union(enum) {
     /// one of WASI libc static objects
     wasi_libc_crt_file: wasi_libc.CRTFile,
 
-    /// The value is the index into `link.File.Options.system_libs`.
+    /// The value is the index into `system_libs`.
     windows_import_lib: usize,
 };
 
@@ -819,6 +824,20 @@ pub const cache_helpers = struct {
         addEmitLoc(hh, optional_emit_loc orelse return);
     }
 
+    pub fn addOptionalDebugFormat(hh: *Cache.HashHelper, x: ?link.File.DebugFormat) void {
+        hh.add(x != null);
+        addDebugFormat(hh, x orelse return);
+    }
+
+    pub fn addDebugFormat(hh: *Cache.HashHelper, x: link.File.DebugFormat) void {
+        const tag: @typeInfo(link.File.DebugFormat).Union.tag_type.? = x;
+        hh.add(tag);
+        switch (x) {
+            .strip, .code_view => {},
+            .dwarf => |f| hh.add(f),
+        }
+    }
+
     pub fn hashCSource(self: *Cache.Manifest, c_source: CSourceFile) !void {
         _ = try self.addFile(c_source.src_path, null);
         // Hash the extra flags, with special care to call addFile for file parameters.
@@ -846,7 +865,7 @@ pub const ClangPreprocessorMode = enum {
     stdout,
 };
 
-pub const Framework = link.Framework;
+pub const Framework = link.File.MachO.Framework;
 pub const SystemLib = link.SystemLib;
 pub const CacheMode = link.CacheMode;
 
@@ -952,7 +971,7 @@ pub const InitOptions = struct {
     linker_print_gc_sections: bool = false,
     linker_print_icf_sections: bool = false,
     linker_print_map: bool = false,
-    linker_opt_bisect_limit: i32 = -1,
+    llvm_opt_bisect_limit: i32 = -1,
     each_lib_rpath: ?bool = null,
     build_id: ?std.zig.BuildId = null,
     disable_c_depfile: bool = false,
@@ -994,7 +1013,7 @@ pub const InitOptions = struct {
     hash_style: link.HashStyle = .both,
     entry: ?[]const u8 = null,
     force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .{},
-    stack_size_override: ?u64 = null,
+    stack_size: ?u64 = null,
     image_base_override: ?u64 = null,
     version: ?std.SemanticVersion = null,
     compatibility_version: ?std.SemanticVersion = null,
@@ -1007,7 +1026,7 @@ pub const InitOptions = struct {
     test_name_prefix: ?[]const u8 = null,
     test_runner_path: ?[]const u8 = null,
     subsystem: ?std.Target.SubSystem = null,
-    dwarf_format: ?std.dwarf.Format = null,
+    debug_format: ?link.File.DebugFormat = null,
     /// (Zig compiler development) Enable dumping linker's state as JSON.
     enable_link_snapshots: bool = false,
     /// (Darwin) Install name of the dylib
@@ -1297,7 +1316,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
         cache.hash.add(options.config.link_libcpp);
         cache.hash.add(options.config.link_libunwind);
         cache.hash.add(output_mode);
-        cache.hash.addOptional(options.dwarf_format);
+        cache_helpers.addOptionalDebugFormat(&cache.hash, options.debug_format);
         cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_bin);
         cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_implib);
         cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_docs);
@@ -1596,6 +1615,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
             .verbose_llvm_bc = options.verbose_llvm_bc,
             .verbose_cimport = options.verbose_cimport,
             .verbose_llvm_cpu_features = options.verbose_llvm_cpu_features,
+            .verbose_link = options.verbose_link,
             .disable_c_depfile = options.disable_c_depfile,
             .owned_link_dir = owned_link_dir,
             .color = options.color,
@@ -1617,6 +1637,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
             .libc_installation = libc_dirs.libc_installation,
             .include_compiler_rt = include_compiler_rt,
             .objects = options.link_objects,
+            .framework_dirs = options.framework_dirs,
+            .llvm_opt_bisect_limit = options.llvm_opt_bisect_limit,
         };
 
         if (bin_file_emit) |emit| {
@@ -1636,7 +1658,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
                 .z_max_page_size = options.linker_z_max_page_size,
                 .darwin_sdk_layout = libc_dirs.darwin_sdk_layout,
                 .frameworks = options.frameworks,
-                .framework_dirs = options.framework_dirs,
                 .wasi_emulated_libs = options.wasi_emulated_libs,
                 .lib_dirs = options.lib_dirs,
                 .rpath_list = options.rpath_list,
@@ -1659,13 +1680,12 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
                 .print_gc_sections = options.linker_print_gc_sections,
                 .print_icf_sections = options.linker_print_icf_sections,
                 .print_map = options.linker_print_map,
-                .opt_bisect_limit = options.linker_opt_bisect_limit,
                 .tsaware = options.linker_tsaware,
                 .nxcompat = options.linker_nxcompat,
                 .dynamicbase = options.linker_dynamicbase,
                 .major_subsystem_version = options.major_subsystem_version,
                 .minor_subsystem_version = options.minor_subsystem_version,
-                .stack_size_override = options.stack_size_override,
+                .stack_size = options.stack_size,
                 .image_base_override = options.image_base_override,
                 .version_script = options.version_script,
                 .gc_sections = options.linker_gc_sections,
@@ -1674,7 +1694,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
                 .rdynamic = options.rdynamic,
                 .soname = options.soname,
                 .compatibility_version = options.compatibility_version,
-                .verbose_link = options.verbose_link,
                 .dll_export_fns = dll_export_fns,
                 .skip_linker_dependencies = options.skip_linker_dependencies,
                 .parent_compilation_link_libc = options.parent_compilation_link_libc,
@@ -1682,7 +1701,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
                 .build_id = build_id,
                 .disable_lld_caching = options.disable_lld_caching or cache_mode == .whole,
                 .subsystem = options.subsystem,
-                .dwarf_format = options.dwarf_format,
+                .debug_format = options.debug_format,
                 .hash_style = options.hash_style,
                 .enable_link_snapshots = options.enable_link_snapshots,
                 .install_name = options.install_name,
@@ -1826,7 +1845,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
 
             // When linking mingw-w64 there are some import libs we always need.
             for (mingw.always_link_libs) |name| {
-                try comp.bin_file.options.system_libs.put(comp.gpa, name, .{
+                try comp.system_libs.put(comp.gpa, name, .{
                     .needed = false,
                     .weak = false,
                     .path = null,
@@ -1835,7 +1854,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
         }
         // Generate Windows import libs.
         if (target.os.tag == .windows) {
-            const count = comp.bin_file.options.system_libs.count();
+            const count = comp.system_libs.count();
             try comp.work_queue.ensureUnusedCapacity(count);
             for (0..count) |i| {
                 comp.work_queue.writeItemAssumeCapacity(.{ .windows_import_lib = i });
@@ -2450,7 +2469,7 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
     cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_ir);
     cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_bc);
 
-    man.hash.addOptional(comp.bin_file.options.stack_size_override);
+    man.hash.add(comp.bin_file.stack_size);
     man.hash.addOptional(comp.bin_file.options.image_base_override);
     man.hash.addOptional(comp.bin_file.options.gc_sections);
     man.hash.add(comp.bin_file.options.eh_frame_hdr);
@@ -2460,7 +2479,7 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
     man.hash.addListOfBytes(comp.bin_file.options.rpath_list);
     man.hash.addListOfBytes(comp.bin_file.options.symbol_wrap_set.keys());
     man.hash.add(comp.bin_file.options.each_lib_rpath);
-    man.hash.add(comp.bin_file.options.build_id);
+    man.hash.add(comp.bin_file.build_id);
     man.hash.add(comp.bin_file.options.skip_linker_dependencies);
     man.hash.add(comp.bin_file.options.z_nodelete);
     man.hash.add(comp.bin_file.options.z_notext);
@@ -2488,9 +2507,9 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
     }
     man.hash.addOptionalBytes(comp.bin_file.options.soname);
     man.hash.addOptional(comp.bin_file.options.version);
-    try link.hashAddSystemLibs(man, comp.bin_file.options.system_libs);
+    try link.hashAddSystemLibs(man, comp.system_libs);
     man.hash.addListOfBytes(comp.bin_file.options.force_undefined_symbols.keys());
-    man.hash.addOptional(comp.bin_file.options.allow_shlib_undefined);
+    man.hash.addOptional(comp.bin_file.allow_shlib_undefined);
     man.hash.add(comp.bin_file.options.bind_global_refs_locally);
     man.hash.add(comp.bin_file.options.tsan);
     man.hash.addOptionalBytes(comp.bin_file.options.sysroot);
@@ -2505,7 +2524,7 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
     man.hash.addOptional(comp.bin_file.options.global_base);
 
     // Mach-O specific stuff
-    man.hash.addListOfBytes(comp.bin_file.options.framework_dirs);
+    man.hash.addListOfBytes(comp.framework_dirs);
     try link.hashAddFrameworks(man, comp.bin_file.options.frameworks);
     try man.addOptionalFile(comp.bin_file.options.entitlements);
     man.hash.addOptional(comp.bin_file.options.pagezero_size);
@@ -3561,7 +3580,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
             const named_frame = tracy.namedFrame("windows_import_lib");
             defer named_frame.end();
 
-            const link_lib = comp.bin_file.options.system_libs.keys()[index];
+            const link_lib = comp.system_libs.keys()[index];
             mingw.buildImportLib(comp, link_lib) catch |err| {
                 // TODO Surface more error details.
                 comp.lockAndSetMiscFailure(
@@ -4964,7 +4983,7 @@ pub fn addCCArgs(
                 try argv.appendSlice(&.{ "-iframework", framework_dir });
             }
 
-            for (comp.bin_file.options.framework_dirs) |framework_dir| {
+            for (comp.framework_dirs) |framework_dir| {
                 try argv.appendSlice(&.{ "-F", framework_dir });
             }
 
@@ -5219,22 +5238,21 @@ pub fn addCCArgs(
         },
     }
 
-    if (!comp.bin_file.options.strip) {
-        switch (target.ofmt) {
-            .coff => {
-                // -g is required here because -gcodeview doesn't trigger debug info
-                // generation, it only changes the type of information generated.
-                try argv.appendSlice(&.{ "-g", "-gcodeview" });
-            },
-            .elf, .macho => {
-                try argv.append("-gdwarf-4");
-                if (comp.bin_file.options.dwarf_format) |f| switch (f) {
-                    .@"32" => try argv.append("-gdwarf32"),
-                    .@"64" => try argv.append("-gdwarf64"),
-                };
-            },
-            else => try argv.append("-g"),
-        }
+    try argv.ensureUnusedCapacity(2);
+    switch (comp.bin_file.debug_format) {
+        .strip => {},
+        .code_view => {
+            // -g is required here because -gcodeview doesn't trigger debug info
+            // generation, it only changes the type of information generated.
+            argv.appendSliceAssumeCapacity(&.{ "-g", "-gcodeview" });
+        },
+        .dwarf => |f| {
+            argv.appendAssumeCapacity("-gdwarf-4");
+            switch (f) {
+                .@"32" => argv.appendAssumeCapacity("-gdwarf32"),
+                .@"64" => argv.appendAssumeCapacity("-gdwarf64"),
+            }
+        },
     }
 
     if (target_util.llvmMachineAbi(target)) |mabi| {
@@ -6306,7 +6324,7 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
     // This happens when an `extern "foo"` function is referenced.
     // If we haven't seen this library yet and we're targeting Windows, we need
     // to queue up a work item to produce the DLL import library for this.
-    const gop = try comp.bin_file.options.system_libs.getOrPut(comp.gpa, lib_name);
+    const gop = try comp.system_libs.getOrPut(comp.gpa, lib_name);
     if (!gop.found_existing and comp.getTarget().os.tag == .windows) {
         gop.value_ptr.* = .{
             .needed = true,
@@ -6314,7 +6332,7 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
             .path = null,
         };
         try comp.work_queue.writeItem(.{
-            .windows_import_lib = comp.bin_file.options.system_libs.count() - 1,
+            .windows_import_lib = comp.system_libs.count() - 1,
         });
     }
 }
src/link.zig
@@ -32,13 +32,6 @@ pub const SystemLib = struct {
     path: ?[]const u8,
 };
 
-/// When adding a new field, remember to update `hashAddFrameworks`.
-pub const Framework = struct {
-    needed: bool = false,
-    weak: bool = false,
-    path: []const u8,
-};
-
 pub const SortSection = enum { name, alignment };
 
 pub const CacheMode = enum { incremental, whole };
@@ -56,14 +49,6 @@ pub fn hashAddSystemLibs(
     }
 }
 
-pub fn hashAddFrameworks(man: *Cache.Manifest, hm: []const Framework) !void {
-    for (hm) |value| {
-        man.hash.add(value.needed);
-        man.hash.add(value.weak);
-        _ = try man.addFile(value.path, null);
-    }
-}
-
 pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version;
 
 pub const HashStyle = enum { sysv, gnu, both };
@@ -81,6 +66,19 @@ pub const File = struct {
     /// When linking with LLD, this linker code will output an object file only at
     /// this location, and then this path can be placed on the LLD linker line.
     intermediary_basename: ?[]const u8 = null,
+    disable_lld_caching: bool,
+    gc_sections: bool,
+    build_id: std.zig.BuildId,
+    rpath_list: []const []const u8,
+    /// List of symbols forced as undefined in the symbol table
+    /// thus forcing their resolution by the linker.
+    /// Corresponds to `-u <symbol>` for ELF/MachO and `/include:<symbol>` for COFF/PE.
+    force_undefined_symbols: std.StringArrayHashMapUnmanaged(void),
+    allow_shlib_undefined: bool,
+    stack_size: u64,
+    debug_format: DebugFormat,
+    function_sections: bool,
+    data_sections: bool,
 
     /// Prevents other processes from clobbering files in the output directory
     /// of this linking operation.
@@ -88,6 +86,12 @@ pub const File = struct {
 
     child_pid: ?std.ChildProcess.Id = null,
 
+    pub const DebugFormat = union(enum) {
+        strip,
+        dwarf: std.dwarf.Format,
+        code_view,
+    };
+
     pub const OpenOptions = struct {
         comp: *Compilation,
         emit: Compilation.Emit,
@@ -97,7 +101,7 @@ pub const File = struct {
 
         /// Virtual address of the entry point procedure relative to image base.
         entry_addr: ?u64,
-        stack_size_override: ?u64,
+        stack_size: ?u64,
         image_base_override: ?u64,
         function_sections: bool,
         data_sections: bool,
@@ -128,7 +132,6 @@ pub const File = struct {
         max_memory: ?u64,
         export_symbol_names: []const []const u8,
         global_base: ?u64,
-        verbose_link: bool,
         dll_export_fns: bool,
         skip_linker_dependencies: bool,
         parent_compilation_link_libc: bool,
@@ -139,7 +142,7 @@ pub const File = struct {
         sort_section: ?SortSection,
         major_subsystem_version: ?u32,
         minor_subsystem_version: ?u32,
-        gc_sections: ?bool = null,
+        gc_sections: ?bool,
         allow_shlib_undefined: ?bool,
         subsystem: ?std.Target.SubSystem,
         version_script: ?[]const u8,
@@ -147,11 +150,7 @@ pub const File = struct {
         print_gc_sections: bool,
         print_icf_sections: bool,
         print_map: bool,
-        opt_bisect_limit: i32,
 
-        /// List of symbols forced as undefined in the symbol table
-        /// thus forcing their resolution by the linker.
-        /// Corresponds to `-u <symbol>` for ELF/MachO and `/include:<symbol>` for COFF/PE.
         force_undefined_symbols: std.StringArrayHashMapUnmanaged(void),
         /// Use a wrapper function for symbol. Any undefined reference to symbol
         /// will be resolved to __wrap_symbol. Any undefined reference to
@@ -163,7 +162,7 @@ pub const File = struct {
 
         compatibility_version: ?std.SemanticVersion,
 
-        dwarf_format: ?std.dwarf.Format,
+        debug_format: ?DebugFormat,
 
         // TODO: remove this. libraries are resolved by the frontend.
         lib_dirs: []const []const u8,
@@ -184,8 +183,7 @@ pub const File = struct {
         headerpad_max_install_names: bool,
         /// (Darwin) remove dylibs that are unreachable by the entry point or exported symbols
         dead_strip_dylibs: bool,
-        framework_dirs: []const []const u8,
-        frameworks: []const Framework,
+        frameworks: []const MachO.Framework,
         darwin_sdk_layout: ?MachO.SdkLayout,
 
         /// (Windows) PDB source path prefix to instruct the linker how to resolve relative
@@ -228,7 +226,7 @@ pub const File = struct {
             .coff, .elf, .macho, .plan9, .wasm => {
                 if (build_options.only_c) unreachable;
                 if (base.file != null) return;
-                const emit = base.options.emit orelse return;
+                const emit = base.emit;
                 if (base.child_pid) |pid| {
                     if (builtin.os.tag == .windows) {
                         base.cast(Coff).?.ptraceAttach(pid) catch |err| {
@@ -256,10 +254,13 @@ pub const File = struct {
                         }
                     }
                 }
+                const use_lld = build_options.have_llvm and base.comp.config.use_lld;
+                const output_mode = base.comp.config.output_mode;
+                const link_mode = base.comp.config.link_mode;
                 base.file = try emit.directory.handle.createFile(emit.sub_path, .{
                     .truncate = false,
                     .read = true,
-                    .mode = determineMode(base.options),
+                    .mode = determineMode(use_lld, output_mode, link_mode),
                 });
             },
             .c, .spirv, .nvptx => {},
@@ -267,9 +268,13 @@ pub const File = struct {
     }
 
     pub fn makeExecutable(base: *File) !void {
-        switch (base.options.output_mode) {
+        const output_mode = base.comp.config.output_mode;
+        const link_mode = base.comp.config.link_mode;
+        const use_lld = build_options.have_llvm and base.comp.config.use_lld;
+
+        switch (output_mode) {
             .Obj => return,
-            .Lib => switch (base.options.link_mode) {
+            .Lib => switch (link_mode) {
                 .Static => return,
                 .Dynamic => {},
             },
@@ -278,7 +283,6 @@ pub const File = struct {
         switch (base.tag) {
             .elf => if (base.file) |f| {
                 if (build_options.only_c) unreachable;
-                const use_lld = build_options.have_llvm and base.options.use_lld;
                 if (base.intermediary_basename != null and use_lld) {
                     // The file we have open is not the final file that we want to
                     // make executable, so we don't have to close it.
@@ -596,7 +600,7 @@ pub const File = struct {
             return @fieldParentPtr(C, "base", base).flush(comp, prog_node);
         }
         if (comp.clang_preprocessor_mode == .yes) {
-            const emit = base.options.emit orelse return; // -fno-emit-bin
+            const emit = base.emit;
             // TODO: avoid extra link step when it's just 1 object file (the `zig cc -c` case)
             // Until then, we do `lld -r -o output.o input.o` even though the output is the same
             // as the input. For the preprocessing case (`zig cc -E -o foo`) we copy the file
@@ -610,8 +614,10 @@ pub const File = struct {
             return;
         }
 
-        const use_lld = build_options.have_llvm and base.options.use_lld;
-        if (use_lld and base.options.output_mode == .Lib and base.options.link_mode == .Static) {
+        const use_lld = build_options.have_llvm and base.comp.config.use_lld;
+        const output_mode = base.comp.config.output_mode;
+        const link_mode = base.comp.config.link_mode;
+        if (use_lld and output_mode == .Lib and link_mode == .Static) {
             return base.linkAsArchive(comp, prog_node);
         }
         switch (base.tag) {
@@ -845,8 +851,6 @@ pub const File = struct {
     }
 
     pub fn linkAsArchive(base: *File, comp: *Compilation, prog_node: *std.Progress.Node) FlushError!void {
-        const emit = base.options.emit orelse return;
-
         const tracy = trace(@src());
         defer tracy.end();
 
@@ -854,22 +858,23 @@ pub const File = struct {
         defer arena_allocator.deinit();
         const arena = arena_allocator.allocator();
 
-        const directory = emit.directory; // Just an alias to make it shorter to type.
-        const full_out_path = try directory.join(arena, &[_][]const u8{emit.sub_path});
+        const directory = base.emit.directory; // Just an alias to make it shorter to type.
+        const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path});
         const full_out_path_z = try arena.dupeZ(u8, full_out_path);
+        const opt_zcu = base.comp.module;
 
         // If there is no Zig code to compile, then we should skip flushing the output file
         // because it will not be part of the linker line anyway.
-        const module_obj_path: ?[]const u8 = if (base.options.module != null) blk: {
+        const zcu_obj_path: ?[]const u8 = if (opt_zcu != null) blk: {
             try base.flushModule(comp, prog_node);
 
             const dirname = fs.path.dirname(full_out_path_z) orelse ".";
             break :blk try fs.path.join(arena, &.{ dirname, base.intermediary_basename.? });
         } else null;
 
-        log.debug("module_obj_path={s}", .{if (module_obj_path) |s| s else "(null)"});
+        log.debug("zcu_obj_path={s}", .{if (zcu_obj_path) |s| s else "(null)"});
 
-        const compiler_rt_path: ?[]const u8 = if (base.options.include_compiler_rt)
+        const compiler_rt_path: ?[]const u8 = if (base.comp.include_compiler_rt)
             comp.compiler_rt_obj.?.full_object_path
         else
             null;
@@ -881,17 +886,19 @@ pub const File = struct {
         const id_symlink_basename = "llvm-ar.id";
 
         var man: Cache.Manifest = undefined;
-        defer if (!base.options.disable_lld_caching) man.deinit();
+        defer if (!base.disable_lld_caching) man.deinit();
+
+        const objects = base.comp.objects;
 
         var digest: [Cache.hex_digest_len]u8 = undefined;
 
-        if (!base.options.disable_lld_caching) {
+        if (!base.disable_lld_caching) {
             man = comp.cache_parent.obtain();
 
             // We are about to obtain this lock, so here we give other processes a chance first.
             base.releaseLock();
 
-            for (base.options.objects) |obj| {
+            for (objects) |obj| {
                 _ = try man.addFile(obj.path, null);
                 man.hash.add(obj.must_link);
                 man.hash.add(obj.loption);
@@ -904,7 +911,7 @@ pub const File = struct {
                     _ = try man.addFile(key.status.success.res_path, null);
                 }
             }
-            try man.addOptionalFile(module_obj_path);
+            try man.addOptionalFile(zcu_obj_path);
             try man.addOptionalFile(compiler_rt_path);
 
             // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
@@ -934,11 +941,11 @@ pub const File = struct {
         }
 
         const win32_resource_table_len = if (build_options.only_core_functionality) 0 else comp.win32_resource_table.count();
-        const num_object_files = base.options.objects.len + comp.c_object_table.count() + win32_resource_table_len + 2;
+        const num_object_files = objects.len + comp.c_object_table.count() + win32_resource_table_len + 2;
         var object_files = try std.ArrayList([*:0]const u8).initCapacity(base.allocator, num_object_files);
         defer object_files.deinit();
 
-        for (base.options.objects) |obj| {
+        for (objects) |obj| {
             object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj.path));
         }
         for (comp.c_object_table.keys()) |key| {
@@ -949,14 +956,14 @@ pub const File = struct {
                 object_files.appendAssumeCapacity(try arena.dupeZ(u8, key.status.success.res_path));
             }
         }
-        if (module_obj_path) |p| {
+        if (zcu_obj_path) |p| {
             object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
         }
         if (compiler_rt_path) |p| {
             object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
         }
 
-        if (base.options.verbose_link) {
+        if (comp.verbose_link) {
             std.debug.print("ar rcs {s}", .{full_out_path_z});
             for (object_files.items) |arg| {
                 std.debug.print(" {s}", .{arg});
@@ -972,7 +979,7 @@ pub const File = struct {
         const bad = llvm_bindings.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_tag);
         if (bad) return error.UnableToWriteArchive;
 
-        if (!base.options.disable_lld_caching) {
+        if (!base.disable_lld_caching) {
             Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
                 log.warn("failed to save archive hash digest file: {s}", .{@errorName(err)});
             };
@@ -1090,6 +1097,34 @@ pub const File = struct {
         }
     }
 
+    pub fn isStatic(self: File) bool {
+        return self.base.options.link_mode == .Static;
+    }
+
+    pub fn isObject(self: File) bool {
+        const output_mode = self.comp.config.output_mode;
+        return output_mode == .Obj;
+    }
+
+    pub fn isExe(self: File) bool {
+        const output_mode = self.comp.config.output_mode;
+        return output_mode == .Exe;
+    }
+
+    pub fn isStaticLib(self: File) bool {
+        const output_mode = self.comp.config.output_mode;
+        return output_mode == .Lib and self.isStatic();
+    }
+
+    pub fn isRelocatable(self: File) bool {
+        return self.isObject() or self.isStaticLib();
+    }
+
+    pub fn isDynLib(self: File) bool {
+        const output_mode = self.comp.config.output_mode;
+        return output_mode == .Lib and !self.isStatic();
+    }
+
     pub const C = @import("link/C.zig");
     pub const Coff = @import("link/Coff.zig");
     pub const Plan9 = @import("link/Plan9.zig");
src/main.zig
@@ -842,7 +842,7 @@ fn buildOutputType(
     var linker_print_gc_sections: bool = false;
     var linker_print_icf_sections: bool = false;
     var linker_print_map: bool = false;
-    var linker_opt_bisect_limit: i32 = -1;
+    var llvm_opt_bisect_limit: c_int = -1;
     var linker_z_nocopyreloc = false;
     var linker_z_nodelete = false;
     var linker_z_notext = false;
@@ -859,7 +859,7 @@ fn buildOutputType(
     var linker_module_definition_file: ?[]const u8 = null;
     var test_no_exec = false;
     var force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .{};
-    var stack_size_override: ?u64 = null;
+    var stack_size: ?u64 = null;
     var image_base_override: ?u64 = null;
     var link_eh_frame_hdr = false;
     var link_emit_relocs = false;
@@ -892,7 +892,7 @@ fn buildOutputType(
     var contains_res_file: bool = false;
     var reference_trace: ?u32 = null;
     var pdb_out_path: ?[]const u8 = null;
-    var dwarf_format: ?std.dwarf.Format = null;
+    var debug_format: ?link.File.DebugFormat = null;
     var error_limit: ?Module.ErrorInt = null;
     var want_structured_cfg: ?bool = null;
     // These are before resolving sysroot.
@@ -1129,10 +1129,7 @@ fn buildOutputType(
                     } else if (mem.eql(u8, arg, "--force_undefined")) {
                         try force_undefined_symbols.put(arena, args_iter.nextOrFatal(), {});
                     } else if (mem.eql(u8, arg, "--stack")) {
-                        const next_arg = args_iter.nextOrFatal();
-                        stack_size_override = std.fmt.parseUnsigned(u64, next_arg, 0) catch |err| {
-                            fatal("unable to parse stack size '{s}': {s}", .{ next_arg, @errorName(err) });
-                        };
+                        stack_size = parseStackSize(args_iter.nextOrFatal());
                     } else if (mem.eql(u8, arg, "--image-base")) {
                         const next_arg = args_iter.nextOrFatal();
                         image_base_override = std.fmt.parseUnsigned(u64, next_arg, 0) catch |err| {
@@ -1487,9 +1484,9 @@ fn buildOutputType(
                     } else if (mem.eql(u8, arg, "-fno-strip")) {
                         mod_opts.strip = false;
                     } else if (mem.eql(u8, arg, "-gdwarf32")) {
-                        dwarf_format = .@"32";
+                        debug_format = .{ .dwarf = .@"32" };
                     } else if (mem.eql(u8, arg, "-gdwarf64")) {
-                        dwarf_format = .@"64";
+                        debug_format = .{ .dwarf = .@"64" };
                     } else if (mem.eql(u8, arg, "-fformatted-panics")) {
                         formatted_panics = true;
                     } else if (mem.eql(u8, arg, "-fno-formatted-panics")) {
@@ -1511,7 +1508,9 @@ fn buildOutputType(
                     } else if (mem.eql(u8, arg, "-fno-builtin")) {
                         no_builtin = true;
                     } else if (mem.startsWith(u8, arg, "-fopt-bisect-limit=")) {
-                        linker_opt_bisect_limit = std.math.lossyCast(i32, parseIntSuffix(arg, "-fopt-bisect-limit=".len));
+                        const next_arg = arg["-fopt-bisect-limit=".len..];
+                        llvm_opt_bisect_limit = std.fmt.parseInt(c_int, next_arg, 0) catch |err|
+                            fatal("unable to parse '{s}': {s}", .{ arg, @errorName(err) });
                     } else if (mem.eql(u8, arg, "--eh-frame-hdr")) {
                         link_eh_frame_hdr = true;
                     } else if (mem.eql(u8, arg, "--dynamicbase")) {
@@ -1994,11 +1993,11 @@ fn buildOutputType(
                     },
                     .gdwarf32 => {
                         mod_opts.strip = false;
-                        dwarf_format = .@"32";
+                        debug_format = .{ .dwarf = .@"32" };
                     },
                     .gdwarf64 => {
                         mod_opts.strip = false;
-                        dwarf_format = .@"64";
+                        debug_format = .{ .dwarf = .@"64" };
                     },
                     .sanitize => {
                         if (mem.eql(u8, it.only_arg, "undefined")) {
@@ -2257,10 +2256,7 @@ fn buildOutputType(
                     } else if (mem.eql(u8, z_arg, "norelro")) {
                         linker_z_relro = false;
                     } else if (mem.startsWith(u8, z_arg, "stack-size=")) {
-                        const next_arg = z_arg["stack-size=".len..];
-                        stack_size_override = std.fmt.parseUnsigned(u64, next_arg, 0) catch |err| {
-                            fatal("unable to parse stack size '{s}': {s}", .{ next_arg, @errorName(err) });
-                        };
+                        stack_size = parseStackSize(z_arg["stack-size=".len..]);
                     } else if (mem.startsWith(u8, z_arg, "common-page-size=")) {
                         linker_z_common_page_size = parseIntSuffix(z_arg, "common-page-size=".len);
                     } else if (mem.startsWith(u8, z_arg, "max-page-size=")) {
@@ -2285,10 +2281,7 @@ fn buildOutputType(
                 } else if (mem.eql(u8, arg, "-u")) {
                     try force_undefined_symbols.put(arena, linker_args_it.nextOrFatal(), {});
                 } else if (mem.eql(u8, arg, "--stack") or mem.eql(u8, arg, "-stack_size")) {
-                    const stack_size = linker_args_it.nextOrFatal();
-                    stack_size_override = std.fmt.parseUnsigned(u64, stack_size, 0) catch |err| {
-                        fatal("unable to parse stack size override '{s}': {s}", .{ stack_size, @errorName(err) });
-                    };
+                    stack_size = parseStackSize(linker_args_it.nextOrFatal());
                 } else if (mem.eql(u8, arg, "--image-base")) {
                     const image_base = linker_args_it.nextOrFatal();
                     image_base_override = std.fmt.parseUnsigned(u64, image_base, 0) catch |err| {
@@ -3407,7 +3400,7 @@ fn buildOutputType(
         .linker_print_gc_sections = linker_print_gc_sections,
         .linker_print_icf_sections = linker_print_icf_sections,
         .linker_print_map = linker_print_map,
-        .linker_opt_bisect_limit = linker_opt_bisect_limit,
+        .llvm_opt_bisect_limit = llvm_opt_bisect_limit,
         .linker_global_base = linker_global_base,
         .linker_export_symbol_names = linker_export_symbol_names.items,
         .linker_z_nocopyreloc = linker_z_nocopyreloc,
@@ -3430,7 +3423,7 @@ fn buildOutputType(
         .link_eh_frame_hdr = link_eh_frame_hdr,
         .link_emit_relocs = link_emit_relocs,
         .force_undefined_symbols = force_undefined_symbols,
-        .stack_size_override = stack_size_override,
+        .stack_size = stack_size,
         .image_base_override = image_base_override,
         .formatted_panics = formatted_panics,
         .function_sections = function_sections,
@@ -3459,7 +3452,7 @@ fn buildOutputType(
         .test_runner_path = test_runner_path,
         .disable_lld_caching = !output_to_cache,
         .subsystem = subsystem,
-        .dwarf_format = dwarf_format,
+        .debug_format = debug_format,
         .debug_compile_errors = debug_compile_errors,
         .enable_link_snapshots = enable_link_snapshots,
         .install_name = install_name,
@@ -7688,3 +7681,8 @@ fn resolveTargetQueryOrFatal(target_query: std.Target.Query) std.Target {
     return std.zig.system.resolveTargetQuery(target_query) catch |err|
         fatal("unable to resolve target: {s}", .{@errorName(err)});
 }
+
+fn parseStackSize(s: []const u8) u64 {
+    return std.fmt.parseUnsigned(u64, s, 0) catch |err|
+        fatal("unable to parse stack size '{s}': {s}", .{ s, @errorName(err) });
+}
src/musl.zig
@@ -226,7 +226,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
                 .is_native_abi = false,
                 .self_exe_path = comp.self_exe_path,
                 .verbose_cc = comp.verbose_cc,
-                .verbose_link = comp.bin_file.options.verbose_link,
+                .verbose_link = comp.verbose_link,
                 .verbose_air = comp.verbose_air,
                 .verbose_llvm_ir = comp.verbose_llvm_ir,
                 .verbose_cimport = comp.verbose_cimport,
src/target.zig
@@ -323,6 +323,14 @@ pub fn hasLlvmSupport(target: std.Target, ofmt: std.Target.ObjectFormat) bool {
     };
 }
 
+/// The set of targets that Zig supports using LLD to link for.
+pub fn hasLldSupport(ofmt: std.Target.ObjectFormat) bool {
+    return switch (ofmt) {
+        .elf, .coff, .wasm => true,
+        else => false,
+    };
+}
+
 /// The set of targets that our own self-hosted backends have robust support for.
 /// Used to select between LLVM backend and self-hosted backend when compiling in
 /// debug mode. A given target should only return true here if it is passing greater