Commit a710368054

Timon Kruiper <timonkruiper@gmail.com>
2021-03-08 00:09:03
stage2: restructure LLVM backend
The LLVM backend is now structured into 3 different structs, namely Object, DeclGen and FuncGen. Object represents an object that is generated by the LLVM backend. DeclGen is responsible for generating a decl and FuncGen is responsible for generating llvm instructions from tzir in a function.
1 parent 56677f2
Changed files (3)
src
src/codegen/llvm.zig
@@ -16,7 +16,6 @@ const Value = @import("../value.zig").Value;
 const Type = @import("../type.zig").Type;
 
 const LazySrcLoc = Module.LazySrcLoc;
-const SrcLoc = Module.SrcLoc;
 
 pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
     const llvm_arch = switch (target.cpu.arch) {
@@ -146,83 +145,42 @@ pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
     return std.fmt.allocPrintZ(allocator, "{s}-unknown-{s}-{s}", .{ llvm_arch, llvm_os, llvm_abi });
 }
 
-pub const LLVMIRModule = struct {
-    module: *Module,
+pub const Object = struct {
     llvm_module: *const llvm.Module,
     context: *const llvm.Context,
     target_machine: *const llvm.TargetMachine,
-    builder: *const llvm.Builder,
-
-    object_path: []const u8,
-
-    gpa: *Allocator,
-    err_msg: ?*Module.ErrorMsg = null,
-
-    // TODO: The fields below should really move into a different struct,
-    //       because they are only valid when generating a function
-
-    /// TODO: this should not be undefined since it should be in another per-decl struct
-    /// Curent decl we are analysing. Stored to get source locations from relative info
-    decl: *Module.Decl = undefined,
-
-    /// This stores the LLVM values used in a function, such that they can be
-    /// referred to in other instructions. This table is cleared before every function is generated.
-    /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
-    /// in here, however if a block ends, the instructions can be thrown away.
-    func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value) = .{},
-
-    /// These fields are used to refer to the LLVM value of the function paramaters in an Arg instruction.
-    args: []*const llvm.Value = &[_]*const llvm.Value{},
-    arg_index: usize = 0,
-
-    entry_block: *const llvm.BasicBlock = undefined,
-    /// This fields stores the last alloca instruction, such that we can append more alloca instructions
-    /// to the top of the function.
-    latest_alloca_inst: ?*const llvm.Value = null,
-
-    llvm_func: *const llvm.Value = undefined,
-
-    /// This data structure is used to implement breaking to blocks.
-    blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
-        parent_bb: *const llvm.BasicBlock,
-        break_bbs: *BreakBasicBlocks,
-        break_vals: *BreakValues,
-    }) = .{},
-
-    src_loc: Module.SrcLoc,
-
-    const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
-    const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
+    object_pathZ: [:0]const u8,
 
-    pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*LLVMIRModule {
-        const self = try allocator.create(LLVMIRModule);
+    pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
+        const self = try allocator.create(Object);
         errdefer allocator.destroy(self);
 
-        const gpa = options.module.?.gpa;
-
-        const obj_basename = try std.zig.binNameAlloc(gpa, .{
+        const obj_basename = try std.zig.binNameAlloc(allocator, .{
             .root_name = options.root_name,
             .target = options.target,
             .output_mode = .Obj,
         });
-        defer gpa.free(obj_basename);
+        defer allocator.free(obj_basename);
 
         const o_directory = options.module.?.zig_cache_artifact_directory;
-        const object_path = try o_directory.join(gpa, &[_][]const u8{obj_basename});
-        errdefer gpa.free(object_path);
+        const object_path = try o_directory.join(allocator, &[_][]const u8{obj_basename});
+        defer allocator.free(object_path);
+
+        const object_pathZ = try allocator.dupeZ(u8, object_path);
+        errdefer allocator.free(object_pathZ);
 
         const context = llvm.Context.create();
         errdefer context.dispose();
 
         initializeLLVMTargets();
 
-        const root_nameZ = try gpa.dupeZ(u8, options.root_name);
-        defer gpa.free(root_nameZ);
+        const root_nameZ = try allocator.dupeZ(u8, options.root_name);
+        defer allocator.free(root_nameZ);
         const llvm_module = llvm.Module.createWithName(root_nameZ.ptr, context);
         errdefer llvm_module.dispose();
 
-        const llvm_target_triple = try targetTriple(gpa, options.target);
-        defer gpa.free(llvm_target_triple);
+        const llvm_target_triple = try targetTriple(allocator, options.target);
+        defer allocator.free(llvm_target_triple);
 
         var error_message: [*:0]const u8 = undefined;
         var target: *const llvm.Target = undefined;
@@ -257,34 +215,21 @@ pub const LLVMIRModule = struct {
         );
         errdefer target_machine.dispose();
 
-        const builder = context.createBuilder();
-        errdefer builder.dispose();
-
         self.* = .{
-            .module = options.module.?,
             .llvm_module = llvm_module,
             .context = context,
             .target_machine = target_machine,
-            .builder = builder,
-            .object_path = object_path,
-            .gpa = gpa,
-            // TODO move this field into a struct that is only instantiated per gen() call
-            .src_loc = undefined,
+            .object_pathZ = object_pathZ,
         };
         return self;
     }
 
-    pub fn deinit(self: *LLVMIRModule, allocator: *Allocator) void {
-        self.builder.dispose();
+    pub fn deinit(self: *Object, allocator: *Allocator) void {
         self.target_machine.dispose();
         self.llvm_module.dispose();
         self.context.dispose();
 
-        self.func_inst_table.deinit(self.gpa);
-        self.gpa.free(self.object_path);
-
-        self.blocks.deinit(self.gpa);
-
+        allocator.free(self.object_pathZ);
         allocator.destroy(self);
     }
 
@@ -296,7 +241,7 @@ pub const LLVMIRModule = struct {
         llvm.initializeAllAsmParsers();
     }
 
-    pub fn flushModule(self: *LLVMIRModule, comp: *Compilation) !void {
+    pub fn flushModule(self: *Object, comp: *Compilation) !void {
         if (comp.verbose_llvm_ir) {
             const dump = self.llvm_module.printToString();
             defer llvm.disposeMessage(dump);
@@ -317,13 +262,10 @@ pub const LLVMIRModule = struct {
             }
         }
 
-        const object_pathZ = try self.gpa.dupeZ(u8, self.object_path);
-        defer self.gpa.free(object_pathZ);
-
         var error_message: [*:0]const u8 = undefined;
         if (self.target_machine.emitToFile(
             self.llvm_module,
-            object_pathZ.ptr,
+            self.object_pathZ.ptr,
             .ObjectFile,
             &error_message,
         ).toBool()) {
@@ -335,23 +277,55 @@ pub const LLVMIRModule = struct {
         }
     }
 
-    pub fn updateDecl(self: *LLVMIRModule, module: *Module, decl: *Module.Decl) !void {
-        self.gen(module, decl) catch |err| switch (err) {
+    pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
+        var dg: DeclGen = .{
+            .object = self,
+            .module = module,
+            .decl = decl,
+            .err_msg = null,
+            .gpa = module.gpa,
+        };
+        dg.genDecl() catch |err| switch (err) {
             error.CodegenFail => {
                 decl.analysis = .codegen_failure;
-                try module.failed_decls.put(module.gpa, decl, self.err_msg.?);
-                self.err_msg = null;
+                try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
+                dg.err_msg = null;
                 return;
             },
             else => |e| return e,
         };
     }
+};
+
+pub const DeclGen = struct {
+    object: *Object,
+    module: *Module,
+    decl: *Module.Decl,
+    err_msg: ?*Module.ErrorMsg,
+
+    gpa: *Allocator,
+
+    fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+        @setCold(true);
+        assert(self.err_msg == null);
+        const src_loc = src.toSrcLocWithDecl(self.decl);
+        self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, format, args);
+        return error.CodegenFail;
+    }
+
+    fn llvmModule(self: *DeclGen) *const llvm.Module {
+        return self.object.llvm_module;
+    }
+
+    fn context(self: *DeclGen) *const llvm.Context {
+        return self.object.context;
+    }
 
-    fn gen(self: *LLVMIRModule, module: *Module, decl: *Module.Decl) !void {
+    fn genDecl(self: *DeclGen) !void {
+        const decl = self.decl;
         const typed_value = decl.typed_value.most_recent.typed_value;
-        self.src_loc = decl.srcLoc();
-        self.decl = decl;
-        const src = self.src_loc.lazy;
+
+        const src = decl.srcLoc().lazy;
 
         log.debug("gen: {s} type: {}, value: {}", .{ decl.name, typed_value.ty, typed_value.val });
 
@@ -363,16 +337,10 @@ pub const LLVMIRModule = struct {
             // This gets the LLVM values from the function and stores them in `self.args`.
             const fn_param_len = func.owner_decl.typed_value.most_recent.typed_value.ty.fnParamLen();
             var args = try self.gpa.alloc(*const llvm.Value, fn_param_len);
-            defer self.gpa.free(args);
 
             for (args) |*arg, i| {
                 arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
             }
-            self.args = args;
-            self.arg_index = 0;
-
-            // Make sure no other LLVM values from other functions can be referenced
-            self.func_inst_table.clearRetainingCapacity();
 
             // We remove all the basic blocks of a function to support incremental
             // compilation!
@@ -381,12 +349,25 @@ pub const LLVMIRModule = struct {
                 bb.deleteBasicBlock();
             }
 
-            self.entry_block = self.context.appendBasicBlock(llvm_func, "Entry");
-            self.builder.positionBuilderAtEnd(self.entry_block);
-            self.latest_alloca_inst = null;
-            self.llvm_func = llvm_func;
+            const builder = self.context().createBuilder();
+
+            const entry_block = self.context().appendBasicBlock(llvm_func, "Entry");
+            builder.positionBuilderAtEnd(entry_block);
+
+            var fg: FuncGen = .{
+                .dg = self,
+                .builder = builder,
+                .args = args,
+                .arg_index = 0,
+                .func_inst_table = .{},
+                .entry_block = entry_block,
+                .latest_alloca_inst = null,
+                .llvm_func = llvm_func,
+                .blocks = .{},
+            };
+            defer fg.deinit();
 
-            try self.genBody(func.body);
+            try fg.genBody(func.body);
         } else if (typed_value.val.castTag(.extern_fn)) |extern_fn| {
             _ = try self.resolveLLVMFunction(extern_fn.data, src);
         } else {
@@ -394,7 +375,267 @@ pub const LLVMIRModule = struct {
         }
     }
 
-    fn genBody(self: *LLVMIRModule, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
+    /// If the llvm function does not exist, create it
+    fn resolveLLVMFunction(self: *DeclGen, func: *Module.Decl, src: LazySrcLoc) !*const llvm.Value {
+        // TODO: do we want to store this in our own data structure?
+        if (self.llvmModule().getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
+
+        const zig_fn_type = func.typed_value.most_recent.typed_value.ty;
+        const return_type = zig_fn_type.fnReturnType();
+
+        const fn_param_len = zig_fn_type.fnParamLen();
+
+        const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
+        defer self.gpa.free(fn_param_types);
+        zig_fn_type.fnParamTypes(fn_param_types);
+
+        const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
+        defer self.gpa.free(llvm_param);
+
+        for (fn_param_types) |fn_param, i| {
+            llvm_param[i] = try self.getLLVMType(fn_param, src);
+        }
+
+        const fn_type = llvm.Type.functionType(
+            try self.getLLVMType(return_type, src),
+            if (fn_param_len == 0) null else llvm_param.ptr,
+            @intCast(c_uint, fn_param_len),
+            .False,
+        );
+        const llvm_fn = self.llvmModule().addFunction(func.name, fn_type);
+
+        if (return_type.tag() == .noreturn) {
+            self.addFnAttr(llvm_fn, "noreturn");
+        }
+
+        return llvm_fn;
+    }
+
+    fn resolveGlobalDecl(self: *DeclGen, decl: *Module.Decl, src: LazySrcLoc) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
+        // TODO: do we want to store this in our own data structure?
+        if (self.llvmModule().getNamedGlobal(decl.name)) |val| return val;
+
+        const typed_value = decl.typed_value.most_recent.typed_value;
+
+        // TODO: remove this redundant `getLLVMType`, it is also called in `genTypedValue`.
+        const llvm_type = try self.getLLVMType(typed_value.ty, src);
+        const val = try self.genTypedValue(src, typed_value, null);
+        const global = self.llvmModule().addGlobal(llvm_type, decl.name);
+        llvm.setInitializer(global, val);
+
+        // TODO ask the Decl if it is const
+        // https://github.com/ziglang/zig/issues/7582
+
+        return global;
+    }
+
+    fn getLLVMType(self: *DeclGen, t: Type, src: LazySrcLoc) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
+        switch (t.zigTypeTag()) {
+            .Void => return self.context().voidType(),
+            .NoReturn => return self.context().voidType(),
+            .Int => {
+                const info = t.intInfo(self.module.getTarget());
+                return self.context().intType(info.bits);
+            },
+            .Bool => return self.context().intType(1),
+            .Pointer => {
+                if (t.isSlice()) {
+                    return self.fail(src, "TODO: LLVM backend: implement slices", .{});
+                } else {
+                    const elem_type = try self.getLLVMType(t.elemType(), src);
+                    return elem_type.pointerType(0);
+                }
+            },
+            .Array => {
+                const elem_type = try self.getLLVMType(t.elemType(), src);
+                return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
+            },
+            .Optional => {
+                if (!t.isPtrLikeOptional()) {
+                    var buf: Type.Payload.ElemType = undefined;
+                    const child_type = t.optionalChild(&buf);
+
+                    var optional_types: [2]*const llvm.Type = .{
+                        try self.getLLVMType(child_type, src),
+                        self.context().intType(1),
+                    };
+                    return self.context().structType(&optional_types, 2, .False);
+                } else {
+                    return self.fail(src, "TODO implement optional pointers as actual pointers", .{});
+                }
+            },
+            else => return self.fail(src, "TODO implement getLLVMType for type '{}'", .{t}),
+        }
+    }
+
+    // TODO: figure out a way to remove the FuncGen argument
+    fn genTypedValue(self: *DeclGen, src: LazySrcLoc, tv: TypedValue, fg: ?*FuncGen) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
+        const llvm_type = try self.getLLVMType(tv.ty, src);
+
+        if (tv.val.isUndef())
+            return llvm_type.getUndef();
+
+        switch (tv.ty.zigTypeTag()) {
+            .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
+            .Int => {
+                var bigint_space: Value.BigIntSpace = undefined;
+                const bigint = tv.val.toBigInt(&bigint_space);
+
+                if (bigint.eqZero()) return llvm_type.constNull();
+
+                if (bigint.limbs.len != 1) {
+                    return self.fail(src, "TODO implement bigger bigint", .{});
+                }
+                const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
+                if (!bigint.positive) {
+                    return llvm.constNeg(llvm_int);
+                }
+                return llvm_int;
+            },
+            .Pointer => switch (tv.val.tag()) {
+                .decl_ref => {
+                    const decl = tv.val.castTag(.decl_ref).?.data;
+                    const val = try self.resolveGlobalDecl(decl, src);
+
+                    const usize_type = try self.getLLVMType(Type.initTag(.usize), src);
+
+                    // TODO: second index should be the index into the memory!
+                    var indices: [2]*const llvm.Value = .{
+                        usize_type.constNull(),
+                        usize_type.constNull(),
+                    };
+
+                    // TODO: consider using buildInBoundsGEP2 for opaque pointers
+                    return fg.?.builder.buildInBoundsGEP(val, &indices, 2, "");
+                },
+                .ref_val => {
+                    const elem_value = tv.val.castTag(.ref_val).?.data;
+                    const elem_type = tv.ty.castPointer().?.data;
+                    const alloca = fg.?.buildAlloca(try self.getLLVMType(elem_type, src));
+                    _ = fg.?.builder.buildStore(try self.genTypedValue(src, .{ .ty = elem_type, .val = elem_value }, fg), alloca);
+                    return alloca;
+                },
+                else => return self.fail(src, "TODO implement const of pointer type '{}'", .{tv.ty}),
+            },
+            .Array => {
+                if (tv.val.castTag(.bytes)) |payload| {
+                    const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
+                        if (sentinel.tag() == .zero) break :blk true;
+                        return self.fail(src, "TODO handle other sentinel values", .{});
+                    } else false;
+
+                    return self.context().constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
+                } else {
+                    return self.fail(src, "TODO handle more array values", .{});
+                }
+            },
+            .Optional => {
+                if (!tv.ty.isPtrLikeOptional()) {
+                    var buf: Type.Payload.ElemType = undefined;
+                    const child_type = tv.ty.optionalChild(&buf);
+                    const llvm_child_type = try self.getLLVMType(child_type, src);
+
+                    if (tv.val.tag() == .null_value) {
+                        var optional_values: [2]*const llvm.Value = .{
+                            llvm_child_type.constNull(),
+                            self.context().intType(1).constNull(),
+                        };
+                        return self.context().constStruct(&optional_values, 2, .False);
+                    } else {
+                        var optional_values: [2]*const llvm.Value = .{
+                            try self.genTypedValue(src, .{ .ty = child_type, .val = tv.val }, fg),
+                            self.context().intType(1).constAllOnes(),
+                        };
+                        return self.context().constStruct(&optional_values, 2, .False);
+                    }
+                } else {
+                    return self.fail(src, "TODO implement const of optional pointer", .{});
+                }
+            },
+            else => return self.fail(src, "TODO implement const of type '{}'", .{tv.ty}),
+        }
+    }
+
+    // Helper functions
+    fn addAttr(self: *DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+        const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
+        assert(kind_id != 0);
+        const llvm_attr = self.context().createEnumAttribute(kind_id, 0);
+        val.addAttributeAtIndex(index, llvm_attr);
+    }
+
+    fn addFnAttr(self: *DeclGen, val: *const llvm.Value, attr_name: []const u8) void {
+        // TODO: improve this API, `addAttr(-1, attr_name)`
+        self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
+    }
+};
+
+pub const FuncGen = struct {
+    dg: *DeclGen,
+
+    builder: *const llvm.Builder,
+
+    /// This stores the LLVM values used in a function, such that they can be
+    /// referred to in other instructions. This table is cleared before every function is generated.
+    /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
+    /// in here, however if a block ends, the instructions can be thrown away.
+    func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value),
+
+    /// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction.
+    args: []*const llvm.Value,
+    arg_index: usize,
+
+    entry_block: *const llvm.BasicBlock,
+    /// This fields stores the last alloca instruction, such that we can append more alloca instructions
+    /// to the top of the function.
+    latest_alloca_inst: ?*const llvm.Value,
+
+    llvm_func: *const llvm.Value,
+
+    /// This data structure is used to implement breaking to blocks.
+    blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
+        parent_bb: *const llvm.BasicBlock,
+        break_bbs: *BreakBasicBlocks,
+        break_vals: *BreakValues,
+    }),
+
+    const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
+    const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
+
+    fn deinit(self: *FuncGen) void {
+        self.builder.dispose();
+        self.func_inst_table.deinit(self.gpa());
+        self.gpa().free(self.args);
+        self.blocks.deinit(self.gpa());
+    }
+
+    fn fail(self: *FuncGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+        @setCold(true);
+        return self.dg.fail(src, format, args);
+    }
+
+    fn llvmModule(self: *FuncGen) *const llvm.Module {
+        return self.dg.object.llvm_module;
+    }
+
+    fn context(self: *FuncGen) *const llvm.Context {
+        return self.dg.object.context;
+    }
+
+    fn gpa(self: *FuncGen) *Allocator {
+        return self.dg.gpa;
+    }
+
+    fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value {
+        if (inst.value()) |val| {
+            return self.dg.genTypedValue(inst.src, .{ .ty = inst.ty, .val = val }, self);
+        }
+        if (self.func_inst_table.get(inst)) |value| return value;
+
+        return self.fail(inst.src, "TODO implement global llvm values (or the value is not in the func_inst_table table)", .{});
+    }
+
+    fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
         for (body.instructions) |inst| {
             const opt_value = switch (inst.tag) {
                 .add => try self.genAdd(inst.castTag(.add).?),
@@ -434,11 +675,11 @@ pub const LLVMIRModule = struct {
                 },
                 else => |tag| return self.fail(inst.src, "TODO implement LLVM codegen for Zir instruction: {}", .{tag}),
             };
-            if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val);
+            if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val);
         }
     }
 
-    fn genCall(self: *LLVMIRModule, inst: *Inst.Call) !?*const llvm.Value {
+    fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value {
         if (inst.func.value()) |func_value| {
             const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
                 extern_fn.data
@@ -448,12 +689,12 @@ pub const LLVMIRModule = struct {
                 unreachable;
 
             const zig_fn_type = fn_decl.typed_value.most_recent.typed_value.ty;
-            const llvm_fn = try self.resolveLLVMFunction(fn_decl, inst.base.src);
+            const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl, inst.base.src);
 
             const num_args = inst.args.len;
 
-            const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, num_args);
-            defer self.gpa.free(llvm_param_vals);
+            const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args);
+            defer self.gpa().free(llvm_param_vals);
 
             for (inst.args) |arg, i| {
                 llvm_param_vals[i] = try self.resolveInst(arg);
@@ -482,17 +723,17 @@ pub const LLVMIRModule = struct {
         }
     }
 
-    fn genRetVoid(self: *LLVMIRModule, inst: *Inst.NoOp) ?*const llvm.Value {
+    fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
         _ = self.builder.buildRetVoid();
         return null;
     }
 
-    fn genRet(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+    fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
         _ = self.builder.buildRet(try self.resolveInst(inst.operand));
         return null;
     }
 
-    fn genCmp(self: *LLVMIRModule, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
+    fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
         const lhs = try self.resolveInst(inst.lhs);
         const rhs = try self.resolveInst(inst.rhs);
 
@@ -513,21 +754,21 @@ pub const LLVMIRModule = struct {
         return self.builder.buildICmp(operation, lhs, rhs, "");
     }
 
-    fn genBlock(self: *LLVMIRModule, inst: *Inst.Block) !?*const llvm.Value {
-        const parent_bb = self.context.createBasicBlock("Block");
+    fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value {
+        const parent_bb = self.context().createBasicBlock("Block");
 
         // 5 breaks to a block seems like a reasonable default.
-        var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5);
-        var break_vals = try BreakValues.initCapacity(self.gpa, 5);
-        try self.blocks.putNoClobber(self.gpa, inst, .{
+        var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5);
+        var break_vals = try BreakValues.initCapacity(self.gpa(), 5);
+        try self.blocks.putNoClobber(self.gpa(), inst, .{
             .parent_bb = parent_bb,
             .break_bbs = &break_bbs,
             .break_vals = &break_vals,
         });
         defer {
             self.blocks.removeAssertDiscard(inst);
-            break_bbs.deinit(self.gpa);
-            break_vals.deinit(self.gpa);
+            break_bbs.deinit(self.gpa());
+            break_vals.deinit(self.gpa());
         }
 
         try self.genBody(inst.body);
@@ -538,7 +779,7 @@ pub const LLVMIRModule = struct {
         // If the block does not return a value, we dont have to create a phi node.
         if (!inst.base.ty.hasCodeGenBits()) return null;
 
-        const phi_node = self.builder.buildPhi(try self.getLLVMType(inst.base.ty, inst.base.src), "");
+        const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty, inst.base.src), "");
         phi_node.addIncoming(
             break_vals.items.ptr,
             break_bbs.items.ptr,
@@ -547,7 +788,7 @@ pub const LLVMIRModule = struct {
         return phi_node;
     }
 
-    fn genBr(self: *LLVMIRModule, inst: *Inst.Br) !?*const llvm.Value {
+    fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value {
         var block = self.blocks.get(inst.block).?;
 
         // If the break doesn't break a value, then we don't have to add
@@ -560,25 +801,25 @@ pub const LLVMIRModule = struct {
 
             // For the phi node, we need the basic blocks and the values of the
             // break instructions.
-            try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
-            try block.break_vals.append(self.gpa, val);
+            try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock());
+            try block.break_vals.append(self.gpa(), val);
 
             _ = self.builder.buildBr(block.parent_bb);
         }
         return null;
     }
 
-    fn genBrVoid(self: *LLVMIRModule, inst: *Inst.BrVoid) !?*const llvm.Value {
+    fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value {
         var block = self.blocks.get(inst.block).?;
         _ = self.builder.buildBr(block.parent_bb);
         return null;
     }
 
-    fn genCondBr(self: *LLVMIRModule, inst: *Inst.CondBr) !?*const llvm.Value {
+    fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value {
         const condition_value = try self.resolveInst(inst.condition);
 
-        const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
-        const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
+        const then_block = self.context().appendBasicBlock(self.llvm_func, "Then");
+        const else_block = self.context().appendBasicBlock(self.llvm_func, "Else");
         {
             const prev_block = self.builder.getInsertBlock();
             defer self.builder.positionBuilderAtEnd(prev_block);
@@ -593,8 +834,8 @@ pub const LLVMIRModule = struct {
         return null;
     }
 
-    fn genLoop(self: *LLVMIRModule, inst: *Inst.Loop) !?*const llvm.Value {
-        const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop");
+    fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value {
+        const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop");
         _ = self.builder.buildBr(loop_block);
 
         self.builder.positionBuilderAtEnd(loop_block);
@@ -604,20 +845,20 @@ pub const LLVMIRModule = struct {
         return null;
     }
 
-    fn genNot(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+    fn genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
         return self.builder.buildNot(try self.resolveInst(inst.operand), "");
     }
 
-    fn genUnreach(self: *LLVMIRModule, inst: *Inst.NoOp) ?*const llvm.Value {
+    fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
         _ = self.builder.buildUnreachable();
         return null;
     }
 
-    fn genIsNonNull(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+    fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
         const operand = try self.resolveInst(inst.operand);
 
         if (operand_is_ptr) {
-            const index_type = self.context.intType(32);
+            const index_type = self.context().intType(32);
 
             var indices: [2]*const llvm.Value = .{
                 index_type.constNull(),
@@ -630,15 +871,15 @@ pub const LLVMIRModule = struct {
         }
     }
 
-    fn genIsNull(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+    fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
         return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, "");
     }
 
-    fn genOptionalPayload(self: *LLVMIRModule, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
+    fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
         const operand = try self.resolveInst(inst.operand);
 
         if (operand_is_ptr) {
-            const index_type = self.context.intType(32);
+            const index_type = self.context().intType(32);
 
             var indices: [2]*const llvm.Value = .{
                 index_type.constNull(),
@@ -651,7 +892,7 @@ pub const LLVMIRModule = struct {
         }
     }
 
-    fn genAdd(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+    fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
         const lhs = try self.resolveInst(inst.lhs);
         const rhs = try self.resolveInst(inst.rhs);
 
@@ -664,7 +905,7 @@ pub const LLVMIRModule = struct {
             self.builder.buildNUWAdd(lhs, rhs, "");
     }
 
-    fn genSub(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+    fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
         const lhs = try self.resolveInst(inst.lhs);
         const rhs = try self.resolveInst(inst.rhs);
 
@@ -677,44 +918,44 @@ pub const LLVMIRModule = struct {
             self.builder.buildNUWSub(lhs, rhs, "");
     }
 
-    fn genIntCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+    fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
         const val = try self.resolveInst(inst.operand);
 
         const signed = inst.base.ty.isSignedInt();
         // TODO: Should we use intcast here or just a simple bitcast?
         //       LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
-        return self.builder.buildIntCast2(val, try self.getLLVMType(inst.base.ty, inst.base.src), llvm.Bool.fromBool(signed), "");
+        return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty, inst.base.src), llvm.Bool.fromBool(signed), "");
     }
 
-    fn genBitCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+    fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
         const val = try self.resolveInst(inst.operand);
-        const dest_type = try self.getLLVMType(inst.base.ty, inst.base.src);
+        const dest_type = try self.dg.getLLVMType(inst.base.ty, inst.base.src);
 
         return self.builder.buildBitCast(val, dest_type, "");
     }
 
-    fn genArg(self: *LLVMIRModule, inst: *Inst.Arg) !?*const llvm.Value {
+    fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value {
         const arg_val = self.args[self.arg_index];
         self.arg_index += 1;
 
-        const ptr_val = self.buildAlloca(try self.getLLVMType(inst.base.ty, inst.base.src));
+        const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty, inst.base.src));
         _ = self.builder.buildStore(arg_val, ptr_val);
         return self.builder.buildLoad(ptr_val, "");
     }
 
-    fn genAlloc(self: *LLVMIRModule, inst: *Inst.NoOp) !?*const llvm.Value {
+    fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
         // buildAlloca expects the pointee type, not the pointer type, so assert that
         // a Payload.PointerSimple is passed to the alloc instruction.
         const pointee_type = inst.base.ty.castPointer().?.data;
 
         // TODO: figure out a way to get the name of the var decl.
         // TODO: set alignment and volatile
-        return self.buildAlloca(try self.getLLVMType(pointee_type, inst.base.src));
+        return self.buildAlloca(try self.dg.getLLVMType(pointee_type, inst.base.src));
     }
 
     /// Use this instead of builder.buildAlloca, because this function makes sure to
     /// put the alloca instruction at the top of the function!
-    fn buildAlloca(self: *LLVMIRModule, t: *const llvm.Type) *const llvm.Value {
+    fn buildAlloca(self: *FuncGen, t: *const llvm.Type) *const llvm.Value {
         const prev_block = self.builder.getInsertBlock();
         defer self.builder.positionBuilderAtEnd(prev_block);
 
@@ -736,240 +977,30 @@ pub const LLVMIRModule = struct {
         return val;
     }
 
-    fn genStore(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.Value {
+    fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
         const val = try self.resolveInst(inst.rhs);
         const ptr = try self.resolveInst(inst.lhs);
         _ = self.builder.buildStore(val, ptr);
         return null;
     }
 
-    fn genLoad(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
+    fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
         const ptr_val = try self.resolveInst(inst.operand);
         return self.builder.buildLoad(ptr_val, "");
     }
 
-    fn genBreakpoint(self: *LLVMIRModule, inst: *Inst.NoOp) !?*const llvm.Value {
+    fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
         const llvn_fn = self.getIntrinsic("llvm.debugtrap");
         _ = self.builder.buildCall(llvn_fn, null, 0, "");
         return null;
     }
 
-    fn getIntrinsic(self: *LLVMIRModule, name: []const u8) *const llvm.Value {
+    fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
         const id = llvm.lookupIntrinsicID(name.ptr, name.len);
         assert(id != 0);
         // TODO: add support for overload intrinsics by passing the prefix of the intrinsic
         //       to `lookupIntrinsicID` and then passing the correct types to
         //       `getIntrinsicDeclaration`
-        return self.llvm_module.getIntrinsicDeclaration(id, null, 0);
-    }
-
-    fn resolveInst(self: *LLVMIRModule, inst: *ir.Inst) !*const llvm.Value {
-        if (inst.value()) |val| {
-            return self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = val });
-        }
-        if (self.func_inst_table.get(inst)) |value| return value;
-
-        return self.fail(inst.src, "TODO implement global llvm values (or the value is not in the func_inst_table table)", .{});
-    }
-
-    fn genTypedValue(self: *LLVMIRModule, src: LazySrcLoc, tv: TypedValue) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
-        const llvm_type = try self.getLLVMType(tv.ty, src);
-
-        if (tv.val.isUndef())
-            return llvm_type.getUndef();
-
-        switch (tv.ty.zigTypeTag()) {
-            .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
-            .Int => {
-                var bigint_space: Value.BigIntSpace = undefined;
-                const bigint = tv.val.toBigInt(&bigint_space);
-
-                if (bigint.eqZero()) return llvm_type.constNull();
-
-                if (bigint.limbs.len != 1) {
-                    return self.fail(src, "TODO implement bigger bigint", .{});
-                }
-                const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
-                if (!bigint.positive) {
-                    return llvm.constNeg(llvm_int);
-                }
-                return llvm_int;
-            },
-            .Pointer => switch (tv.val.tag()) {
-                .decl_ref => {
-                    const decl = tv.val.castTag(.decl_ref).?.data;
-                    const val = try self.resolveGlobalDecl(decl, src);
-
-                    const usize_type = try self.getLLVMType(Type.initTag(.usize), src);
-
-                    // TODO: second index should be the index into the memory!
-                    var indices: [2]*const llvm.Value = .{
-                        usize_type.constNull(),
-                        usize_type.constNull(),
-                    };
-
-                    // TODO: consider using buildInBoundsGEP2 for opaque pointers
-                    return self.builder.buildInBoundsGEP(val, &indices, 2, "");
-                },
-                .ref_val => {
-                    const elem_value = tv.val.castTag(.ref_val).?.data;
-                    const elem_type = tv.ty.castPointer().?.data;
-                    const alloca = self.buildAlloca(try self.getLLVMType(elem_type, src));
-                    _ = self.builder.buildStore(try self.genTypedValue(src, .{ .ty = elem_type, .val = elem_value }), alloca);
-                    return alloca;
-                },
-                else => return self.fail(src, "TODO implement const of pointer type '{}'", .{tv.ty}),
-            },
-            .Array => {
-                if (tv.val.castTag(.bytes)) |payload| {
-                    const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
-                        if (sentinel.tag() == .zero) break :blk true;
-                        return self.fail(src, "TODO handle other sentinel values", .{});
-                    } else false;
-
-                    return self.context.constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
-                } else {
-                    return self.fail(src, "TODO handle more array values", .{});
-                }
-            },
-            .Optional => {
-                if (!tv.ty.isPtrLikeOptional()) {
-                    var buf: Type.Payload.ElemType = undefined;
-                    const child_type = tv.ty.optionalChild(&buf);
-                    const llvm_child_type = try self.getLLVMType(child_type, src);
-
-                    if (tv.val.tag() == .null_value) {
-                        var optional_values: [2]*const llvm.Value = .{
-                            llvm_child_type.constNull(),
-                            self.context.intType(1).constNull(),
-                        };
-                        return self.context.constStruct(&optional_values, 2, .False);
-                    } else {
-                        var optional_values: [2]*const llvm.Value = .{
-                            try self.genTypedValue(src, .{ .ty = child_type, .val = tv.val }),
-                            self.context.intType(1).constAllOnes(),
-                        };
-                        return self.context.constStruct(&optional_values, 2, .False);
-                    }
-                } else {
-                    return self.fail(src, "TODO implement const of optional pointer", .{});
-                }
-            },
-            else => return self.fail(src, "TODO implement const of type '{}'", .{tv.ty}),
-        }
-    }
-
-    fn getLLVMType(self: *LLVMIRModule, t: Type, src: LazySrcLoc) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
-        switch (t.zigTypeTag()) {
-            .Void => return self.context.voidType(),
-            .NoReturn => return self.context.voidType(),
-            .Int => {
-                const info = t.intInfo(self.module.getTarget());
-                return self.context.intType(info.bits);
-            },
-            .Bool => return self.context.intType(1),
-            .Pointer => {
-                if (t.isSlice()) {
-                    return self.fail(src, "TODO: LLVM backend: implement slices", .{});
-                } else {
-                    const elem_type = try self.getLLVMType(t.elemType(), src);
-                    return elem_type.pointerType(0);
-                }
-            },
-            .Array => {
-                const elem_type = try self.getLLVMType(t.elemType(), src);
-                return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
-            },
-            .Optional => {
-                if (!t.isPtrLikeOptional()) {
-                    var buf: Type.Payload.ElemType = undefined;
-                    const child_type = t.optionalChild(&buf);
-
-                    var optional_types: [2]*const llvm.Type = .{
-                        try self.getLLVMType(child_type, src),
-                        self.context.intType(1),
-                    };
-                    return self.context.structType(&optional_types, 2, .False);
-                } else {
-                    return self.fail(src, "TODO implement optional pointers as actual pointers", .{});
-                }
-            },
-            else => return self.fail(src, "TODO implement getLLVMType for type '{}'", .{t}),
-        }
-    }
-
-    fn resolveGlobalDecl(self: *LLVMIRModule, decl: *Module.Decl, src: LazySrcLoc) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
-        // TODO: do we want to store this in our own datastructure?
-        if (self.llvm_module.getNamedGlobal(decl.name)) |val| return val;
-
-        const typed_value = decl.typed_value.most_recent.typed_value;
-
-        // TODO: remove this redundant `getLLVMType`, it is also called in `genTypedValue`.
-        const llvm_type = try self.getLLVMType(typed_value.ty, src);
-        const val = try self.genTypedValue(src, typed_value);
-        const global = self.llvm_module.addGlobal(llvm_type, decl.name);
-        llvm.setInitializer(global, val);
-
-        // TODO ask the Decl if it is const
-        // https://github.com/ziglang/zig/issues/7582
-
-        return global;
-    }
-
-    /// If the llvm function does not exist, create it
-    fn resolveLLVMFunction(self: *LLVMIRModule, func: *Module.Decl, src: LazySrcLoc) !*const llvm.Value {
-        // TODO: do we want to store this in our own datastructure?
-        if (self.llvm_module.getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
-
-        const zig_fn_type = func.typed_value.most_recent.typed_value.ty;
-        const return_type = zig_fn_type.fnReturnType();
-
-        const fn_param_len = zig_fn_type.fnParamLen();
-
-        const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
-        defer self.gpa.free(fn_param_types);
-        zig_fn_type.fnParamTypes(fn_param_types);
-
-        const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
-        defer self.gpa.free(llvm_param);
-
-        for (fn_param_types) |fn_param, i| {
-            llvm_param[i] = try self.getLLVMType(fn_param, src);
-        }
-
-        const fn_type = llvm.Type.functionType(
-            try self.getLLVMType(return_type, src),
-            if (fn_param_len == 0) null else llvm_param.ptr,
-            @intCast(c_uint, fn_param_len),
-            .False,
-        );
-        const llvm_fn = self.llvm_module.addFunction(func.name, fn_type);
-
-        if (return_type.tag() == .noreturn) {
-            self.addFnAttr(llvm_fn, "noreturn");
-        }
-
-        return llvm_fn;
-    }
-
-    // Helper functions
-    fn addAttr(self: LLVMIRModule, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
-        const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
-        assert(kind_id != 0);
-        const llvm_attr = self.context.createEnumAttribute(kind_id, 0);
-        val.addAttributeAtIndex(index, llvm_attr);
-    }
-
-    fn addFnAttr(self: *LLVMIRModule, val: *const llvm.Value, attr_name: []const u8) void {
-        // TODO: improve this API, `addAttr(-1, attr_name)`
-        self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
-    }
-
-    pub fn fail(self: *LLVMIRModule, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
-        @setCold(true);
-        assert(self.err_msg == null);
-        const src_loc = src.toSrcLocWithDecl(self.decl);
-        self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, format, args);
-        return error.CodegenFail;
+        return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
     }
 };
src/link/Coff.zig
@@ -34,7 +34,7 @@ pub const base_tag: link.File.Tag = .coff;
 const msdos_stub = @embedFile("msdos-stub.bin");
 
 /// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
-llvm_ir_module: ?*llvm_backend.LLVMIRModule = null,
+llvm_object: ?*llvm_backend.Object = null,
 
 base: link.File,
 ptr_width: PtrWidth,
@@ -129,7 +129,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
         const self = try createEmpty(allocator, options);
         errdefer self.base.destroy();
 
-        self.llvm_ir_module = try llvm_backend.LLVMIRModule.create(allocator, sub_path, options);
+        self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options);
         return self;
     }
 
@@ -413,7 +413,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
 }
 
 pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
 
@@ -660,7 +660,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
     defer tracy.end();
 
     if (build_options.have_llvm)
-        if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.updateDecl(module, decl);
+        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
 
     const typed_value = decl.typed_value.most_recent.typed_value;
     if (typed_value.val.tag() == .extern_fn) {
@@ -720,7 +720,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
 }
 
 pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
     self.freeTextBlock(&decl.link.coff);
@@ -728,7 +728,7 @@ pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
 }
 
 pub fn updateDeclExports(self: *Coff, module: *Module, decl: *Module.Decl, exports: []const *Module.Export) !void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     for (exports) |exp| {
         if (exp.options.section) |section_name| {
@@ -771,7 +771,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation) !void {
     defer tracy.end();
 
     if (build_options.have_llvm)
-        if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.flushModule(comp);
+        if (self.llvm_object) |llvm_object| return try llvm_object.flushModule(comp);
 
     if (self.text_section_size_dirty) {
         // Write the new raw size in the .text header
@@ -1308,7 +1308,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
 }
 
 pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
-    assert(self.llvm_ir_module == null);
+    assert(self.llvm_object == null);
     return self.text_section_virtual_address + decl.link.coff.text_offset;
 }
 
@@ -1318,7 +1318,7 @@ pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !v
 
 pub fn deinit(self: *Coff) void {
     if (build_options.have_llvm)
-        if (self.llvm_ir_module) |ir_module| ir_module.deinit(self.base.allocator);
+        if (self.llvm_object) |ir_module| ir_module.deinit(self.base.allocator);
 
     self.text_block_free_list.deinit(self.base.allocator);
     self.offset_table.deinit(self.base.allocator);
src/link/Elf.zig
@@ -35,7 +35,7 @@ base: File,
 ptr_width: PtrWidth,
 
 /// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
-llvm_ir_module: ?*llvm_backend.LLVMIRModule = null,
+llvm_object: ?*llvm_backend.Object = null,
 
 /// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
 /// Same order as in the file.
@@ -232,7 +232,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
         const self = try createEmpty(allocator, options);
         errdefer self.base.destroy();
 
-        self.llvm_ir_module = try llvm_backend.LLVMIRModule.create(allocator, sub_path, options);
+        self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options);
         return self;
     }
 
@@ -299,7 +299,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Elf {
 
 pub fn deinit(self: *Elf) void {
     if (build_options.have_llvm)
-        if (self.llvm_ir_module) |ir_module|
+        if (self.llvm_object) |ir_module|
             ir_module.deinit(self.base.allocator);
 
     self.sections.deinit(self.base.allocator);
@@ -318,7 +318,7 @@ pub fn deinit(self: *Elf) void {
 }
 
 pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl) u64 {
-    assert(self.llvm_ir_module == null);
+    assert(self.llvm_object == null);
     assert(decl.link.elf.local_sym_index != 0);
     return self.local_symbols.items[decl.link.elf.local_sym_index].st_value;
 }
@@ -438,7 +438,7 @@ fn updateString(self: *Elf, old_str_off: u32, new_name: []const u8) !u32 {
 }
 
 pub fn populateMissingMetadata(self: *Elf) !void {
-    assert(self.llvm_ir_module == null);
+    assert(self.llvm_object == null);
 
     const small_ptr = switch (self.ptr_width) {
         .p32 => true,
@@ -745,7 +745,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
     defer tracy.end();
 
     if (build_options.have_llvm)
-        if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.flushModule(comp);
+        if (self.llvm_object) |llvm_object| return try llvm_object.flushModule(comp);
 
     // TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
     // Zig source code.
@@ -2111,7 +2111,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
 }
 
 pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     if (decl.link.elf.local_sym_index != 0) return;
 
@@ -2149,7 +2149,7 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
 }
 
 pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
     self.freeTextBlock(&decl.link.elf);
@@ -2189,7 +2189,7 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
     defer tracy.end();
 
     if (build_options.have_llvm)
-        if (self.llvm_ir_module) |llvm_ir_module| return try llvm_ir_module.updateDecl(module, decl);
+        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
 
     const typed_value = decl.typed_value.most_recent.typed_value;
     if (typed_value.val.tag() == .extern_fn) {
@@ -2673,7 +2673,7 @@ pub fn updateDeclExports(
     decl: *Module.Decl,
     exports: []const *Module.Export,
 ) !void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     const tracy = trace(@src());
     defer tracy.end();
@@ -2748,7 +2748,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
     const tracy = trace(@src());
     defer tracy.end();
 
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     const tree = decl.container.file_scope.tree;
     const node_tags = tree.nodes.items(.tag);
@@ -2773,7 +2773,7 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
 }
 
 pub fn deleteExport(self: *Elf, exp: Export) void {
-    if (self.llvm_ir_module) |_| return;
+    if (self.llvm_object) |_| return;
 
     const sym_index = exp.sym_index orelse return;
     self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};